diff --git a/internal/addrs/check.go b/addrs/check.go similarity index 99% rename from internal/addrs/check.go rename to addrs/check.go index 430b50c990d8..0e4a8b028e16 100644 --- a/internal/addrs/check.go +++ b/addrs/check.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // Check is the address of a check rule within a checkable object. diff --git a/internal/addrs/checkablekind_string.go b/addrs/checkablekind_string.go similarity index 100% rename from internal/addrs/checkablekind_string.go rename to addrs/checkablekind_string.go diff --git a/internal/addrs/checktype_string.go b/addrs/checktype_string.go similarity index 100% rename from internal/addrs/checktype_string.go rename to addrs/checktype_string.go diff --git a/internal/addrs/count_attr.go b/addrs/count_attr.go similarity index 100% rename from internal/addrs/count_attr.go rename to addrs/count_attr.go diff --git a/internal/addrs/doc.go b/addrs/doc.go similarity index 100% rename from internal/addrs/doc.go rename to addrs/doc.go diff --git a/internal/addrs/for_each_attr.go b/addrs/for_each_attr.go similarity index 100% rename from internal/addrs/for_each_attr.go rename to addrs/for_each_attr.go diff --git a/internal/addrs/input_variable.go b/addrs/input_variable.go similarity index 100% rename from internal/addrs/input_variable.go rename to addrs/input_variable.go diff --git a/internal/addrs/instance_key.go b/addrs/instance_key.go similarity index 100% rename from internal/addrs/instance_key.go rename to addrs/instance_key.go diff --git a/internal/addrs/instance_key_test.go b/addrs/instance_key_test.go similarity index 100% rename from internal/addrs/instance_key_test.go rename to addrs/instance_key_test.go diff --git a/internal/addrs/local_value.go b/addrs/local_value.go similarity index 100% rename from internal/addrs/local_value.go rename to 
addrs/local_value.go diff --git a/internal/addrs/map.go b/addrs/map.go similarity index 100% rename from internal/addrs/map.go rename to addrs/map.go diff --git a/internal/addrs/map_test.go b/addrs/map_test.go similarity index 100% rename from internal/addrs/map_test.go rename to addrs/map_test.go diff --git a/internal/addrs/module.go b/addrs/module.go similarity index 100% rename from internal/addrs/module.go rename to addrs/module.go diff --git a/internal/addrs/module_call.go b/addrs/module_call.go similarity index 100% rename from internal/addrs/module_call.go rename to addrs/module_call.go diff --git a/internal/addrs/module_instance.go b/addrs/module_instance.go similarity index 99% rename from internal/addrs/module_instance.go rename to addrs/module_instance.go index f197dc144f95..44bea48a20ce 100644 --- a/internal/addrs/module_instance.go +++ b/addrs/module_instance.go @@ -9,7 +9,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/gocty" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // ModuleInstance is an address for a particular module instance within the diff --git a/internal/addrs/module_instance_test.go b/addrs/module_instance_test.go similarity index 100% rename from internal/addrs/module_instance_test.go rename to addrs/module_instance_test.go diff --git a/internal/addrs/module_package.go b/addrs/module_package.go similarity index 100% rename from internal/addrs/module_package.go rename to addrs/module_package.go diff --git a/internal/addrs/module_source.go b/addrs/module_source.go similarity index 99% rename from internal/addrs/module_source.go rename to addrs/module_source.go index 82000dbc4181..cc41f4e4309d 100644 --- a/internal/addrs/module_source.go +++ b/addrs/module_source.go @@ -6,7 +6,7 @@ import ( "strings" tfaddr "github.com/hashicorp/terraform-registry-address" - "github.com/hashicorp/terraform/internal/getmodules" + "github.com/hashicorp/terraform/getmodules" ) // 
ModuleSource is the general type for all three of the possible module source diff --git a/internal/addrs/module_source_test.go b/addrs/module_source_test.go similarity index 100% rename from internal/addrs/module_source_test.go rename to addrs/module_source_test.go diff --git a/internal/addrs/module_test.go b/addrs/module_test.go similarity index 100% rename from internal/addrs/module_test.go rename to addrs/module_test.go diff --git a/internal/addrs/move_endpoint.go b/addrs/move_endpoint.go similarity index 99% rename from internal/addrs/move_endpoint.go rename to addrs/move_endpoint.go index 765b66f5eecc..e83325365842 100644 --- a/internal/addrs/move_endpoint.go +++ b/addrs/move_endpoint.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // MoveEndpoint is to AbsMoveable and ConfigMoveable what Target is to diff --git a/internal/addrs/move_endpoint_kind.go b/addrs/move_endpoint_kind.go similarity index 100% rename from internal/addrs/move_endpoint_kind.go rename to addrs/move_endpoint_kind.go diff --git a/internal/addrs/move_endpoint_module.go b/addrs/move_endpoint_module.go similarity index 99% rename from internal/addrs/move_endpoint_module.go rename to addrs/move_endpoint_module.go index f2c1408d66b1..a230a60d6525 100644 --- a/internal/addrs/move_endpoint_module.go +++ b/addrs/move_endpoint_module.go @@ -7,7 +7,7 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // anyKeyImpl is the InstanceKey representation indicating a wildcard, which diff --git a/internal/addrs/move_endpoint_module_test.go b/addrs/move_endpoint_module_test.go similarity index 99% rename from internal/addrs/move_endpoint_module_test.go rename to addrs/move_endpoint_module_test.go index c1643d44c257..d07fa7ba0c23 100644 --- a/internal/addrs/move_endpoint_module_test.go +++ 
b/addrs/move_endpoint_module_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func TestModuleInstanceMoveDestination(t *testing.T) { diff --git a/internal/addrs/move_endpoint_test.go b/addrs/move_endpoint_test.go similarity index 100% rename from internal/addrs/move_endpoint_test.go rename to addrs/move_endpoint_test.go diff --git a/internal/addrs/moveable.go b/addrs/moveable.go similarity index 100% rename from internal/addrs/moveable.go rename to addrs/moveable.go diff --git a/internal/addrs/moveendpointkind_string.go b/addrs/moveendpointkind_string.go similarity index 100% rename from internal/addrs/moveendpointkind_string.go rename to addrs/moveendpointkind_string.go diff --git a/addrs/output_value.go b/addrs/output_value.go new file mode 100644 index 000000000000..343e0299d76e --- /dev/null +++ b/addrs/output_value.go @@ -0,0 +1,223 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/tfdiags" +) + +// OutputValue is the address of an output value, in the context of the module +// that is defining it. +// +// This is related to but separate from ModuleCallOutput, which represents +// a module output from the perspective of its parent module. Since output +// values cannot be represented from the module where they are defined, +// OutputValue is not Referenceable, while ModuleCallOutput is. +type OutputValue struct { + Name string +} + +func (v OutputValue) String() string { + return "output." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: v, + } +} + +// InModule converts the receiver into a config address within the given +// module. 
+func (v OutputValue) InModule(m Module) ConfigOutputValue { + return ConfigOutputValue{ + Module: m, + OutputValue: v, + } +} + +// AbsOutputValue is the absolute address of an output value within a module instance. +// +// This represents an output globally within the namespace of a particular +// configuration. It is related to but separate from ModuleCallOutput, which +// represents a module output from the perspective of its parent module. +type AbsOutputValue struct { + Module ModuleInstance + OutputValue OutputValue +} + +// OutputValue returns the absolute address of an output value of the given +// name within the receiving module instance. +func (m ModuleInstance) OutputValue(name string) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: OutputValue{ + Name: name, + }, + } +} + +func (v AbsOutputValue) Check(t CheckType, i int) Check { + return Check{ + Container: v, + Type: t, + Index: i, + } +} + +func (v AbsOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +func (v AbsOutputValue) Equal(o AbsOutputValue) bool { + return v.OutputValue == o.OutputValue && v.Module.Equal(o.Module) +} + +func (v AbsOutputValue) ConfigOutputValue() ConfigOutputValue { + return ConfigOutputValue{ + Module: v.Module.Module(), + OutputValue: v.OutputValue, + } +} + +func (v AbsOutputValue) checkableSigil() { + // Output values are checkable +} + +func (v AbsOutputValue) ConfigCheckable() ConfigCheckable { + // Output values are declared by "output" blocks in the configuration, + // represented as ConfigOutputValue. 
+ return v.ConfigOutputValue() +} + +func (v AbsOutputValue) CheckableKind() CheckableKind { + return CheckableOutputValue +} + +func (v AbsOutputValue) UniqueKey() UniqueKey { + return absOutputValueUniqueKey(v.String()) +} + +type absOutputValueUniqueKey string + +func (k absOutputValueUniqueKey) uniqueKeySigil() {} + +func ParseAbsOutputValue(traversal hcl.Traversal) (AbsOutputValue, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return AbsOutputValue{}, diags + } + + if len(remain) != 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + if remain.RootName() != "output" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Output address must start with \"output.\".", + Subject: remain[0].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + var name string + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + return AbsOutputValue{ + Module: path, + OutputValue: OutputValue{ + Name: name, + }, + }, diags +} + +func ParseAbsOutputValueStr(str string) (AbsOutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsOutputValue{}, diags + } + + addr, addrDiags := ParseAbsOutputValue(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ModuleCallOutput converts an AbsModuleOutput into a 
ModuleCallOutput, +// returning also the module instance that the ModuleCallOutput is relative +// to. +// +// The root module does not have a call, and so this method cannot be used +// with outputs in the root module, and will panic in that case. +func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallInstanceOutput) { + if v.Module.IsRoot() { + panic("ReferenceFromCall used with root module output") + } + + caller, call := v.Module.CallInstance() + return caller, ModuleCallInstanceOutput{ + Call: call, + Name: v.OutputValue.Name, + } +} + +// ConfigOutputValue represents a particular "output" block in the +// configuration, which might have many AbsOutputValue addresses associated +// with it at runtime if it belongs to a module that was called using +// "count" or "for_each". +type ConfigOutputValue struct { + Module Module + OutputValue OutputValue +} + +func (v ConfigOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +func (v ConfigOutputValue) configCheckableSigil() { + // ConfigOutputValue is the ConfigCheckable for AbsOutputValue. 
+} + +func (v ConfigOutputValue) CheckableKind() CheckableKind { + return CheckableOutputValue +} + +func (v ConfigOutputValue) UniqueKey() UniqueKey { + return configOutputValueUniqueKey(v.String()) +} + +type configOutputValueUniqueKey string + +func (k configOutputValueUniqueKey) uniqueKeySigil() {} diff --git a/internal/addrs/output_value_test.go b/addrs/output_value_test.go similarity index 100% rename from internal/addrs/output_value_test.go rename to addrs/output_value_test.go diff --git a/internal/addrs/parse_ref.go b/addrs/parse_ref.go similarity index 99% rename from internal/addrs/parse_ref.go rename to addrs/parse_ref.go index bd5bcc7c51ae..f9413cbd4094 100644 --- a/internal/addrs/parse_ref.go +++ b/addrs/parse_ref.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/addrs/parse_ref_test.go b/addrs/parse_ref_test.go similarity index 99% rename from internal/addrs/parse_ref_test.go rename to addrs/parse_ref_test.go index 52c9b2cd33e2..dceb1144e155 100644 --- a/internal/addrs/parse_ref_test.go +++ b/addrs/parse_ref_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/addrs/parse_target.go b/addrs/parse_target.go similarity index 99% rename from internal/addrs/parse_target.go rename to addrs/parse_target.go index 378e4de6a5fb..2a4d0ecbb1e6 100644 --- a/internal/addrs/parse_target.go +++ b/addrs/parse_target.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // Target describes a targeted address with 
source location information. diff --git a/internal/addrs/parse_target_test.go b/addrs/parse_target_test.go similarity index 99% rename from internal/addrs/parse_target_test.go rename to addrs/parse_target_test.go index 6e838d0e51ad..84796084f0fc 100644 --- a/internal/addrs/parse_target_test.go +++ b/addrs/parse_target_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func TestParseTarget(t *testing.T) { diff --git a/internal/addrs/path_attr.go b/addrs/path_attr.go similarity index 100% rename from internal/addrs/path_attr.go rename to addrs/path_attr.go diff --git a/addrs/provider.go b/addrs/provider.go new file mode 100644 index 000000000000..ce41d650ffd7 --- /dev/null +++ b/addrs/provider.go @@ -0,0 +1,205 @@ +package addrs + +import ( + "github.com/hashicorp/hcl/v2" + tfaddr "github.com/hashicorp/terraform-registry-address" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/tfdiags" +) + +// Provider encapsulates a single provider type. In the future this will be +// extended to include additional fields including Namespace and SourceHost +type Provider = tfaddr.Provider + +// DefaultProviderRegistryHost is the hostname used for provider addresses that do +// not have an explicit hostname. +const DefaultProviderRegistryHost = tfaddr.DefaultProviderRegistryHost + +// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider +// namespace. Built-in provider addresses must also have their namespace set +// to BuiltInProviderNamespace in order to be considered as built-in. +const BuiltInProviderHost = tfaddr.BuiltInProviderHost + +// BuiltInProviderNamespace is the provider namespace used for "built-in" +// providers. 
Built-in provider addresses must also have their hostname +// set to BuiltInProviderHost in order to be considered as built-in. +// +// The this namespace is literally named "builtin", in the hope that users +// who see FQNs containing this will be able to infer the way in which they are +// special, even if they haven't encountered the concept formally yet. +const BuiltInProviderNamespace = tfaddr.BuiltInProviderNamespace + +// LegacyProviderNamespace is the special string used in the Namespace field +// of type Provider to mark a legacy provider address. This special namespace +// value would normally be invalid, and can be used only when the hostname is +// DefaultRegistryHost because that host owns the mapping from legacy name to +// FQN. +const LegacyProviderNamespace = tfaddr.LegacyProviderNamespace + +func IsDefaultProvider(addr Provider) bool { + return addr.Hostname == DefaultProviderRegistryHost && addr.Namespace == "hashicorp" +} + +// NewProvider constructs a provider address from its parts, and normalizes +// the namespace and type parts to lowercase using unicode case folding rules +// so that resulting addrs.Provider values can be compared using standard +// Go equality rules (==). +// +// The hostname is given as a svchost.Hostname, which is required by the +// contract of that type to have already been normalized for equality testing. +// +// This function will panic if the given namespace or type name are not valid. +// When accepting namespace or type values from outside the program, use +// ParseProviderPart first to check that the given value is valid. +func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider { + return tfaddr.NewProvider(hostname, namespace, typeName) +} + +// ImpliedProviderForUnqualifiedType represents the rules for inferring what +// provider FQN a user intended when only a naked type name is available. 
+// +// For all except the type name "terraform" this returns a so-called "default" +// provider, which is under the registry.terraform.io/hashicorp/ namespace. +// +// As a special case, the string "terraform" maps to +// "terraform.io/builtin/terraform" because that is the more likely user +// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" +// which remains only for compatibility with older Terraform versions. +func ImpliedProviderForUnqualifiedType(typeName string) Provider { + switch typeName { + case "terraform": + // Note for future maintainers: any additional strings we add here + // as implied to be builtin must never also be use as provider names + // in the registry.terraform.io/hashicorp/... namespace, because + // otherwise older versions of Terraform could implicitly select + // the registry name instead of the internal one. + return NewBuiltInProvider(typeName) + default: + return NewDefaultProvider(typeName) + } +} + +// NewDefaultProvider returns the default address of a HashiCorp-maintained, +// Registry-hosted provider. +func NewDefaultProvider(name string) Provider { + return tfaddr.Provider{ + Type: MustParseProviderPart(name), + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + } +} + +// NewBuiltInProvider returns the address of a "built-in" provider. See +// the docs for Provider.IsBuiltIn for more information. +func NewBuiltInProvider(name string) Provider { + return tfaddr.Provider{ + Type: MustParseProviderPart(name), + Namespace: BuiltInProviderNamespace, + Hostname: BuiltInProviderHost, + } +} + +// NewLegacyProvider returns a mock address for a provider. +// This will be removed when ProviderType is fully integrated. +func NewLegacyProvider(name string) Provider { + return Provider{ + // We intentionally don't normalize and validate the legacy names, + // because existing code expects legacy provider names to pass through + // verbatim, even if not compliant with our new naming rules. 
+ Type: name, + Namespace: LegacyProviderNamespace, + Hostname: DefaultProviderRegistryHost, + } +} + +// ParseProviderSourceString parses a value of the form expected in the "source" +// argument of a required_providers entry and returns the corresponding +// fully-qualified provider address. This is intended primarily to parse the +// FQN-like strings returned by terraform-config-inspect. +// +// The following are valid source string formats: +// +// - name +// - namespace/name +// - hostname/namespace/name +func ParseProviderSourceString(str string) (tfaddr.Provider, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + ret, err := tfaddr.ParseProviderSource(str) + if pe, ok := err.(*tfaddr.ParserError); ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: pe.Summary, + Detail: pe.Detail, + }) + return ret, diags + } + + if !ret.HasKnownNamespace() { + ret.Namespace = "hashicorp" + } + + return ret, nil +} + +// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if +// it returns an error. +func MustParseProviderSourceString(str string) Provider { + result, diags := ParseProviderSourceString(str) + if diags.HasErrors() { + panic(diags.Err().Error()) + } + return result +} + +// ParseProviderPart processes an addrs.Provider namespace or type string +// provided by an end-user, producing a normalized version if possible or +// an error if the string contains invalid characters. +// +// A provider part is processed in the same way as an individual label in a DNS +// domain name: it is transformed to lowercase per the usual DNS case mapping +// and normalization rules and may contain only letters, digits, and dashes. +// Additionally, dashes may not appear at the start or end of the string. +// +// These restrictions are intended to allow these names to appear in fussy +// contexts such as directory/file names on case-insensitive filesystems, +// repository names on GitHub, etc. 
We're using the DNS rules in particular, +// rather than some similar rules defined locally, because the hostname part +// of an addrs.Provider is already a hostname and it's ideal to use exactly +// the same case folding and normalization rules for all of the parts. +// +// In practice a provider type string conventionally does not contain dashes +// either. Such names are permitted, but providers with such type names will be +// hard to use because their resource type names will not be able to contain +// the provider type name and thus each resource will need an explicit provider +// address specified. (A real-world example of such a provider is the +// "google-beta" variant of the GCP provider, which has resource types that +// start with the "google_" prefix instead.) +// +// It's valid to pass the result of this function as the argument to a +// subsequent call, in which case the result will be identical. +func ParseProviderPart(given string) (string, error) { + return tfaddr.ParseProviderPart(given) +} + +// MustParseProviderPart is a wrapper around ParseProviderPart that panics if +// it returns an error. 
+func MustParseProviderPart(given string) string { + result, err := ParseProviderPart(given) + if err != nil { + panic(err.Error()) + } + return result +} + +// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string) +func IsProviderPartNormalized(str string) (bool, error) { + normalized, err := ParseProviderPart(str) + if err != nil { + return false, err + } + if str == normalized { + return true, nil + } + return false, nil +} diff --git a/internal/addrs/provider_config.go b/addrs/provider_config.go similarity index 99% rename from internal/addrs/provider_config.go rename to addrs/provider_config.go index 3790c46d6b44..4300deba10f1 100644 --- a/internal/addrs/provider_config.go +++ b/addrs/provider_config.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/hcl/v2" diff --git a/internal/addrs/provider_config_test.go b/addrs/provider_config_test.go similarity index 100% rename from internal/addrs/provider_config_test.go rename to addrs/provider_config_test.go diff --git a/internal/addrs/provider_test.go b/addrs/provider_test.go similarity index 100% rename from internal/addrs/provider_test.go rename to addrs/provider_test.go diff --git a/internal/addrs/referenceable.go b/addrs/referenceable.go similarity index 100% rename from internal/addrs/referenceable.go rename to addrs/referenceable.go diff --git a/internal/addrs/resource.go b/addrs/resource.go similarity index 100% rename from internal/addrs/resource.go rename to addrs/resource.go diff --git a/internal/addrs/resource_phase.go b/addrs/resource_phase.go similarity index 100% rename from internal/addrs/resource_phase.go rename to addrs/resource_phase.go diff --git a/internal/addrs/resource_test.go b/addrs/resource_test.go similarity index 100% rename from internal/addrs/resource_test.go rename to addrs/resource_test.go diff --git 
a/internal/addrs/resourcemode_string.go b/addrs/resourcemode_string.go similarity index 100% rename from internal/addrs/resourcemode_string.go rename to addrs/resourcemode_string.go diff --git a/internal/addrs/self.go b/addrs/self.go similarity index 100% rename from internal/addrs/self.go rename to addrs/self.go diff --git a/internal/addrs/set.go b/addrs/set.go similarity index 100% rename from internal/addrs/set.go rename to addrs/set.go diff --git a/internal/addrs/target_test.go b/addrs/target_test.go similarity index 100% rename from internal/addrs/target_test.go rename to addrs/target_test.go diff --git a/internal/addrs/targetable.go b/addrs/targetable.go similarity index 100% rename from internal/addrs/targetable.go rename to addrs/targetable.go diff --git a/internal/addrs/terraform_attr.go b/addrs/terraform_attr.go similarity index 100% rename from internal/addrs/terraform_attr.go rename to addrs/terraform_attr.go diff --git a/internal/addrs/unique_key.go b/addrs/unique_key.go similarity index 100% rename from internal/addrs/unique_key.go rename to addrs/unique_key.go diff --git a/internal/addrs/unique_key_test.go b/addrs/unique_key_test.go similarity index 100% rename from internal/addrs/unique_key_test.go rename to addrs/unique_key_test.go diff --git a/backend/backend.go b/backend/backend.go new file mode 100644 index 000000000000..03100c0eb93e --- /dev/null +++ b/backend/backend.go @@ -0,0 +1,423 @@ +// Package backend provides interfaces that the CLI uses to interact with +// Terraform. A backend provides the abstraction that allows the same CLI +// to simultaneously support both local and remote operations for seamlessly +// using Terraform in a team environment. 
+package backend + +import ( + "context" + "errors" + "io/ioutil" + "log" + "os" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" +) + +// DefaultStateName is the name of the default, initial state that every +// backend must have. This state cannot be deleted. +const DefaultStateName = "default" + +var ( + // ErrDefaultWorkspaceNotSupported is returned when an operation does not + // support using the default workspace, but requires a named workspace to + // be selected. + ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" + + "You can create a new workspace with the \"workspace new\" command.") + + // ErrWorkspacesNotSupported is an error returned when a caller attempts + // to perform an operation on a workspace other than "default" for a + // backend that doesn't support multiple workspaces. + // + // The caller can detect this to do special fallback behavior or produce + // a specific, helpful error message. + ErrWorkspacesNotSupported = errors.New("workspaces not supported") +) + +// InitFn is used to initialize a new backend. +type InitFn func() Backend + +// Backend is the minimal interface that must be implemented to enable Terraform. +type Backend interface { + // ConfigSchema returns a description of the expected configuration + // structure for the receiving backend. 
+ // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. + ConfigSchema() *configschema.Block + + // PrepareConfig checks the validity of the values in the given + // configuration, and inserts any missing defaults, assuming that its + // structure has already been validated per the schema returned by + // ConfigSchema. + // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. It also does not consult any + // external data such as environment variables, disk files, etc. Validation + // that requires such external data should be deferred until the + // Configure call. + // + // If error diagnostics are returned then the configuration is not valid + // and must not subsequently be passed to the Configure method. + // + // This method may return configuration-contextual diagnostics such + // as tfdiags.AttributeValue, and so the caller should provide the + // necessary context via the diags.InConfigBody method before returning + // diagnostics to the user. + PrepareConfig(cty.Value) (cty.Value, tfdiags.Diagnostics) + + // Configure uses the provided configuration to set configuration fields + // within the backend. + // + // The given configuration is assumed to have already been validated + // against the schema returned by ConfigSchema and passed validation + // via PrepareConfig. + // + // This method may be called only once per backend instance, and must be + // called before all other methods except where otherwise stated. + // + // If error diagnostics are returned, the internal state of the instance + // is undefined and no other methods may be called. + Configure(cty.Value) tfdiags.Diagnostics + + // StateMgr returns the state manager for the given workspace name. + // + // If the returned state manager also implements statemgr.Locker then + // it's the caller's responsibility to call Lock and Unlock as appropriate. 
+	//
+	// If the named workspace doesn't exist, or if it has no state, it will
+	// be created either immediately on this call or the first time
+	// PersistState is called, depending on the state manager implementation.
+	StateMgr(workspace string) (statemgr.Full, error)
+
+	// DeleteWorkspace removes the workspace with the given name if it exists.
+	//
+	// DeleteWorkspace cannot prevent deleting a state that is in use. It is
+	// the responsibility of the caller to hold a Lock for the state manager
+	// belonging to this workspace before calling this method.
+	DeleteWorkspace(name string, force bool) error
+
+	// Workspaces returns a list of the names of all of the workspaces that
+	// exist in this backend.
+	Workspaces() ([]string, error)
+}
+
+// Enhanced implements additional behavior on top of a normal backend.
+//
+// 'Enhanced' backends are an implementation detail only, and are no longer reflected as an external
+// 'feature' of backends. In other words, backends refer to plugins for remote state snapshot
+// storage only, and the Enhanced interface here is a necessary vestige of the 'local' and
+// remote/cloud backends only.
+type Enhanced interface {
+	Backend
+
+	// Operation performs a Terraform operation such as refresh, plan, apply.
+	// It is up to the implementation to determine what "performing" means.
+	// This DOES NOT BLOCK. The context returned as part of RunningOperation
+	// should be used to block for completion.
+	// If the state used in the operation can be locked, it is the
+	// responsibility of the Backend to lock the state for the duration of the
+	// running operation.
+	Operation(context.Context, *Operation) (*RunningOperation, error)
+}
+
+// Local implements additional behavior on a Backend that allows local
+// operations in addition to remote operations.
+//
+// This enables more behaviors of Terraform that require more data such
+// as `console`, `import`, `graph`. These require direct access to
+// configurations, variables, and more. Not all backends may support this
+// so we separate it out into its own optional interface.
+type Local interface {
+	// LocalRun uses information in the Operation to prepare a set of objects
+	// needed to start running that operation.
+	//
+	// The operation doesn't need a Type set, but it needs various other
+	// options set. This is a rather odd API that tries to treat all
+	// operations as the same when they really aren't; see the local and remote
+	// backend's implementations of this to understand what this actually
+	// does, because this operation has no well-defined contract aside from
+	// "whatever it already does".
+	LocalRun(*Operation) (*LocalRun, statemgr.Full, tfdiags.Diagnostics)
+}
+
+// LocalRun represents the assortment of objects that we can collect or
+// calculate from an Operation object, which we can then use for local
+// operations.
+//
+// The operation methods on terraform.Context (Plan, Apply, Import, etc) each
+// generate new artifacts which supersede parts of the LocalRun object that
+// started the operation, so callers should be careful to use those subsequent
+// artifacts instead of the fields of LocalRun where appropriate. The LocalRun
+// data intentionally doesn't update as a result of calling methods on Context,
+// in order to make data flow explicit.
+//
+// This type is a weird architectural wart resulting from the overly-general
+// way our backend API models operations, whereby we behave as if all
+// Terraform operations have the same inputs and outputs even though they
+// are actually all rather different. The exact meaning of the fields in
+// this type therefore vary depending on which OperationType was passed to
+// Local.Context in order to create an object of this type.
+type LocalRun struct {
+	// Core is an already-initialized Terraform Core context, ready to be
+	// used to run operations such as Plan and Apply.
+ Core *terraform.Context + + // Config is the configuration we're working with, which typically comes + // from either config files directly on local disk (when we're creating + // a plan, or similar) or from a snapshot embedded in a plan file + // (when we're applying a saved plan). + Config *configs.Config + + // InputState is the state that should be used for whatever is the first + // method call to a context created with CoreOpts. When creating a plan + // this will be the previous run state, but when applying a saved plan + // this will be the prior state recorded in that plan. + InputState *states.State + + // PlanOpts are options to pass to a Plan or Plan-like operation. + // + // This is nil when we're applying a saved plan, because the plan itself + // contains enough information about its options to apply it. + PlanOpts *terraform.PlanOpts + + // Plan is a plan loaded from a saved plan file, if our operation is to + // apply that saved plan. + // + // This is nil when we're not applying a saved plan. + Plan *plans.Plan +} + +// An operation represents an operation for Terraform to execute. +// +// Note that not all fields are supported by all backends and can result +// in an error if set. All backend implementations should show user-friendly +// errors explaining any incorrectly set values. For example, the local +// backend doesn't support a PlanId being set. +// +// The operation options are purposely designed to have maximal compatibility +// between Terraform and Terraform Servers (a commercial product offered by +// HashiCorp). Therefore, it isn't expected that other implementation support +// every possible option. The struct here is generalized in order to allow +// even partial implementations to exist in the open, without walling off +// remote functionality 100% behind a commercial wall. Anyone can implement +// against this interface and have Terraform interact with it just as it +// would with HashiCorp-provided Terraform Servers. 
+type Operation struct { + // Type is the operation to perform. + Type OperationType + + // PlanId is an opaque value that backends can use to execute a specific + // plan for an apply operation. + // + // PlanOutBackend is the backend to store with the plan. This is the + // backend that will be used when applying the plan. + PlanId string + PlanRefresh bool // PlanRefresh will do a refresh before a plan + PlanOutPath string // PlanOutPath is the path to save the plan + PlanOutBackend *plans.Backend + + // ConfigDir is the path to the directory containing the configuration's + // root module. + ConfigDir string + + // ConfigLoader is a configuration loader that can be used to load + // configuration from ConfigDir. + ConfigLoader *configload.Loader + + // DependencyLocks represents the locked dependencies associated with + // the configuration directory given in ConfigDir. + // + // Note that if field PlanFile is set then the plan file should contain + // its own dependency locks. The backend is responsible for correctly + // selecting between these two sets of locks depending on whether it + // will be using ConfigDir or PlanFile to get the configuration for + // this operation. + DependencyLocks *depsfile.Locks + + // Hooks can be used to perform actions triggered by various events during + // the operation's lifecycle. + Hooks []terraform.Hook + + // Plan is a plan that was passed as an argument. This is valid for + // plan and apply arguments but may not work for all backends. + PlanFile *planfile.Reader + + // The options below are more self-explanatory and affect the runtime + // behavior of the operation. + PlanMode plans.Mode + AutoApprove bool + Targets []addrs.Targetable + ForceReplace []addrs.AbsResourceInstance + Variables map[string]UnparsedVariableValue + + // Some operations use root module variables only opportunistically or + // don't need them at all. 
If this flag is set, the backend must treat + // all variables as optional and provide an unknown value for any required + // variables that aren't set in order to allow partial evaluation against + // the resulting incomplete context. + // + // This flag is honored only if PlanFile isn't set. If PlanFile is set then + // the variables set in the plan are used instead, and they must be valid. + AllowUnsetVariables bool + + // View implements the logic for all UI interactions. + View views.Operation + + // Input/output/control options. + UIIn terraform.UIInput + UIOut terraform.UIOutput + + // StateLocker is used to lock the state while providing UI feedback to the + // user. This will be replaced by the Backend to update the context. + // + // If state locking is not necessary, this should be set to a no-op + // implementation of clistate.Locker. + StateLocker clistate.Locker + + // Workspace is the name of the workspace that this operation should run + // in, which controls which named state is used. + Workspace string +} + +// HasConfig returns true if and only if the operation has a ConfigDir value +// that refers to a directory containing at least one Terraform configuration +// file. +func (o *Operation) HasConfig() bool { + return o.ConfigLoader.IsConfigDir(o.ConfigDir) +} + +// Config loads the configuration that the operation applies to, using the +// ConfigDir and ConfigLoader fields within the receiving operation. +func (o *Operation) Config() (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + config, hclDiags := o.ConfigLoader.LoadConfig(o.ConfigDir) + diags = diags.Append(hclDiags) + return config, diags +} + +// ReportResult is a helper for the common chore of setting the status of +// a running operation and showing any diagnostics produced during that +// operation. +// +// If the given diagnostics contains errors then the operation's result +// will be set to backend.OperationFailure. 
It will be set to +// backend.OperationSuccess otherwise. It will then use o.View.Diagnostics +// to show the given diagnostics before returning. +// +// Callers should feel free to do each of these operations separately in +// more complex cases where e.g. diagnostics are interleaved with other +// output, but terminating immediately after reporting error diagnostics is +// common and can be expressed concisely via this method. +func (o *Operation) ReportResult(op *RunningOperation, diags tfdiags.Diagnostics) { + if diags.HasErrors() { + op.Result = OperationFailure + } else { + op.Result = OperationSuccess + } + if o.View != nil { + o.View.Diagnostics(diags) + } else { + // Shouldn't generally happen, but if it does then we'll at least + // make some noise in the logs to help us spot it. + if len(diags) != 0 { + log.Printf( + "[ERROR] Backend needs to report diagnostics but View is not set:\n%s", + diags.ErrWithWarnings(), + ) + } + } +} + +// RunningOperation is the result of starting an operation. +type RunningOperation struct { + // For implementers of a backend, this context should not wrap the + // passed in context. Otherwise, cancelling the parent context will + // immediately mark this context as "done" but those aren't the semantics + // we want: we want this context to be done only when the operation itself + // is fully done. + context.Context + + // Stop requests the operation to complete early, by calling Stop on all + // the plugins. If the process needs to terminate immediately, call Cancel. + Stop context.CancelFunc + + // Cancel is the context.CancelFunc associated with the embedded context, + // and can be called to terminate the operation early. + // Once Cancel is called, the operation should return as soon as possible + // to avoid running operations during process exit. + Cancel context.CancelFunc + + // Result is the exit status of the operation, populated only after the + // operation has completed. 
+ Result OperationResult + + // PlanEmpty is populated after a Plan operation completes to note whether + // a plan is empty or has changes. This is only used in the CLI to determine + // the exit status because the plan value is not available at that point. + PlanEmpty bool + + // State is the final state after the operation completed. Persisting + // this state is managed by the backend. This should only be read + // after the operation completes to avoid read/write races. + State *states.State +} + +// OperationResult describes the result status of an operation. +type OperationResult int + +const ( + // OperationSuccess indicates that the operation completed as expected. + OperationSuccess OperationResult = 0 + + // OperationFailure indicates that the operation encountered some sort + // of error, and thus may have been only partially performed or not + // performed at all. + OperationFailure OperationResult = 1 +) + +func (r OperationResult) ExitStatus() int { + return int(r) +} + +// If the argument is a path, Read loads it and returns the contents, +// otherwise the argument is assumed to be the desired contents and is simply +// returned. 
+func ReadPathOrContents(poc string) (string, error) { + if len(poc) == 0 { + return poc, nil + } + + path := poc + if path[0] == '~' { + var err error + path, err = homedir.Expand(path) + if err != nil { + return path, err + } + } + + if _, err := os.Stat(path); err == nil { + contents, err := ioutil.ReadFile(path) + if err != nil { + return string(contents), err + } + return string(contents), nil + } + + return poc, nil +} diff --git a/internal/backend/backend_test.go b/backend/backend_test.go similarity index 100% rename from internal/backend/backend_test.go rename to backend/backend_test.go diff --git a/backend/cli.go b/backend/cli.go new file mode 100644 index 000000000000..46a357e836c2 --- /dev/null +++ b/backend/cli.go @@ -0,0 +1,91 @@ +package backend + +import ( + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" +) + +// CLI is an optional interface that can be implemented to be initialized +// with information from the Terraform CLI. If this is implemented, this +// initialization function will be called with data to help interact better +// with a CLI. +// +// This interface was created to improve backend interaction with the +// official Terraform CLI while making it optional for API users to have +// to provide full CLI interaction to every backend. +// +// If you're implementing a Backend, it is acceptable to require CLI +// initialization. In this case, your backend should be coded to error +// on other methods (such as State, Operation) if CLI initialization was not +// done with all required fields. +type CLI interface { + Backend + + // CLIInit is called once with options. The options passed to this + // function may not be modified after calling this since they can be + // read/written at any time by the Backend implementation. 
+	//
+	// This may be called before or after Configure is called, so if settings
+	// here affect configurable settings, care should be taken to handle
+	// whether they should be overwritten or not.
+	CLIInit(*CLIOpts) error
+}
+
+// CLIOpts are the options passed into CLIInit for the CLI interface.
+//
+// These options represent the functionality the CLI exposes and often
+// map to meta-flags available on every CLI (such as -input).
+//
+// When implementing a backend, it isn't expected that every option applies.
+// Your backend should be documented clearly to explain to end users what
+// options have an effect and what won't. In some cases, it may even make sense
+// to error in your backend when an option is set so that users don't make
+// a critically incorrect assumption about behavior.
+type CLIOpts struct {
+	// CLI and Colorize control the CLI output. If CLI is nil then no CLI
+	// output will be done. If CLIColor is nil then no coloring will be done.
+	CLI      cli.Ui
+	CLIColor *colorstring.Colorize
+
+	// Streams describes the low-level streams for Stdout, Stderr and Stdin,
+	// including some metadata about whether they are terminals. Most output
+	// should go via the object in field CLI above, but Streams can be useful
+	// for tailoring the output to fit the attached terminal, for example.
+	Streams *terminal.Streams
+
+	// StatePath is the local path where state is read from.
+	//
+	// StateOutPath is the local path where the state will be written.
+	// If this is empty, it will default to StatePath.
+	//
+	// StateBackupPath is the local path where a backup file will be written.
+	// If this is empty, no backup will be taken.
+	StatePath       string
+	StateOutPath    string
+	StateBackupPath string
+
+	// ContextOpts are the base context options to set when initializing a
+	// Terraform context. Many of these will be overridden or merged by
+	// Operation. See Operation for more details.
+ ContextOpts *terraform.ContextOpts + + // Input will ask for necessary input prior to performing any operations. + // + // Validation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate. + Input bool + Validation bool + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because Terraform is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. + RunningInAutomation bool +} diff --git a/internal/backend/init/deprecate_test.go b/backend/init/deprecate_test.go similarity index 89% rename from internal/backend/init/deprecate_test.go rename to backend/init/deprecate_test.go index f84cab808da8..c45d62d2b37b 100644 --- a/internal/backend/init/deprecate_test.go +++ b/backend/init/deprecate_test.go @@ -3,7 +3,7 @@ package init import ( "testing" - "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" + "github.com/hashicorp/terraform/backend/remote-state/inmem" "github.com/zclconf/go-cty/cty" ) diff --git a/backend/init/init.go b/backend/init/init.go new file mode 100644 index 000000000000..b77f56ae2452 --- /dev/null +++ b/backend/init/init.go @@ -0,0 +1,143 @@ +// Package init contains the list of backends that can be initialized and +// basic helper functions for initializing those backends. 
+package init + +import ( + "sync" + + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" + backendRemote "github.com/hashicorp/terraform/backend/remote" + backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure" + backendConsul "github.com/hashicorp/terraform/backend/remote-state/consul" + backendCos "github.com/hashicorp/terraform/backend/remote-state/cos" + backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs" + backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http" + backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem" + backendKubernetes "github.com/hashicorp/terraform/backend/remote-state/kubernetes" + backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss" + backendPg "github.com/hashicorp/terraform/backend/remote-state/pg" + backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3" + backendCloud "github.com/hashicorp/terraform/cloud" +) + +// backends is the list of available backends. This is a global variable +// because backends are currently hardcoded into Terraform and can't be +// modified without recompilation. +// +// To read an available backend, use the Backend function. This ensures +// safe concurrent read access to the list of built-in backends. +// +// Backends are hardcoded into Terraform because the API for backends uses +// complex structures and supporting that over the plugin system is currently +// prohibitively difficult. For those wanting to implement a custom backend, +// they can do so with recompilation. +var backends map[string]backend.InitFn +var backendsLock sync.Mutex + +// RemovedBackends is a record of previously supported backends which have +// since been deprecated and removed. 
+var RemovedBackends map[string]string + +// Init initializes the backends map with all our hardcoded backends. +func Init(services *disco.Disco) { + backendsLock.Lock() + defer backendsLock.Unlock() + + backends = map[string]backend.InitFn{ + "local": func() backend.Backend { return backendLocal.New() }, + "remote": func() backend.Backend { return backendRemote.New(services) }, + + // Remote State backends. + "azurerm": func() backend.Backend { return backendAzure.New() }, + "consul": func() backend.Backend { return backendConsul.New() }, + "cos": func() backend.Backend { return backendCos.New() }, + "gcs": func() backend.Backend { return backendGCS.New() }, + "http": func() backend.Backend { return backendHTTP.New() }, + "inmem": func() backend.Backend { return backendInmem.New() }, + "kubernetes": func() backend.Backend { return backendKubernetes.New() }, + "oss": func() backend.Backend { return backendOSS.New() }, + "pg": func() backend.Backend { return backendPg.New() }, + "s3": func() backend.Backend { return backendS3.New() }, + + // Terraform Cloud 'backend' + // This is an implementation detail only, used for the cloud package + "cloud": func() backend.Backend { return backendCloud.New(services) }, + } + + RemovedBackends = map[string]string{ + "artifactory": `The "artifactory" backend is not supported in Terraform v1.3 or later.`, + "azure": `The "azure" backend name has been removed, please use "azurerm".`, + "etcd": `The "etcd" backend is not supported in Terraform v1.3 or later.`, + "etcdv3": `The "etcdv3" backend is not supported in Terraform v1.3 or later.`, + "manta": `The "manta" backend is not supported in Terraform v1.3 or later.`, + "swift": `The "swift" backend is not supported in Terraform v1.3 or later.`, + } +} + +// Backend returns the initialization factory for the given backend, or +// nil if none exists. 
+func Backend(name string) backend.InitFn {
+	backendsLock.Lock()
+	defer backendsLock.Unlock()
+	return backends[name]
+}
+
+// Set sets a new backend in the list of backends. If f is nil then the
+// backend will be removed from the map. If this backend already exists
+// then it will be overwritten.
+//
+// This method sets this backend globally and care should be taken to do
+// this only before Terraform is executing to prevent odd behavior of backends
+// changing mid-execution.
+func Set(name string, f backend.InitFn) {
+	backendsLock.Lock()
+	defer backendsLock.Unlock()
+
+	if f == nil {
+		delete(backends, name)
+		return
+	}
+
+	backends[name] = f
+}
+
+// deprecatedBackendShim is used to wrap a backend and inject a deprecation
+// warning into the Validate method.
+type deprecatedBackendShim struct {
+	backend.Backend
+	Message string
+}
+
+// PrepareConfig delegates to the wrapped backend to validate its config
+// and then appends shim's deprecation warning.
+func (b deprecatedBackendShim) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
+	newObj, diags := b.Backend.PrepareConfig(obj)
+	return newObj, diags.Append(tfdiags.SimpleWarning(b.Message))
+}
+
+// deprecateBackend can be used to wrap a backend to return a deprecation
+// warning during validation.
+func deprecateBackend(b backend.Backend, message string) backend.Backend {
+	// Since a Backend wrapped by deprecatedBackendShim can no longer be
+	// asserted as an Enhanced or Local backend, disallow those types here
+	// entirely. If something other than a basic backend.Backend needs to be
+	// deprecated, we can add that functionality to schema.Backend or the
+	// backend itself.
+ if _, ok := b.(backend.Enhanced); ok { + panic("cannot use DeprecateBackend on an Enhanced Backend") + } + + if _, ok := b.(backend.Local); ok { + panic("cannot use DeprecateBackend on a Local Backend") + } + + return deprecatedBackendShim{ + Backend: b, + Message: message, + } +} diff --git a/internal/backend/init/init_test.go b/backend/init/init_test.go similarity index 100% rename from internal/backend/init/init_test.go rename to backend/init/init_test.go diff --git a/backend/local/backend.go b/backend/local/backend.go new file mode 100644 index 000000000000..8520e68a5b38 --- /dev/null +++ b/backend/local/backend.go @@ -0,0 +1,489 @@ +package local + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +const ( + DefaultWorkspaceDir = "terraform.tfstate.d" + DefaultWorkspaceFile = "environment" + DefaultStateFilename = "terraform.tfstate" + DefaultBackupExtension = ".backup" +) + +// Local is an implementation of EnhancedBackend that performs all operations +// locally. This is the "default" backend and implements normal Terraform +// behavior as it is well known. +type Local struct { + // The State* paths are set from the backend config, and may be left blank + // to use the defaults. If the actual paths for the local backend state are + // needed, use the StatePaths method. + // + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. + // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. 
+ // Set this to "-" to disable state backup. + // + // StateWorkspaceDir is the path to the folder containing data for + // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. + StatePath string + StateOutPath string + StateBackupPath string + StateWorkspaceDir string + + // The OverrideState* paths are set based on per-operation CLI arguments + // and will override what'd be built from the State* fields if non-empty. + // While the interpretation of the State* fields depends on the active + // workspace, the OverrideState* fields are always used literally. + OverrideStatePath string + OverrideStateOutPath string + OverrideStateBackupPath string + + // We only want to create a single instance of a local state, so store them + // here as they're loaded. + states map[string]statemgr.Full + + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // OpInput will ask for necessary input prior to performing any operations. + // + // OpValidation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate. + OpInput bool + OpValidation bool + + // Backend, if non-nil, will use this backend for non-enhanced behavior. + // This allows local behavior with remote state storage. It is a way to + // "upgrade" a non-enhanced backend to an enhanced backend with typical + // behavior. + // + // If this is nil, local performs normal state loading and storage. + Backend backend.Backend + + // opLock locks operations + opLock sync.Mutex +} + +var _ backend.Backend = (*Local)(nil) + +// New returns a new initialized local backend. +func New() *Local { + return NewWithBackend(nil) +} + +// NewWithBackend returns a new local backend initialized with a +// dedicated backend for non-enhanced behavior. 
+func NewWithBackend(backend backend.Backend) *Local { + return &Local{ + Backend: backend, + } +} + +func (b *Local) ConfigSchema() *configschema.Block { + if b.Backend != nil { + return b.Backend.ConfigSchema() + } + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "path": { + Type: cty.String, + Optional: true, + }, + "workspace_dir": { + Type: cty.String, + Optional: true, + }, + }, + } +} + +func (b *Local) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + if b.Backend != nil { + return b.Backend.PrepareConfig(obj) + } + + var diags tfdiags.Diagnostics + + if val := obj.GetAttr("path"); !val.IsNull() { + p := val.AsString() + if p == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid local state file path", + `The "path" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "path"}}, + )) + } + } + + if val := obj.GetAttr("workspace_dir"); !val.IsNull() { + p := val.AsString() + if p == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid local workspace directory path", + `The "workspace_dir" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "workspace_dir"}}, + )) + } + } + + return obj, diags +} + +func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics { + if b.Backend != nil { + return b.Backend.Configure(obj) + } + + var diags tfdiags.Diagnostics + + if val := obj.GetAttr("path"); !val.IsNull() { + p := val.AsString() + b.StatePath = p + b.StateOutPath = p + } else { + b.StatePath = DefaultStateFilename + b.StateOutPath = DefaultStateFilename + } + + if val := obj.GetAttr("workspace_dir"); !val.IsNull() { + p := val.AsString() + b.StateWorkspaceDir = p + } else { + b.StateWorkspaceDir = DefaultWorkspaceDir + } + + return diags +} + +func (b *Local) Workspaces() ([]string, error) { + // If we have a backend handling state, defer to that. 
+	if b.Backend != nil {
+		return b.Backend.Workspaces()
+	}
+
+	// the listing always starts with "default"
+	envs := []string{backend.DefaultStateName}
+
+	entries, err := ioutil.ReadDir(b.stateWorkspaceDir())
+	// no error if there's no envs configured
+	if os.IsNotExist(err) {
+		return envs, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var listed []string
+	for _, entry := range entries {
+		if entry.IsDir() {
+			listed = append(listed, filepath.Base(entry.Name()))
+		}
+	}
+
+	sort.Strings(listed)
+	envs = append(envs, listed...)
+
+	return envs, nil
+}
+
+// DeleteWorkspace removes a workspace.
+//
+// The "default" workspace cannot be removed.
+func (b *Local) DeleteWorkspace(name string, force bool) error {
+	// If we have a backend handling state, defer to that.
+	if b.Backend != nil {
+		return b.Backend.DeleteWorkspace(name, force)
+	}
+
+	if name == "" {
+		return errors.New("empty state name")
+	}
+
+	if name == backend.DefaultStateName {
+		return errors.New("cannot delete default state")
+	}
+
+	delete(b.states, name)
+	return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name))
+}
+
+func (b *Local) StateMgr(name string) (statemgr.Full, error) {
+	// If we have a backend handling state, delegate to that.
+ if b.Backend != nil { + return b.Backend.StateMgr(name) + } + + if s, ok := b.states[name]; ok { + return s, nil + } + + if err := b.createState(name); err != nil { + return nil, err + } + + statePath, stateOutPath, backupPath := b.StatePaths(name) + log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath) + + s := statemgr.NewFilesystemBetweenPaths(statePath, stateOutPath) + if backupPath != "" { + s.SetBackupPath(backupPath) + } + + if b.states == nil { + b.states = map[string]statemgr.Full{} + } + b.states[name] = s + return s, nil +} + +// Operation implements backend.Enhanced +// +// This will initialize an in-memory terraform.Context to perform the +// operation within this process. +// +// The given operation parameter will be merged with the ContextOpts on +// the structure with the following rules. If a rule isn't specified and the +// name conflicts, assume that the field is overwritten if set. +func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + if op.View == nil { + panic("Operation called with nil View") + } + + // Determine the function to call for our operation + var f func(context.Context, context.Context, *backend.Operation, *backend.RunningOperation) + switch op.Type { + case backend.OperationTypeRefresh: + f = b.opRefresh + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + default: + return nil, fmt.Errorf( + "unsupported operation type: %s\n\n"+ + "This is a bug in Terraform and should be reported. The local backend\n"+ + "is built-in to Terraform and should always support all operations.", + op.Type) + } + + // Lock + b.opLock.Lock() + + // Build our running operation + // the runninCtx is only used to block until the operation returns. 
+ runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + op.StateLocker = op.StateLocker.WithContext(stopCtx) + + // Do it + go func() { + defer logging.PanicHandler() + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + f(stopCtx, cancelCtx, op, runningOp) + }() + + // Return + return runningOp, nil +} + +// opWait waits for the operation to complete, and a stop signal or a +// cancelation signal. +func (b *Local) opWait( + doneCh <-chan struct{}, + stopCtx context.Context, + cancelCtx context.Context, + tfCtx *terraform.Context, + opStateMgr statemgr.Persister, + view views.Operation) (canceled bool) { + // Wait for the operation to finish or for us to be interrupted so + // we can handle it properly. + select { + case <-stopCtx.Done(): + view.Stopping() + + // try to force a PersistState just in case the process is terminated + // before we can complete. + if err := opStateMgr.PersistState(nil); err != nil { + // We can't error out from here, but warn the user if there was an error. + // If this isn't transient, we will catch it again below, and + // attempt to save the state another way. 
+ var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error saving current state", + fmt.Sprintf(earlyStateWriteErrorFmt, err), + )) + view.Diagnostics(diags) + } + + // Stop execution + log.Println("[TRACE] backend/local: waiting for the running operation to stop") + go tfCtx.Stop() + + select { + case <-cancelCtx.Done(): + log.Println("[WARN] running operation was forcefully canceled") + // if the operation was canceled, we need to return immediately + canceled = true + case <-doneCh: + log.Println("[TRACE] backend/local: graceful stop has completed") + } + case <-cancelCtx.Done(): + // this should not be called without first attempting to stop the + // operation + log.Println("[ERROR] running operation canceled without Stop") + canceled = true + case <-doneCh: + } + return +} + +// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as +// configured from the CLI. +func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) { + statePath := b.OverrideStatePath + stateOutPath := b.OverrideStateOutPath + backupPath := b.OverrideStateBackupPath + + isDefault := name == backend.DefaultStateName || name == "" + + baseDir := "" + if !isDefault { + baseDir = filepath.Join(b.stateWorkspaceDir(), name) + } + + if statePath == "" { + if isDefault { + statePath = b.StatePath // b.StatePath applies only to the default workspace, since StateWorkspaceDir is used otherwise + } + if statePath == "" { + statePath = filepath.Join(baseDir, DefaultStateFilename) + } + } + if stateOutPath == "" { + stateOutPath = statePath + } + if backupPath == "" { + backupPath = b.StateBackupPath + } + switch backupPath { + case "-": + backupPath = "" + case "": + backupPath = stateOutPath + DefaultBackupExtension + } + + return statePath, stateOutPath, backupPath +} + +// PathsConflictWith returns true if any state path used by a workspace in +// the receiver is the same as any state path used by the other given +// local 
backend instance. +// +// This should be used when "migrating" from one local backend configuration to +// another in order to avoid deleting the "old" state snapshots if they are +// in the same files as the "new" state snapshots. +func (b *Local) PathsConflictWith(other *Local) bool { + otherPaths := map[string]struct{}{} + otherWorkspaces, err := other.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain. + return true + } + for _, name := range otherWorkspaces { + p, _, _ := other.StatePaths(name) + otherPaths[p] = struct{}{} + } + + ourWorkspaces, err := b.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain. + return true + } + + for _, name := range ourWorkspaces { + p, _, _ := b.StatePaths(name) + if _, exists := otherPaths[p]; exists { + return true + } + } + return false +} + +// this only ensures that the named directory exists +func (b *Local) createState(name string) error { + if name == backend.DefaultStateName { + return nil + } + + stateDir := filepath.Join(b.stateWorkspaceDir(), name) + s, err := os.Stat(stateDir) + if err == nil && s.IsDir() { + // no need to check for os.IsNotExist, since that is covered by os.MkdirAll + // which will catch the other possible errors as well. + return nil + } + + err = os.MkdirAll(stateDir, 0755) + if err != nil { + return err + } + + return nil +} + +// stateWorkspaceDir returns the directory where state environments are stored. +func (b *Local) stateWorkspaceDir() string { + if b.StateWorkspaceDir != "" { + return b.StateWorkspaceDir + } + + return DefaultWorkspaceDir +} + +const earlyStateWriteErrorFmt = `Error: %s + +Terraform encountered an error attempting to save the state before cancelling the current operation. 
Once the operation is complete another attempt will be made to save the final state.` diff --git a/backend/local/backend_apply.go b/backend/local/backend_apply.go new file mode 100644 index 000000000000..2321da53a8b9 --- /dev/null +++ b/backend/local/backend_apply.go @@ -0,0 +1,330 @@ +package local + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// test hook called between plan+apply during opApply +var testHookStopPlanApply func() + +func (b *Local) opApply( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + log.Printf("[INFO] backend/local: starting Apply operation") + + var diags, moreDiags tfdiags.Diagnostics + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. + if op.PlanFile == nil && op.PlanMode != plans.DestroyMode && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Apply requires configuration to be present. Applying without a configuration "+ + "would mark everything for destruction, which is normally not what is desired. 
"+ + "If you would like to destroy everything, run 'terraform destroy' instead.", + )) + op.ReportResult(runningOp, diags) + return + } + + stateHook := new(StateHook) + op.Hooks = append(op.Hooks, stateHook) + + // Get our context + lr, _, opState, contextDiags := b.localRun(op) + diags = diags.Append(contextDiags) + if contextDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + // the state was locked during successful context creation; unlock the state + // when the operation completes + defer func() { + diags := op.StateLocker.Unlock() + if diags.HasErrors() { + op.View.Diagnostics(diags) + runningOp.Result = backend.OperationFailure + } + }() + + // We'll start off with our result being the input state, and replace it + // with the result state only if we eventually complete the apply + // operation. + runningOp.State = lr.InputState + + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + var plan *plans.Plan + // If we weren't given a plan, then we refresh/plan + if op.PlanFile == nil { + // Perform the plan + log.Printf("[INFO] backend/local: apply calling Plan") + plan, moreDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + // If Terraform Core generated a partial plan despite the errors + // then we'll make a best effort to render it. Terraform Core + // promises that if it returns a non-nil plan along with errors + // then the plan won't necessarily contain all of the needed + // actions but that any it does include will be properly-formed. + // plan.Errored will be true in this case, which our plan + // renderer can rely on to tailor its messaging. 
+ if plan != nil && (len(plan.Changes.Resources) != 0 || len(plan.Changes.Outputs) != 0) { + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + // If schema loading returns errors then we'll just give up and + // ignore them to avoid distracting from the plan-time errors we're + // mainly trying to report here. + if !moreDiags.HasErrors() { + op.View.Plan(plan, schemas) + } + } + op.ReportResult(runningOp, diags) + return + } + + trivialPlan := !plan.CanApply() + hasUI := op.UIOut != nil && op.UIIn != nil + mustConfirm := hasUI && !op.AutoApprove && !trivialPlan + op.View.Plan(plan, schemas) + + if testHookStopPlanApply != nil { + testHookStopPlanApply() + } + + // Check if we've been stopped before going through confirmation, or + // skipping confirmation in the case of -auto-approve. + // This can currently happen if a single stop request was received + // during the final batch of resource plan calls, so no operations were + // forced to abort, and no errors were returned from Plan. + if stopCtx.Err() != nil { + diags = diags.Append(errors.New("execution halted")) + runningOp.Result = backend.OperationFailure + op.ReportResult(runningOp, diags) + return + } + + if mustConfirm { + var desc, query string + switch op.PlanMode { + case plans.DestroyMode: + if op.Workspace != "default" { + query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you really want to destroy all resources?" + } + desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + case plans.RefreshOnlyMode: + if op.Workspace != "default" { + query = "Would you like to update the Terraform state for \"" + op.Workspace + "\" to reflect these detected changes?" + } else { + query = "Would you like to update the Terraform state to reflect these detected changes?" 
+ } + desc = "Terraform will write these changes to the state without modifying any real infrastructure.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + default: + if op.Workspace != "default" { + query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you want to perform these actions?" + } + desc = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + // We'll show any accumulated warnings before we display the prompt, + // so the user can consider them when deciding how to answer. + if len(diags) > 0 { + op.View.Diagnostics(diags) + diags = nil // reset so we won't show the same diagnostics again later + } + + v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{ + Id: "approve", + Query: "\n" + query, + Description: desc, + }) + if err != nil { + diags = diags.Append(fmt.Errorf("error asking for approval: %w", err)) + op.ReportResult(runningOp, diags) + return + } + if v != "yes" { + op.View.Cancelled(op.PlanMode) + runningOp.Result = backend.OperationFailure + return + } + } + } else { + plan = lr.Plan + if plan.Errored { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot apply incomplete plan", + "Terraform encountered an error when generating this plan, so it cannot be applied.", + )) + op.ReportResult(runningOp, diags) + return + } + for _, change := range plan.Changes.Resources { + if change.Action != plans.NoOp { + op.View.PlannedChange(change) + } + } + } + + // Set up our hook for continuous state updates + stateHook.StateMgr = opState + + // Start the apply in a goroutine so that we can be interrupted. 
+ var applyState *states.State + var applyDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + go func() { + defer logging.PanicHandler() + defer close(doneCh) + log.Printf("[INFO] backend/local: apply calling Apply") + applyState, applyDiags = lr.Core.Apply(plan, lr.Config) + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { + return + } + diags = diags.Append(applyDiags) + + // Even on error with an empty state, the state value should not be nil. + // Return early here to prevent corrupting any existing state. + if diags.HasErrors() && applyState == nil { + log.Printf("[ERROR] backend/local: apply returned nil state") + op.ReportResult(runningOp, diags) + return + } + + // Store the final state + runningOp.State = applyState + err := statemgr.WriteAndPersist(opState, applyState, schemas) + if err != nil { + // Export the state file from the state manager and assign the new + // state. This is needed to preserve the existing serial and lineage. + stateFile := statemgr.Export(opState) + if stateFile == nil { + stateFile = &statefile.File{} + } + stateFile.State = applyState + + diags = diags.Append(b.backupStateForError(stateFile, err, op.View)) + op.ReportResult(runningOp, diags) + return + } + + if applyDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + // If we've accumulated any warnings along the way then we'll show them + // here just before we show the summary and next steps. If we encountered + // errors then we would've returned early at some other point above. + op.View.Diagnostics(diags) +} + +// backupStateForError is called in a scenario where we're unable to persist the +// state for some reason, and will attempt to save a backup copy of the state +// to local disk to help the user recover. This is a "last ditch effort" sort +// of thing, so we really don't want to end up in this codepath; we should do +// everything we possibly can to get the state saved _somewhere_. 
+func (b *Local) backupStateForError(stateFile *statefile.File, err error, view views.Operation) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to save state", + fmt.Sprintf("Error saving state: %s", err), + )) + + local := statemgr.NewFilesystem("errored.tfstate") + writeErr := local.WriteStateForMigration(stateFile, true) + if writeErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local state file", + fmt.Sprintf("Error creating local state file for recovery: %s", writeErr), + )) + + // To avoid leaving the user with no state at all, our last resort + // is to print the JSON state out onto the terminal. This is an awful + // UX, so we should definitely avoid doing this if at all possible, + // but at least the user has _some_ path to recover if we end up + // here for some reason. + if dumpErr := view.EmergencyDumpState(stateFile); dumpErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr), + )) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to persist state to backend", + stateWriteConsoleFallbackError, + )) + return diags + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to persist state to backend", + stateWriteBackedUpError, + )) + + return diags +} + +const stateWriteBackedUpError = `The error shown above has prevented Terraform from writing the updated state to the configured backend. To allow for recovery, the state has been written to the file "errored.tfstate" in the current working directory. + +Running "terraform apply" again at this point will create a forked state, making it harder to recover. 
+ +To retry writing this state, use the following command: + terraform state push errored.tfstate +` + +const stateWriteConsoleFallbackError = `The errors shown above prevented Terraform from writing the updated state to +the configured backend and from creating a local backup file. As a fallback, +the raw state data is printed above as a JSON object. + +To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file called errored.tfstate, then run the following command: + terraform state push errored.tfstate +` + +const stateWriteFatalErrorFmt = `Failed to save state after apply. + +Error serializing state: %s + +A catastrophic error has prevented Terraform from persisting the state file or creating a backup. Unfortunately this means that the record of any resources created during this apply has been lost, and such resources may exist outside of Terraform's management. + +For resources that support import, it is possible to recover by manually importing each resource using its id from the target system. + +This is a serious bug in Terraform and should be reported. 
+` diff --git a/backend/local/backend_apply_test.go b/backend/local/backend_apply_test.go new file mode 100644 index 000000000000..6d54a76b1bb3 --- /dev/null +++ b/backend/local/backend_apply_test.go @@ -0,0 +1,386 @@ +package local + +import ( + "context" + "errors" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestLocal_applyBasic(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + "ami": cty.StringVal("bar"), + })} + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + if !p.PlanResourceChangeCalled { + t.Fatal("diff should be called") + } + + if !p.ApplyResourceChangeCalled { + t.Fatal("apply should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = 
provider["registry.terraform.io/hashicorp/test"] + ami = bar +`) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_applyEmptyDir(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})} + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want error") + } + + if p.ApplyResourceChangeCalled { + t.Fatal("apply should not be called") + } + + if _, err := os.Stat(b.StateOutPath); err == nil { + t.Fatal("should not exist") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: No configuration files"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_applyEmptyDirDestroy(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + op.PlanMode = plans.DestroyMode + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("apply operation failed") + } + + if p.ApplyResourceChangeCalled { + t.Fatal("apply should not be called") + } + + checkState(t, b.StateOutPath, ``) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + 
+func TestLocal_applyError(t *testing.T) { + b := TestLocal(t) + + schema := &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + } + p := TestLocalProvider(t, b, "test", schema) + + var lock sync.Mutex + errored := false + p.ApplyResourceChangeFn = func( + r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + + lock.Lock() + defer lock.Unlock() + var diags tfdiags.Diagnostics + + ami := r.Config.GetAttr("ami").AsString() + if !errored && ami == "error" { + errored = true + diags = diags.Append(errors.New("ami error")) + return providers.ApplyResourceChangeResponse{ + Diagnostics: diags, + } + } + return providers.ApplyResourceChangeResponse{ + Diagnostics: diags, + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "ami": cty.StringVal("bar"), + }), + } + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-error") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want failure") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = foo + provider = provider["registry.terraform.io/hashicorp/test"] + ami = bar + `) + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: ami error"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_applyBackendFail(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("yes"), + "ami": cty.StringVal("bar"), + }), + Diagnostics: tfdiags.Diagnostics.Append(nil, errors.New("error before backend failure")), + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get current working directory") + } + err = os.Chdir(filepath.Dir(b.StatePath)) + if err != nil { + t.Fatalf("failed to set temporary working directory") + } + defer os.Chdir(wd) + + op, configCleanup, done := testOperationApply(t, wd+"/testdata/apply") + defer configCleanup() + + b.Backend = &backendWithFailingState{} + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if run.Result == backend.OperationSuccess { + t.Fatalf("apply succeeded; want error") + } + + diagErr := output.Stderr() + + if !strings.Contains(diagErr, "Error saving state: fake failure") { + t.Fatalf("missing \"fake failure\" message in diags:\n%s", diagErr) + } + + if !strings.Contains(diagErr, "error before backend failure") { + t.Fatalf("missing 'error before backend failure' diagnostic from apply") + } + + // The fallback behavior should've created a file errored.tfstate in the + // current working directory. 
+ checkState(t, "errored.tfstate", ` +test_instance.foo: (tainted) + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + ami = bar + `) + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +func TestLocal_applyRefreshFalse(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + op, configCleanup, done := testOperationApply(t, "./testdata/plan") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +type backendWithFailingState struct { + Local +} + +func (b *backendWithFailingState) StateMgr(name string) (statemgr.Full, error) { + return &failingState{ + statemgr.NewFilesystem("failing-state.tfstate"), + }, nil +} + +type failingState struct { + *statemgr.Filesystem +} + +func (s failingState) WriteState(state *states.State) error { + return errors.New("fake failure") +} + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + + // Many of our tests use an overridden "test" provider that's just in-memory + // inside the test process, not a separate plugin on disk. 
+ depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) + + return &backend.Operation{ + Type: backend.OperationTypeApply, + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewNoopLocker(), + View: view, + DependencyLocks: depLocks, + }, configCleanup, done +} + +// applyFixtureSchema returns a schema suitable for processing the +// configuration in testdata/apply . This schema should be +// assigned to a mock provider named "test". +func applyFixtureSchema() *terraform.ProviderSchema { + return &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + } +} + +func TestApply_applyCanceledAutoApprove(t *testing.T) { + b := TestLocal(t) + + TestLocalProvider(t, b, "test", applyFixtureSchema()) + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + op.AutoApprove = true + defer configCleanup() + defer func() { + output := done(t) + if !strings.Contains(output.Stderr(), "execution halted") { + t.Fatal("expected 'execution halted', got:\n", output.All()) + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + testHookStopPlanApply = cancel + defer func() { + testHookStopPlanApply = nil + }() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + +} diff --git a/internal/backend/local/backend_local.go b/backend/local/backend_local.go similarity index 97% rename from internal/backend/local/backend_local.go rename to backend/local/backend_local.go index 84646ab48297..0c71b31320ce 100644 --- a/internal/backend/local/backend_local.go +++ b/backend/local/backend_local.go 
@@ -7,13 +7,13 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/backend/local/backend_local_test.go b/backend/local/backend_local_test.go similarity index 88% rename from internal/backend/local/backend_local_test.go rename to backend/local/backend_local_test.go index 827c05da9ea3..ffd540cc30d7 100644 --- a/internal/backend/local/backend_local_test.go +++ b/backend/local/backend_local_test.go @@ -8,21 +8,21 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - 
"github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) func TestLocalRun(t *testing.T) { diff --git a/backend/local/backend_plan.go b/backend/local/backend_plan.go new file mode 100644 index 000000000000..48605cfb12c0 --- /dev/null +++ b/backend/local/backend_plan.go @@ -0,0 +1,183 @@ +package local + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Local) opPlan( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + + log.Printf("[INFO] backend/local: starting Plan operation") + + var diags tfdiags.Diagnostics + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't re-plan a saved plan", + "The plan command was given a saved plan file as its input. 
This command generates "+ + "a new plan, and so it requires a configuration directory as its argument.", + )) + op.ReportResult(runningOp, diags) + return + } + + // Local planning requires a config, unless we're planning to destroy. + if op.PlanMode != plans.DestroyMode && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Plan requires configuration to be present. Planning without a configuration would "+ + "mark everything for destruction, which is normally not what is desired. If you "+ + "would like to destroy everything, run plan with the -destroy option. Otherwise, "+ + "create a Terraform configuration file (.tf file) and try again.", + )) + op.ReportResult(runningOp, diags) + return + } + + if b.ContextOpts == nil { + b.ContextOpts = new(terraform.ContextOpts) + } + + // Get our context + lr, configSnap, opState, ctxDiags := b.localRun(op) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + // the state was locked during succesfull context creation; unlock the state + // when the operation completes + defer func() { + diags := op.StateLocker.Unlock() + if diags.HasErrors() { + op.View.Diagnostics(diags) + runningOp.Result = backend.OperationFailure + } + }() + + // Since planning doesn't immediately change the persisted state, the + // resulting state is always just the input state. + runningOp.State = lr.InputState + + // Perform the plan in a goroutine so we can be interrupted + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + go func() { + defer logging.PanicHandler() + defer close(doneCh) + log.Printf("[INFO] backend/local: plan calling Plan") + plan, planDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { + // If we get in here then the operation was cancelled, which is always + // considered to be a failure. 
+ log.Printf("[INFO] backend/local: plan operation was force-cancelled by interrupt") + runningOp.Result = backend.OperationFailure + return + } + log.Printf("[INFO] backend/local: plan operation completed") + + // NOTE: We intentionally don't stop here on errors because we always want + // to try to present a partial plan report and, if the user chose to, + // generate a partial saved plan file for external analysis. + diags = diags.Append(planDiags) + + // Even if there are errors we need to handle anything that may be + // contained within the plan, so only exit if there is no data at all. + if plan == nil { + runningOp.PlanEmpty = true + op.ReportResult(runningOp, diags) + return + } + + // Record whether this plan includes any side-effects that could be applied. + runningOp.PlanEmpty = !plan.CanApply() + + // Save the plan to disk + if path := op.PlanOutPath; path != "" { + if op.PlanOutBackend == nil { + // This is always a bug in the operation caller; it's not valid + // to set PlanOutPath without also setting PlanOutBackend. + diags = diags.Append(fmt.Errorf( + "PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)"), + ) + op.ReportResult(runningOp, diags) + return + } + plan.Backend = *op.PlanOutBackend + + // We may have updated the state in the refresh step above, but we + // will freeze that updated state in the plan file for now and + // only write it if this plan is subsequently applied. + plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.PriorState) + + // We also include a file containing the state as it existed before + // we took any action at all, but this one isn't intended to ever + // be saved to the backend (an equivalent snapshot should already be + // there) and so we just use a stub state file header in this case. 
+ // NOTE: This won't be exactly identical to the latest state snapshot + // in the backend because it's still been subject to state upgrading + // to make it consumable by the current Terraform version, and + // intentionally doesn't preserve the header info. + prevStateFile := &statefile.File{ + State: plan.PrevRunState, + } + + log.Printf("[INFO] backend/local: writing plan output to: %s", path) + err := planfile.Create(path, planfile.CreateArgs{ + ConfigSnapshot: configSnap, + PreviousRunStateFile: prevStateFile, + StateFile: plannedStateFile, + Plan: plan, + DependencyLocks: op.DependencyLocks, + }) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write plan file", + fmt.Sprintf("The plan file could not be written: %s.", err), + )) + op.ReportResult(runningOp, diags) + return + } + } + + // Render the plan, if we produced one. + // (This might potentially be a partial plan with Errored set to true) + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + op.View.Plan(plan, schemas) + + // If we've accumulated any diagnostics along the way then we'll show them + // here just before we show the summary and next steps. This can potentially + // include errors, because we intentionally try to show a partial plan + // above even if Terraform Core encountered an error partway through + // creating it. 
+ op.ReportResult(runningOp, diags) + + if !runningOp.PlanEmpty { + op.View.PlanNextStep(op.PlanOutPath) + } +} diff --git a/backend/local/backend_plan_test.go b/backend/local/backend_plan_test.go new file mode 100644 index 000000000000..f693c1a310c0 --- /dev/null +++ b/backend/local/backend_plan_test.go @@ -0,0 +1,906 @@ +package local + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" +) + +func TestLocal_planBasic(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if !p.PlanResourceChangeCalled { + t.Fatal("PlanResourceChange should be called") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planInAutomation(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + + const msg = `You didn't use the -out option` + + // When we're "in 
automation" we omit certain text from the plan output. + // However, the responsibility for this omission is in the view, so here we + // test for its presence while the "in automation" setting is false, to + // validate that we are calling the correct view method. + // + // Ideally this test would be replaced by a call-logging mock view, but + // that's future work. + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if output := done(t).Stdout(); !strings.Contains(output, msg) { + t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output) + } +} + +func TestLocal_planNoConfig(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if run.Result == backend.OperationSuccess { + t.Fatal("plan operation succeeded; want failure") + } + + if stderr := output.Stderr(); !strings.Contains(stderr, "No configuration files") { + t.Fatalf("bad: %s", stderr) + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +// This test validates the state lacking behavior when the inner call to +// Context() fails +func TestLocal_plan_context_error(t *testing.T) { + b := TestLocal(t) + + // This is an intentionally-invalid value to make terraform.NewContext fail + // when b.Operation calls it. 
+ // NOTE: This test was originally using a provider initialization failure + // as its forced error condition, but terraform.NewContext is no longer + // responsible for checking that. Invalid parallelism is the last situation + // where terraform.NewContext can return error diagnostics, and arguably + // we should be validating this argument at the UI layer anyway, so perhaps + // in future we'll make terraform.NewContext never return errors and then + // this test will become redundant, because its purpose is specifically + // to test that we properly unlock the state if terraform.NewContext + // returns an error. + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + b.ContextOpts.Parallelism = -1 + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + // we coerce a failure in Context() by omitting the provider schema + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationFailure { + t.Fatalf("plan operation succeeded") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: Invalid parallelism value"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_planOutputsChanged(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "changed"}, + }, cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "sensitive_before"}, + }, cty.StringVal("before"), true) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "sensitive_after"}, + }, 
cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "removed"}, // not present in the config fixture + }, cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "unchanged"}, + }, cty.StringVal("before"), false) + // NOTE: This isn't currently testing the situation where the new + // value of an output is unknown, because to do that requires there to + // be at least one managed resource Create action in the plan and that + // would defeat the point of this test, which is to ensure that a + // plan containing only output changes is considered "non-empty". + // For now we're not too worried about testing the "new value is + // unknown" situation because that's already common for printing out + // resource changes and we already have many tests for that. + })) + outDir := t.TempDir() + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-outputs-changed") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if run.PlanEmpty { + t.Error("plan should not be empty") + } + + expectedOutput := strings.TrimSpace(` +Changes to Outputs: + + added = "after" + ~ changed = "before" -> "after" + - removed = "before" -> null + ~ sensitive_after = (sensitive value) + ~ sensitive_before = (sensitive value) + +You can apply this plan to save these new output values to the Terraform +state, without changing any real infrastructure. +`) + + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Errorf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) + } +} + +// Module outputs should not cause the plan to be rendered +func TestLocal_planModuleOutputsChanged(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey), + OutputValue: addrs.OutputValue{Name: "changed"}, + }, cty.StringVal("before"), false) + })) + outDir := t.TempDir() + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-module-outputs-changed") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if 
!run.PlanEmpty { + t.Fatal("plan should be empty") + } + + expectedOutput := strings.TrimSpace(` +No changes. Your infrastructure matches the configuration. +`) + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) + } +} + +func TestLocal_planTainted(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_tainted()) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: +-/+ destroy and then create replacement + +Terraform will perform the following actions: + + # test_instance.foo is tainted, so must be replaced +-/+ resource "test_instance" "foo" { + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput) + } +} + +func TestLocal_planDeposedOnly(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + states.DeposedKey("00000000"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + })) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should've been called to refresh the deposed object") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + // The deposed object and the current object are distinct, so our + // plan includes separate actions for each of them. This strange situation + // is not common: it should arise only if Terraform fails during + // a create-before-destroy when the create hasn't completed yet but + // in a severe way that prevents the previous object from being restored + // as "current". + // + // However, that situation was more common in some earlier Terraform + // versions where deposed objects were not managed properly, so this + // can arise when upgrading from an older version with deposed objects + // already in the state. + // + // This is one of the few cases where we expose the idea of "deposed" in + // the UI, including the user-unfriendly "deposed key" (00000000 in this + // case) just so that users can correlate this with what they might + // see in `terraform show` and in the subsequent apply output, because + // it's also possible for there to be _multiple_ deposed objects, in the + // unlikely event that create_before_destroy _keeps_ crashing across + // subsequent runs. + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: + + create + - destroy + +Terraform will perform the following actions: + + # test_instance.foo will be created + + resource "test_instance" "foo" { + + ami = "bar" + + + network_interface { + + description = "Main network interface" + + device_index = 0 + } + } + + # test_instance.foo (deposed object 00000000) will be destroyed + # (left over from a partially-failed replacement of this instance) + - resource "test_instance" "foo" { + - ami = "bar" -> null + + - network_interface { + - description = "Main network interface" -> null + - device_index = 0 -> null + } + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s", output) + } +} + +func TestLocal_planTainted_createBeforeDestroy(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_tainted()) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cbd") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: ++/- create replacement and then destroy + +Terraform will perform the following actions: + + # test_instance.foo is tainted, so must be replaced ++/- resource "test_instance" "foo" { + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s", output) + } +} + +func TestLocal_planRefreshFalse(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + if !run.PlanEmpty { + t.Fatal("plan should be empty") + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planDestroy(t *testing.T) { + b := TestLocal(t) + + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + + op, configCleanup, done := 
testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanMode = plans.DestroyMode + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. + Type: "local", + Config: cfgRaw, + } + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + plan := testReadPlan(t, planPath) + for _, r := range plan.Changes.Resources { + if r.Action.String() != "Delete" { + t.Fatalf("bad: %#v", r.Action.String()) + } + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planDestroy_withDataSources(t *testing.T) { + b := TestLocal(t) + + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_withDataSource()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + + op, configCleanup, done := testOperationPlan(t, "./testdata/destroy-with-ds") + defer configCleanup() + op.PlanMode = plans.DestroyMode + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + // Data source should still exist in the the plan file + plan := testReadPlan(t, planPath) + if len(plan.Changes.Resources) != 2 { + t.Fatalf("Expected exactly 1 resource for destruction, %d given: %q", + len(plan.Changes.Resources), getAddrs(plan.Changes.Resources)) + } + + // Data source should not be rendered in the output + expectedOutput := `Terraform will perform the following actions: + + # test_instance.foo[0] will be destroyed + - resource "test_instance" "foo" { + - ami = "bar" -> null + + - network_interface { + - description = "Main network interface" -> null + - device_index = 0 -> null + } + } + +Plan: 0 to add, 0 to change, 1 to destroy.` + + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s", output) + } +} + +func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string { + addrs := make([]string, len(resources)) + for i, r := range resources { + addrs[i] = r.Addr.String() + } + return addrs +} + +func TestLocal_planOutPathNoChange(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + plan := testReadPlan(t, planPath) + + if !plan.Changes.Empty() { + t.Fatalf("expected empty plan to be written") + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + + // Many of our tests use an overridden "test" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) + + return &backend.Operation{ + Type: backend.OperationTypePlan, + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewNoopLocker(), + View: view, + DependencyLocks: depLocks, + }, configCleanup, done +} + +// testPlanState is just a common state that we use for testing plan. 
+func testPlanState() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testPlanState_withDataSource() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_ds", + Name: "bar", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "filter": "foo" + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testPlanState_tainted() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{ + "ami": "bar", + 
"network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testReadPlan(t *testing.T, path string) *plans.Plan { + t.Helper() + + p, err := planfile.Open(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer p.Close() + + plan, err := p.ReadPlan() + if err != nil { + t.Fatalf("err: %s", err) + } + + return plan +} + +// planFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan . This schema should be +// assigned to a mock provider named "test". +func planFixtureSchema() *terraform.ProviderSchema { + return &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.Number, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_ds": { + Attributes: map[string]*configschema.Attribute{ + "filter": {Type: cty.String, Required: true}, + }, + }, + }, + } +} + +func TestLocal_invalidOptions(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanMode = plans.RefreshOnlyMode + op.ForceReplace = []addrs.AbsResourceInstance{mustResourceInstanceAddr("test_instance.foo")} + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + <-run.Done() + if run.Result == 
backend.OperationSuccess {
+		t.Fatalf("expected plan operation to fail")
+	}
+
+	if errOutput := done(t).Stderr(); errOutput == "" {
+		t.Fatal("expected error output")
+	}
+}
diff --git a/internal/backend/local/backend_refresh.go b/backend/local/backend_refresh.go
similarity index 91%
rename from internal/backend/local/backend_refresh.go
rename to backend/local/backend_refresh.go
index 244e8e89bb6c..84fa5130feb7 100644
--- a/internal/backend/local/backend_refresh.go
+++ b/backend/local/backend_refresh.go
@@ -6,11 +6,11 @@ import (
 	"log"
 	"os"
 
-	"github.com/hashicorp/terraform/internal/backend"
-	"github.com/hashicorp/terraform/internal/logging"
-	"github.com/hashicorp/terraform/internal/states"
-	"github.com/hashicorp/terraform/internal/states/statemgr"
-	"github.com/hashicorp/terraform/internal/tfdiags"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/logging"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/states/statemgr"
+	"github.com/hashicorp/terraform/tfdiags"
 )
 
 func (b *Local) opRefresh(
diff --git a/backend/local/backend_refresh_test.go b/backend/local/backend_refresh_test.go
new file mode 100644
index 000000000000..78511df2c21a
--- /dev/null
+++ b/backend/local/backend_refresh_test.go
@@ -0,0 +1,308 @@
+package local
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/command/arguments"
+	"github.com/hashicorp/terraform/command/clistate"
+	"github.com/hashicorp/terraform/command/views"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/depsfile"
+	"github.com/hashicorp/terraform/initwd"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/terminal"
+	"github.com/hashicorp/terraform/terraform"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestLocal_refresh(t *testing.T) {
+	b :=
TestLocal(t) + + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, testRefreshState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + `) + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +func TestLocal_refreshInput(t *testing.T) { + b := TestLocal(t) + + schema := &terraform.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + } + + p := TestLocalProvider(t, b, "test", schema) + testStateFile(t, b.StatePath, testRefreshState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() || val.AsString() != "bar" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect value %#v", val)) + } + + return + } + + // Enable input asking since it is normally disabled by default + 
b.OpInput = true + b.ContextOpts.UIInput = &terraform.MockUIInput{InputReturnString: "bar"} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-var-unset") + defer configCleanup() + defer done(t) + op.UIIn = b.ContextOpts.UIInput + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + `) +} + +func TestLocal_refreshValidate(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, testRefreshState()) + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + // Enable validation + b.OpValidation = true + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + `) +} + +func TestLocal_refreshValidateProviderConfigured(t *testing.T) { + b := TestLocal(t) + + schema := &terraform.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + } + + p := TestLocalProvider(t, b, "test", schema) + testStateFile(t, b.StatePath, testRefreshState()) + p.ReadResourceFn = nil + 
p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + // Enable validation + b.OpValidation = true + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-provider-config") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ValidateProviderConfigCalled { + t.Fatal("Validate provider config should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + `) +} + +// This test validates the state lacking behavior when the inner call to +// Context() fails +func TestLocal_refresh_context_error(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, testRefreshState()) + op, configCleanup, done := testOperationRefresh(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + // we coerce a failure in Context() by omitting the provider schema + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want failure") + } + assertBackendStateUnlocked(t, b) +} + +func TestLocal_refreshEmptyState(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, states.NewState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if stderr := output.Stderr(); stderr != "" { + 
t.Fatalf("expected only warning diags, got errors: %s", stderr) + } + if got, want := output.Stdout(), "Warning: Empty or non-existent state"; !strings.Contains(got, want) { + t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + + // Many of our tests use an overridden "test" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) + + return &backend.Operation{ + Type: backend.OperationTypeRefresh, + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewNoopLocker(), + View: view, + DependencyLocks: depLocks, + }, configCleanup, done +} + +// testRefreshState is just a common state that we use for testing refresh. +func testRefreshState() *states.State { + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), + ) + return state +} + +// refreshFixtureSchema returns a schema suitable for processing the +// configuration in testdata/refresh . This schema should be +// assigned to a mock provider named "test". 
+func refreshFixtureSchema() *terraform.ProviderSchema { + return &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + } +} diff --git a/backend/local/backend_test.go b/backend/local/backend_test.go new file mode 100644 index 000000000000..cd548c99cd8d --- /dev/null +++ b/backend/local/backend_test.go @@ -0,0 +1,246 @@ +package local + +import ( + "errors" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +func TestLocal_impl(t *testing.T) { + var _ backend.Enhanced = New() + var _ backend.Local = New() + var _ backend.CLI = New() +} + +func TestLocal_backend(t *testing.T) { + testTmpDir(t) + b := New() + backend.TestBackendStates(t, b) + backend.TestBackendStateLocks(t, b, b) +} + +func checkState(t *testing.T, path, expected string) { + t.Helper() + // Read the state + f, err := os.Open(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + state, err := statefile.Read(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := state.State.String() + expected = strings.TrimSpace(expected) + if actual != expected { + t.Fatalf("state does not match! 
actual:\n%s\n\nexpected:\n%s", actual, expected) + } +} + +func TestLocal_StatePaths(t *testing.T) { + b := New() + + // Test the defaults + path, out, back := b.StatePaths("") + + if path != DefaultStateFilename { + t.Fatalf("expected %q, got %q", DefaultStateFilename, path) + } + + if out != DefaultStateFilename { + t.Fatalf("expected %q, got %q", DefaultStateFilename, out) + } + + dfltBackup := DefaultStateFilename + DefaultBackupExtension + if back != dfltBackup { + t.Fatalf("expected %q, got %q", dfltBackup, back) + } + + // check with env + testEnv := "test_env" + path, out, back = b.StatePaths(testEnv) + + expectedPath := filepath.Join(DefaultWorkspaceDir, testEnv, DefaultStateFilename) + expectedOut := expectedPath + expectedBackup := expectedPath + DefaultBackupExtension + + if path != expectedPath { + t.Fatalf("expected %q, got %q", expectedPath, path) + } + + if out != expectedOut { + t.Fatalf("expected %q, got %q", expectedOut, out) + } + + if back != expectedBackup { + t.Fatalf("expected %q, got %q", expectedBackup, back) + } + +} + +func TestLocal_addAndRemoveStates(t *testing.T) { + testTmpDir(t) + dflt := backend.DefaultStateName + expectedStates := []string{dflt} + + b := New() + states, err := b.Workspaces() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected []string{%q}, got %q", dflt, states) + } + + expectedA := "test_A" + if _, err := b.StateMgr(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = append(expectedStates, expectedA) + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + expectedB := "test_B" + if _, err := b.StateMgr(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = append(expectedStates, expectedB) + if !reflect.DeepEqual(states, expectedStates) 
{ + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(expectedA, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = []string{dflt, expectedB} + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(expectedB, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = []string{dflt} + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(dflt, true); err == nil { + t.Fatal("expected error deleting default state") + } +} + +// a local backend which returns sentinel errors for NamedState methods to +// verify it's being called. +type testDelegateBackend struct { + *Local + + // return a sentinel error on these calls + stateErr bool + statesErr bool + deleteErr bool +} + +var errTestDelegateState = errors.New("state called") +var errTestDelegateStates = errors.New("states called") +var errTestDelegateDeleteState = errors.New("delete called") + +func (b *testDelegateBackend) StateMgr(name string) (statemgr.Full, error) { + if b.stateErr { + return nil, errTestDelegateState + } + s := statemgr.NewFilesystem("terraform.tfstate") + return s, nil +} + +func (b *testDelegateBackend) Workspaces() ([]string, error) { + if b.statesErr { + return nil, errTestDelegateStates + } + return []string{"default"}, nil +} + +func (b *testDelegateBackend) DeleteWorkspace(name string, force bool) error { + if b.deleteErr { + return errTestDelegateDeleteState + } + return nil +} + +// verify that the MultiState methods are dispatched to the correct Backend. 
+func TestLocal_multiStateBackend(t *testing.T) { + // assign a separate backend where we can read the state + b := NewWithBackend(&testDelegateBackend{ + stateErr: true, + statesErr: true, + deleteErr: true, + }) + + if _, err := b.StateMgr("test"); err != errTestDelegateState { + t.Fatal("expected errTestDelegateState, got:", err) + } + + if _, err := b.Workspaces(); err != errTestDelegateStates { + t.Fatal("expected errTestDelegateStates, got:", err) + } + + if err := b.DeleteWorkspace("test", true); err != errTestDelegateDeleteState { + t.Fatal("expected errTestDelegateDeleteState, got:", err) + } +} + +// testTmpDir changes into a tmp dir and change back automatically when the test +// and all its subtests complete. +func testTmpDir(t *testing.T) { + tmp := t.TempDir() + + old, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + if err := os.Chdir(tmp); err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + // ignore errors and try to clean up + os.Chdir(old) + }) +} diff --git a/backend/local/cli.go b/backend/local/cli.go new file mode 100644 index 000000000000..9963cbee6b39 --- /dev/null +++ b/backend/local/cli.go @@ -0,0 +1,32 @@ +package local + +import ( + "log" + + "github.com/hashicorp/terraform/backend" +) + +// backend.CLI impl. 
+func (b *Local) CLIInit(opts *backend.CLIOpts) error { + b.ContextOpts = opts.ContextOpts + b.OpInput = opts.Input + b.OpValidation = opts.Validation + + // configure any new cli options + if opts.StatePath != "" { + log.Printf("[TRACE] backend/local: CLI option -state is overriding state path to %s", opts.StatePath) + b.OverrideStatePath = opts.StatePath + } + + if opts.StateOutPath != "" { + log.Printf("[TRACE] backend/local: CLI option -state-out is overriding state output path to %s", opts.StateOutPath) + b.OverrideStateOutPath = opts.StateOutPath + } + + if opts.StateBackupPath != "" { + log.Printf("[TRACE] backend/local: CLI option -backup is overriding state backup path to %s", opts.StateBackupPath) + b.OverrideStateBackupPath = opts.StateBackupPath + } + + return nil +} diff --git a/internal/backend/local/hook_state.go b/backend/local/hook_state.go similarity index 77% rename from internal/backend/local/hook_state.go rename to backend/local/hook_state.go index 4c11496c2502..c54f22371fa0 100644 --- a/internal/backend/local/hook_state.go +++ b/backend/local/hook_state.go @@ -3,9 +3,9 @@ package local import ( "sync" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" ) // StateHook is a hook that continuously updates the state by calling diff --git a/internal/backend/local/hook_state_test.go b/backend/local/hook_state_test.go similarity index 82% rename from internal/backend/local/hook_state_test.go rename to backend/local/hook_state_test.go index 6e86ac728f2a..715f7d27a472 100644 --- a/internal/backend/local/hook_state_test.go +++ b/backend/local/hook_state_test.go @@ -3,8 +3,8 @@ package local import ( "testing" - "github.com/hashicorp/terraform/internal/states/statemgr" - 
"github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" ) func TestStateHook_impl(t *testing.T) { diff --git a/backend/local/local_test.go b/backend/local/local_test.go new file mode 100644 index 000000000000..edaedf22c046 --- /dev/null +++ b/backend/local/local_test.go @@ -0,0 +1,14 @@ +package local + +import ( + "flag" + "os" + "testing" + + _ "github.com/hashicorp/terraform/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} diff --git a/internal/backend/local/testdata/apply-empty/hello.txt b/backend/local/testdata/apply-empty/hello.txt similarity index 100% rename from internal/backend/local/testdata/apply-empty/hello.txt rename to backend/local/testdata/apply-empty/hello.txt diff --git a/internal/backend/local/testdata/apply-error/main.tf b/backend/local/testdata/apply-error/main.tf similarity index 100% rename from internal/backend/local/testdata/apply-error/main.tf rename to backend/local/testdata/apply-error/main.tf diff --git a/internal/backend/local/testdata/apply/main.tf b/backend/local/testdata/apply/main.tf similarity index 100% rename from internal/backend/local/testdata/apply/main.tf rename to backend/local/testdata/apply/main.tf diff --git a/internal/backend/local/testdata/destroy-with-ds/main.tf b/backend/local/testdata/destroy-with-ds/main.tf similarity index 100% rename from internal/backend/local/testdata/destroy-with-ds/main.tf rename to backend/local/testdata/destroy-with-ds/main.tf diff --git a/internal/backend/local/testdata/empty/.gitignore b/backend/local/testdata/empty/.gitignore similarity index 100% rename from internal/backend/local/testdata/empty/.gitignore rename to backend/local/testdata/empty/.gitignore diff --git a/internal/backend/local/testdata/invalid/invalid.tf b/backend/local/testdata/invalid/invalid.tf similarity index 100% rename from internal/backend/local/testdata/invalid/invalid.tf rename to 
backend/local/testdata/invalid/invalid.tf diff --git a/internal/backend/local/testdata/plan-cbd/main.tf b/backend/local/testdata/plan-cbd/main.tf similarity index 100% rename from internal/backend/local/testdata/plan-cbd/main.tf rename to backend/local/testdata/plan-cbd/main.tf diff --git a/internal/backend/local/testdata/plan-module-outputs-changed/main.tf b/backend/local/testdata/plan-module-outputs-changed/main.tf similarity index 100% rename from internal/backend/local/testdata/plan-module-outputs-changed/main.tf rename to backend/local/testdata/plan-module-outputs-changed/main.tf diff --git a/internal/backend/local/testdata/plan-module-outputs-changed/mod/main.tf b/backend/local/testdata/plan-module-outputs-changed/mod/main.tf similarity index 100% rename from internal/backend/local/testdata/plan-module-outputs-changed/mod/main.tf rename to backend/local/testdata/plan-module-outputs-changed/mod/main.tf diff --git a/internal/backend/local/testdata/plan-outputs-changed/main.tf b/backend/local/testdata/plan-outputs-changed/main.tf similarity index 100% rename from internal/backend/local/testdata/plan-outputs-changed/main.tf rename to backend/local/testdata/plan-outputs-changed/main.tf diff --git a/internal/backend/local/testdata/plan-outputs-changed/submodule/main.tf b/backend/local/testdata/plan-outputs-changed/submodule/main.tf similarity index 100% rename from internal/backend/local/testdata/plan-outputs-changed/submodule/main.tf rename to backend/local/testdata/plan-outputs-changed/submodule/main.tf diff --git a/internal/backend/local/testdata/plan/main.tf b/backend/local/testdata/plan/main.tf similarity index 100% rename from internal/backend/local/testdata/plan/main.tf rename to backend/local/testdata/plan/main.tf diff --git a/internal/backend/local/testdata/refresh-provider-config/main.tf b/backend/local/testdata/refresh-provider-config/main.tf similarity index 100% rename from internal/backend/local/testdata/refresh-provider-config/main.tf rename to 
backend/local/testdata/refresh-provider-config/main.tf diff --git a/internal/backend/local/testdata/refresh-var-unset/main.tf b/backend/local/testdata/refresh-var-unset/main.tf similarity index 100% rename from internal/backend/local/testdata/refresh-var-unset/main.tf rename to backend/local/testdata/refresh-var-unset/main.tf diff --git a/internal/backend/local/testdata/refresh/main.tf b/backend/local/testdata/refresh/main.tf similarity index 100% rename from internal/backend/local/testdata/refresh/main.tf rename to backend/local/testdata/refresh/main.tf diff --git a/backend/local/testing.go b/backend/local/testing.go new file mode 100644 index 000000000000..35a805324cab --- /dev/null +++ b/backend/local/testing.go @@ -0,0 +1,239 @@ +package local + +import ( + "path/filepath" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" +) + +// TestLocal returns a configured Local struct with temporary paths and +// in-memory ContextOpts. +// +// No operations will be called on the returned value, so you can still set +// public fields without any locks. +func TestLocal(t *testing.T) *Local { + t.Helper() + tempDir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + local := New() + local.StatePath = filepath.Join(tempDir, "state.tfstate") + local.StateOutPath = filepath.Join(tempDir, "state.tfstate") + local.StateBackupPath = filepath.Join(tempDir, "state.tfstate.bak") + local.StateWorkspaceDir = filepath.Join(tempDir, "state.tfstate.d") + local.ContextOpts = &terraform.ContextOpts{} + + return local +} + +// TestLocalProvider modifies the ContextOpts of the *Local parameter to +// have a provider with the given name. 
+func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.ProviderSchema) *terraform.MockProvider { + // Build a mock resource provider for in-memory operations + p := new(terraform.MockProvider) + + if schema == nil { + schema = &terraform.ProviderSchema{} // default schema is empty + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: schema.Provider}, + ProviderMeta: providers.Schema{Block: schema.ProviderMeta}, + ResourceTypes: map[string]providers.Schema{}, + DataSources: map[string]providers.Schema{}, + } + for name, res := range schema.ResourceTypes { + p.GetProviderSchemaResponse.ResourceTypes[name] = providers.Schema{ + Block: res, + Version: int64(schema.ResourceTypeSchemaVersions[name]), + } + } + for name, dat := range schema.DataSources { + p.GetProviderSchemaResponse.DataSources[name] = providers.Schema{Block: dat} + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan, + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName) + if rSchema == nil { + rSchema = &configschema.Block{} // default schema is empty + } + plannedVals := map[string]cty.Value{} + for name, attrS := range rSchema.Attributes { + val := req.ProposedNewState.GetAttr(name) + if attrS.Computed && val.IsNull() { + val = cty.UnknownVal(attrS.Type) + } + plannedVals[name] = val + } + for name := range rSchema.BlockTypes { + // For simplicity's sake we just copy the block attributes over + // verbatim, since this package's mock providers are all relatively + // simple -- we're testing the backend, not esoteric provider features. 
+ plannedVals[name] = req.ProposedNewState.GetAttr(name) + } + + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(plannedVals), + PlannedPrivate: req.PriorPrivate, + } + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{State: req.Config} + } + + // Initialize the opts + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + + // Set up our provider + b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider(name): providers.FactoryFixed(p), + } + + return p + +} + +// TestLocalSingleState is a backend implementation that wraps Local +// and modifies it to only support single states (returns +// ErrWorkspacesNotSupported for multi-state operations). +// +// This isn't an actual use case, this is exported just to provide a +// easy way to test that behavior. +type TestLocalSingleState struct { + *Local +} + +// TestNewLocalSingle is a factory for creating a TestLocalSingleState. +// This function matches the signature required for backend/init. +func TestNewLocalSingle() backend.Backend { + return &TestLocalSingleState{Local: New()} +} + +func (b *TestLocalSingleState) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) DeleteWorkspace(string, bool) error { + return backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + return b.Local.StateMgr(name) +} + +// TestLocalNoDefaultState is a backend implementation that wraps +// Local and modifies it to support named states, but not the +// default state. 
It returns ErrDefaultWorkspaceNotSupported when +// the DefaultStateName is used. +type TestLocalNoDefaultState struct { + *Local +} + +// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState. +// This function matches the signature required for backend/init. +func TestNewLocalNoDefault() backend.Backend { + return &TestLocalNoDefaultState{Local: New()} +} + +func (b *TestLocalNoDefaultState) Workspaces() ([]string, error) { + workspaces, err := b.Local.Workspaces() + if err != nil { + return nil, err + } + + filtered := workspaces[:0] + for _, name := range workspaces { + if name != backend.DefaultStateName { + filtered = append(filtered, name) + } + } + + return filtered, nil +} + +func (b *TestLocalNoDefaultState) DeleteWorkspace(name string, force bool) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.DeleteWorkspace(name, force) +} + +func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, error) { + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.StateMgr(name) +} + +func testStateFile(t *testing.T, path string, s *states.State) { + stateFile := statemgr.NewFilesystem(path) + stateFile.WriteState(s) +} + +func mustProviderConfig(s string) addrs.AbsProviderConfig { + p, diags := addrs.ParseAbsProviderConfigStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return p +} + +func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +// assertBackendStateUnlocked attempts to lock the backend state. Failure +// indicates that the state was indeed locked and therefore this function will +// return false.
+func assertBackendStateUnlocked(t *testing.T, b *Local) bool { + t.Helper() + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Errorf("state is already locked: %s", err.Error()) + return false + } + return true +} + +// assertBackendStateLocked attempts to lock the backend state. Failure +// indicates that the state was already locked and therefore this function will +// return true. +func assertBackendStateLocked(t *testing.T, b *Local) bool { + t.Helper() + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + return true + } + t.Error("unexpected success locking state") + return true +} diff --git a/internal/backend/operation_type.go b/backend/operation_type.go similarity index 100% rename from internal/backend/operation_type.go rename to backend/operation_type.go diff --git a/internal/backend/operationtype_string.go b/backend/operationtype_string.go similarity index 100% rename from internal/backend/operationtype_string.go rename to backend/operationtype_string.go diff --git a/internal/backend/remote-state/azure/arm_client.go b/backend/remote-state/azure/arm_client.go similarity index 99% rename from internal/backend/remote-state/azure/arm_client.go rename to backend/remote-state/azure/arm_client.go index d548c30b7ba5..ae111b1dd8db 100644 --- a/internal/backend/remote-state/azure/arm_client.go +++ b/backend/remote-state/azure/arm_client.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/go-azure-helpers/authentication" "github.com/hashicorp/go-azure-helpers/sender" - "github.com/hashicorp/terraform/internal/httpclient" + "github.com/hashicorp/terraform/httpclient" "github.com/hashicorp/terraform/version" "github.com/manicminer/hamilton/environments" "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" diff --git a/backend/remote-state/azure/backend.go
b/backend/remote-state/azure/backend.go new file mode 100644 index 000000000000..dacc0b9aad9d --- /dev/null +++ b/backend/remote-state/azure/backend.go @@ -0,0 +1,271 @@ +package azure + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" +) + +// New creates a new backend for Azure remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "storage_account_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the storage account.", + }, + + "container_name": { + Type: schema.TypeString, + Required: true, + Description: "The container name.", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The blob key.", + }, + + "metadata_host": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOST", ""), + Description: "The Metadata URL which will be used to obtain the Cloud Environment.", + }, + + "environment": { + Type: schema.TypeString, + Optional: true, + Description: "The Azure cloud environment.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"), + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "The access key.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), + }, + + "sas_token": { + Type: schema.TypeString, + Optional: true, + Description: "A SAS Token used to interact with the Blob Storage Account.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SAS_TOKEN", ""), + }, + + "snapshot": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable/Disable automatic blob snapshotting", + DefaultFunc: schema.EnvDefaultFunc("ARM_SNAPSHOT", false), + }, + + "resource_group_name": { + Type: schema.TypeString, + Optional: true, + Description: "The resource group name.", + }, + + "client_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + 
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom Endpoint used to access the Azure Resource Manager API's.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""), + }, + + "subscription_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Subscription ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), + }, + + "tenant_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), + }, + + // Service Principal (Client Certificate) specific + "client_certificate_password": { + Type: schema.TypeString, + Optional: true, + Description: "The password associated with the Client Certificate specified in `client_certificate_path`", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""), + }, + "client_certificate_path": { + Type: schema.TypeString, + Optional: true, + Description: "The path to the PFX file used as the Client Certificate when authenticating as a Service Principal", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""), + }, + + // Service Principal (Client Secret) specific + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), + }, + + // Managed Service Identity specific + "use_msi": { + Type: schema.TypeBool, + Optional: true, + Description: "Should Managed Service Identity be used?", + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false), + }, + "msi_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "The Managed Service Identity Endpoint.", + DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""), + }, + + // OIDC auth specific fields + "use_oidc": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false), + 
Description: "Allow OIDC to be used for authentication", + }, + "oidc_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""), + Description: "A generic JWT token that can be used for OIDC authentication. Should not be used in conjunction with `oidc_request_token`.", + }, + "oidc_token_file_path": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""), + Description: "Path to file containing a generic JWT token that can be used for OIDC authentication. Should not be used in conjunction with `oidc_request_token`.", + }, + "oidc_request_url": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""), + Description: "The URL of the OIDC provider from which to request an ID token. Needs to be used in conjunction with `oidc_request_token`. This is meant to be used for Github Actions.", + }, + "oidc_request_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""), + Description: "The bearer token to use for the request to the OIDC providers `oidc_request_url` URL to fetch an ID token. Needs to be used in conjunction with `oidc_request_url`. 
This is meant to be used for Github Actions.", + }, + + // Feature Flags + "use_azuread_auth": { + Type: schema.TypeBool, + Optional: true, + Description: "Should Terraform use AzureAD Authentication to access the Blob?", + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_AZUREAD", false), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + armClient *ArmClient + containerName string + keyName string + accountName string + snapshot bool +} + +type BackendConfig struct { + // Required + StorageAccountName string + + // Optional + AccessKey string + ClientID string + ClientCertificatePassword string + ClientCertificatePath string + ClientSecret string + CustomResourceManagerEndpoint string + MetadataHost string + Environment string + MsiEndpoint string + OIDCToken string + OIDCTokenFilePath string + OIDCRequestURL string + OIDCRequestToken string + ResourceGroupName string + SasToken string + SubscriptionID string + TenantID string + UseMsi bool + UseOIDC bool + UseAzureADAuthentication bool +} + +func (b *Backend) configure(ctx context.Context) error { + if b.containerName != "" { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + b.containerName = data.Get("container_name").(string) + b.accountName = data.Get("storage_account_name").(string) + b.keyName = data.Get("key").(string) + b.snapshot = data.Get("snapshot").(bool) + + config := BackendConfig{ + AccessKey: data.Get("access_key").(string), + ClientID: data.Get("client_id").(string), + ClientCertificatePassword: data.Get("client_certificate_password").(string), + ClientCertificatePath: data.Get("client_certificate_path").(string), + ClientSecret: data.Get("client_secret").(string), + CustomResourceManagerEndpoint: data.Get("endpoint").(string), + MetadataHost: data.Get("metadata_host").(string), + Environment: 
data.Get("environment").(string), + MsiEndpoint: data.Get("msi_endpoint").(string), + OIDCToken: data.Get("oidc_token").(string), + OIDCTokenFilePath: data.Get("oidc_token_file_path").(string), + OIDCRequestURL: data.Get("oidc_request_url").(string), + OIDCRequestToken: data.Get("oidc_request_token").(string), + ResourceGroupName: data.Get("resource_group_name").(string), + SasToken: data.Get("sas_token").(string), + StorageAccountName: data.Get("storage_account_name").(string), + SubscriptionID: data.Get("subscription_id").(string), + TenantID: data.Get("tenant_id").(string), + UseMsi: data.Get("use_msi").(bool), + UseOIDC: data.Get("use_oidc").(bool), + UseAzureADAuthentication: data.Get("use_azuread_auth").(bool), + } + + armClient, err := buildArmClient(context.TODO(), config) + if err != nil { + return err + } + + thingsNeededToLookupAccessKeySpecified := config.AccessKey == "" && config.SasToken == "" && config.ResourceGroupName == "" + if thingsNeededToLookupAccessKeySpecified && !config.UseAzureADAuthentication { + return fmt.Errorf("Either an Access Key / SAS Token or the Resource Group for the Storage Account must be specified - or Azure AD Authentication must be enabled") + } + + b.armClient = armClient + return nil +} diff --git a/backend/remote-state/azure/backend_state.go b/backend/remote-state/azure/backend_state.go new file mode 100644 index 000000000000..60d59f1b643a --- /dev/null +++ b/backend/remote-state/azure/backend_state.go @@ -0,0 +1,167 @@ +package azure + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers" +) + +const ( + // This will be used as directory name, the odd looking colon is simply to + // reduce the 
chance of name conflicts with existing objects. + keyEnvPrefix = "env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + prefix := b.keyName + keyEnvPrefix + params := containers.ListBlobsInput{ + Prefix: &prefix, + } + + ctx := context.TODO() + client, err := b.armClient.getContainersClient(ctx) + if err != nil { + return nil, err + } + resp, err := client.ListBlobs(ctx, b.armClient.storageAccountName, b.containerName, params) + if err != nil { + return nil, err + } + + envs := map[string]struct{}{} + for _, obj := range resp.Blobs.Blobs { + key := obj.Name + if strings.HasPrefix(key, prefix) { + name := strings.TrimPrefix(key, prefix) + // we store the state in a key, not a directory + if strings.Contains(name, "/") { + continue + } + + envs[name] = struct{}{} + } + } + + result := []string{backend.DefaultStateName} + for name := range envs { + result = append(result, name) + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + ctx := context.TODO() + client, err := b.armClient.getBlobClient(ctx) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, b.armClient.storageAccountName, b.containerName, b.path(name), blobs.DeleteInput{}); err != nil { + if resp.Response.StatusCode != 404 { + return err + } + } + + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + ctx := context.TODO() + blobClient, err := b.armClient.getBlobClient(ctx) + if err != nil { + return nil, err + } + + client := &RemoteClient{ + giovanniBlobClient: *blobClient, + containerName: b.containerName, + keyName: b.path(name), + accountName: b.accountName, + snapshot: b.snapshot, + } + + stateMgr := &remote.State{Client: client} + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + return nil, err + } + //if this isn't the default state name, 
we need to create the object so + //it's listed by States. + if v := stateMgr.State(); v == nil { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock azure state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + //if this isn't the default state name, we need to create the object so + //it's listed by States. + if v := stateMgr.State(); v == nil { + // If we have no state, we have to create an empty state + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return b.keyName + keyEnvPrefix + name +} + +const errStateUnlock = ` +Error unlocking Azure state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+` diff --git a/backend/remote-state/azure/backend_test.go b/backend/remote-state/azure/backend_test.go new file mode 100644 index 000000000000..ea0ff0a025b2 --- /dev/null +++ b/backend/remote-state/azure/backend_test.go @@ -0,0 +1,366 @@ +package azure + +import ( + "context" + "os" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/acctest" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + // This test just instantiates the client. Shouldn't make any actual + // requests nor incur any costs. + + config := map[string]interface{}{ + "storage_account_name": "tfaccount", + "container_name": "tfcontainer", + "key": "state", + "snapshot": false, + // Access Key must be Base64 + "access_key": "QUNDRVNTX0tFWQ0K", + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + + if b.containerName != "tfcontainer" { + t.Fatalf("Incorrect bucketName was populated") + } + if b.keyName != "state" { + t.Fatalf("Incorrect keyName was populated") + } + if b.snapshot != false { + t.Fatalf("Incorrect snapshot was populated") + } +} + +func TestAccBackendAccessKeyBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + armClient.destroyTestResources(ctx, res) + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + 
+ backend.TestBackendStates(t, b) +} + +func TestAccBackendSASTokenBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey) + if err != nil { + t.Fatalf("Error building SAS Token: %+v", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "sas_token": *sasToken, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendOIDCBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "use_oidc": true, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendAzureADAuthBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := 
testResourceNames(rs, "testState") + res.useAzureADAuth = true + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + armClient.destroyTestResources(ctx, res) + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + "use_azuread_auth": true, + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendManagedServiceIdentityBasic(t *testing.T) { + testAccAzureBackendRunningInAzure(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "use_msi": true, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientCertificateBasic(t *testing.T) { + testAccAzureBackend(t) + + clientCertPassword := os.Getenv("ARM_CLIENT_CERTIFICATE_PASSWORD") + clientCertPath := os.Getenv("ARM_CLIENT_CERTIFICATE_PATH") + if clientCertPath == "" { + t.Skip("Skipping 
since `ARM_CLIENT_CERTIFICATE_PATH` is not specified!") + } + + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_certificate_password": clientCertPassword, + "client_certificate_path": clientCertPath, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientSecretBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + 
backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientSecretCustomEndpoint(t *testing.T) { + testAccAzureBackend(t) + + // this is only applicable for Azure Stack. + endpoint := os.Getenv("ARM_ENDPOINT") + if endpoint == "" { + t.Skip("Skipping as ARM_ENDPOINT isn't configured") + } + + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": endpoint, + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendAccessKeyLocked(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := 
backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) + + backend.TestBackendStateLocksInWS(t, b1, b2, "foo") + backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") +} + +func TestAccBackendServicePrincipalLocked(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + 
"environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) + + backend.TestBackendStateLocksInWS(t, b1, b2, "foo") + backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") +} diff --git a/backend/remote-state/azure/client.go b/backend/remote-state/azure/client.go new file mode 100644 index 000000000000..9982d184dea5 --- /dev/null +++ b/backend/remote-state/azure/client.go @@ -0,0 +1,279 @@ +package azure + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "log" + "net/http" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" +) + +const ( + leaseHeader = "x-ms-lease-id" + // Must be lower case + lockInfoMetaKey = "terraformlockid" +) + +type RemoteClient struct { + giovanniBlobClient blobs.Client + accountName string + containerName string + keyName string + leaseID string + snapshot bool +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + options := blobs.GetInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + blob, err := c.giovanniBlobClient.Get(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + if blob.Response.IsHTTPStatus(http.StatusNotFound) { + return nil, nil + } + return nil, err + } + + payload := &remote.Payload{ + Data: blob.Contents, + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + getOptions := blobs.GetPropertiesInput{} + setOptions := blobs.SetPropertiesInput{} + putOptions := blobs.PutBlockBlobInput{} + + options := blobs.GetInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + 
getOptions.LeaseID = &c.leaseID + setOptions.LeaseID = &c.leaseID + putOptions.LeaseID = &c.leaseID + } + + ctx := context.TODO() + + if c.snapshot { + snapshotInput := blobs.SnapshotInput{LeaseID: options.LeaseID} + + log.Printf("[DEBUG] Snapshotting existing Blob %q (Container %q / Account %q)", c.keyName, c.containerName, c.accountName) + if _, err := c.giovanniBlobClient.Snapshot(ctx, c.accountName, c.containerName, c.keyName, snapshotInput); err != nil { + return fmt.Errorf("error snapshotting Blob %q (Container %q / Account %q): %+v", c.keyName, c.containerName, c.accountName, err) + } + + log.Print("[DEBUG] Created blob snapshot") + } + + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, getOptions) + if err != nil { + if blob.StatusCode != 404 { + return err + } + } + + contentType := "application/json" + putOptions.Content = &data + putOptions.ContentType = &contentType + putOptions.MetaData = blob.MetaData + _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putOptions) + + return err +} + +func (c *RemoteClient) Delete() error { + options := blobs.DeleteInput{} + + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + resp, err := c.giovanniBlobClient.Delete(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + if !resp.IsHTTPStatus(http.StatusNotFound) { + return err + } + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) + info.Path = stateName + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + getLockInfoErr := func(err error) error { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + return &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + } + + leaseOptions := 
blobs.AcquireLeaseInput{ + ProposedLeaseID: &info.ID, + LeaseDuration: -1, + } + ctx := context.TODO() + + // obtain properties to see if the blob lease is already in use. If the blob doesn't exist, create it + properties, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{}) + if err != nil { + // error if we had issues getting the blob + if !properties.Response.IsHTTPStatus(http.StatusNotFound) { + return "", getLockInfoErr(err) + } + // if we don't find the blob, we need to build it + + contentType := "application/json" + putGOptions := blobs.PutBlockBlobInput{ + ContentType: &contentType, + } + + _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putGOptions) + if err != nil { + return "", getLockInfoErr(err) + } + } + + // if the blob is already locked then error + if properties.LeaseStatus == blobs.Locked { + return "", getLockInfoErr(fmt.Errorf("state blob is already locked")) + } + + leaseID, err := c.giovanniBlobClient.AcquireLease(ctx, c.accountName, c.containerName, c.keyName, leaseOptions) + if err != nil { + return "", getLockInfoErr(err) + } + + info.ID = leaseID.LeaseID + c.leaseID = leaseID.LeaseID + + if err := c.writeLockInfo(info); err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + options := blobs.GetPropertiesInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + return nil, err + } + + raw := blob.MetaData[lockInfoMetaKey] + if raw == "" { + return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) + } + + data, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + lockInfo := &statemgr.LockInfo{} + err = json.Unmarshal(data, lockInfo) + if err != nil { + 
return nil, err + } + + return lockInfo, nil +} + +// writes info to blob meta data, deletes metadata entry if info is nil +func (c *RemoteClient) writeLockInfo(info *statemgr.LockInfo) error { + ctx := context.TODO() + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{LeaseID: &c.leaseID}) + if err != nil { + return err + } + if err != nil { + return err + } + + if info == nil { + delete(blob.MetaData, lockInfoMetaKey) + } else { + value := base64.StdEncoding.EncodeToString(info.Marshal()) + blob.MetaData[lockInfoMetaKey] = value + } + + opts := blobs.SetMetaDataInput{ + LeaseID: &c.leaseID, + MetaData: blob.MetaData, + } + + _, err = c.giovanniBlobClient.SetMetaData(ctx, c.accountName, c.containerName, c.keyName, opts) + return err +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &statemgr.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + c.leaseID = lockInfo.ID + if err := c.writeLockInfo(nil); err != nil { + lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) + return lockErr + } + + ctx := context.TODO() + _, err = c.giovanniBlobClient.ReleaseLease(ctx, c.accountName, c.containerName, c.keyName, id) + if err != nil { + lockErr.Err = err + return lockErr + } + + c.leaseID = "" + + return nil +} diff --git a/backend/remote-state/azure/client_test.go b/backend/remote-state/azure/client_test.go new file mode 100644 index 000000000000..bc9283a31b1f --- /dev/null +++ b/backend/remote-state/azure/client_test.go @@ -0,0 +1,311 @@ +package azure + +import ( + "context" + "os" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/acctest" + 
"github.com/hashicorp/terraform/states/remote" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClientAccessKeyBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientManagedServiceIdentityBasic(t *testing.T) { + testAccAzureBackendRunningInAzure(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "use_msi": true, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "environment": 
os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientSasTokenBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey) + if err != nil { + t.Fatalf("Error building SAS Token: %+v", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "sas_token": *sasToken, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientServicePrincipalBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": 
os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientAccessKeyLocks(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestRemoteClientServicePrincipalLocks(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := 
buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestPutMaintainsMetaData(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + 
headerName := "acceptancetest" + expectedValue := "f3b56bad-33ad-4b93-a600-7a66e9cbd1eb" + + client, err := armClient.getBlobClient(ctx) + if err != nil { + t.Fatalf("Error building Blob Client: %+v", err) + } + + _, err = client.PutBlockBlob(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.PutBlockBlobInput{}) + if err != nil { + t.Fatalf("Error Creating Block Blob: %+v", err) + } + + blobReference, err := client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error loading MetaData: %+v", err) + } + + blobReference.MetaData[headerName] = expectedValue + opts := blobs.SetMetaDataInput{ + MetaData: blobReference.MetaData, + } + _, err = client.SetMetaData(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, opts) + if err != nil { + t.Fatalf("Error setting MetaData: %+v", err) + } + + // update the metadata using the Backend + remoteClient := RemoteClient{ + keyName: res.storageKeyName, + containerName: res.storageContainerName, + accountName: res.storageAccountName, + + giovanniBlobClient: *client, + } + + bytes := []byte(acctest.RandString(20)) + err = remoteClient.Put(bytes) + if err != nil { + t.Fatalf("Error putting data: %+v", err) + } + + // Verify it still exists + blobReference, err = client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{}) + if err != nil { + t.Fatalf("Error loading MetaData: %+v", err) + } + + if blobReference.MetaData[headerName] != expectedValue { + t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.MetaData) + } +} diff --git a/internal/backend/remote-state/azure/helpers_test.go b/backend/remote-state/azure/helpers_test.go similarity index 100% rename from internal/backend/remote-state/azure/helpers_test.go rename to backend/remote-state/azure/helpers_test.go diff --git 
a/internal/backend/remote-state/azure/sender.go b/backend/remote-state/azure/sender.go similarity index 97% rename from internal/backend/remote-state/azure/sender.go rename to backend/remote-state/azure/sender.go index 958273e83d05..a341b58cd105 100644 --- a/internal/backend/remote-state/azure/sender.go +++ b/backend/remote-state/azure/sender.go @@ -6,7 +6,7 @@ import ( "net/http/httputil" "github.com/Azure/go-autorest/autorest" - "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/logging" ) func buildSender() autorest.Sender { diff --git a/backend/remote-state/consul/backend.go b/backend/remote-state/consul/backend.go new file mode 100644 index 000000000000..9d05ba2b9e86 --- /dev/null +++ b/backend/remote-state/consul/backend.go @@ -0,0 +1,180 @@ +package consul + +import ( + "context" + "net" + "strings" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" +) + +// New creates a new backend for Consul remote state. 
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Path to store state in Consul", + }, + + "access_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Access token for a Consul ACL", + Default: "", // To prevent input + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Address to the Consul Cluster", + Default: "", // To prevent input + }, + + "scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Scheme to communicate to Consul with", + Default: "", // To prevent input + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Datacenter to communicate with", + Default: "", // To prevent input + }, + + "http_auth": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "HTTP Auth in the format of 'username:password'", + Default: "", // To prevent input + }, + + "gzip": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Compress the state data using gzip", + Default: false, + }, + + "lock": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Lock state access", + Default: true, + }, + + "ca_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CACERT", ""), + }, + + "cert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), + }, + + "key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded private key, required if 
cert_file is specified.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + client *consulapi.Client + configData *schema.ResourceData + lock bool +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + + // Store the lock information + b.lock = b.configData.Get("lock").(bool) + + data := b.configData + + // Configure the client + config := consulapi.DefaultConfig() + + // replace the default Transport Dialer to reduce the KeepAlive + config.Transport.DialContext = dialContext + + if v, ok := data.GetOk("access_token"); ok && v.(string) != "" { + config.Token = v.(string) + } + if v, ok := data.GetOk("address"); ok && v.(string) != "" { + config.Address = v.(string) + } + if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { + config.Scheme = v.(string) + } + if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { + config.Datacenter = v.(string) + } + + if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { + config.TLSConfig.CAFile = v.(string) + } + if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { + config.TLSConfig.CertFile = v.(string) + } + if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { + config.TLSConfig.KeyFile = v.(string) + } + + if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" { + auth := v.(string) + + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &consulapi.HttpBasicAuth{ + Username: username, + Password: password, + } + } + + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + b.client = client + return 
nil +} + +// dialContext is the DialContext function for the consul client transport. +// This is stored in a package var to inject a different dialer for tests. +var dialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 17 * time.Second, +}).DialContext diff --git a/backend/remote-state/consul/backend_state.go b/backend/remote-state/consul/backend_state.go new file mode 100644 index 000000000000..8601dacdb09c --- /dev/null +++ b/backend/remote-state/consul/backend_state.go @@ -0,0 +1,154 @@ +package consul + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + keyEnvPrefix = "-env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + // List our raw path + prefix := b.configData.Get("path").(string) + keyEnvPrefix + keys, _, err := b.client.KV().Keys(prefix, "/", nil) + if err != nil { + return nil, err + } + + // Find the envs, we use a map since we can get duplicates with + // path suffixes. + envs := map[string]struct{}{} + for _, key := range keys { + // Consul should ensure this but it doesn't hurt to check again + if strings.HasPrefix(key, prefix) { + key = strings.TrimPrefix(key, prefix) + + // Ignore anything with a "/" in it since we store the state + // directly in a key not a directory. + if idx := strings.IndexRune(key, '/'); idx >= 0 { + continue + } + + envs[key] = struct{}{} + } + } + + result := make([]string, 1, len(envs)+1) + result[0] = backend.DefaultStateName + for k, _ := range envs { + result = append(result, k) + } + + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + // Determine the path of the data + path := b.path(name) + + // Delete it. 
We just delete it without any locking since + // the DeleteState API is documented as such. + _, err := b.client.KV().Delete(path, nil) + return err +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + // Determine the path of the data + path := b.path(name) + + // Determine whether to gzip or not + gzip := b.configData.Get("gzip").(bool) + + // Build the state client + var stateMgr = &remote.State{ + Client: &RemoteClient{ + Client: b.client, + Path: path, + GZip: gzip, + lockState: b.lock, + }, + } + + if !b.lock { + stateMgr.DisableLocks() + } + + // the default state always exists + if name == backend.DefaultStateName { + return stateMgr, nil + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so States() knows it exists. + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Consul: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + return stateMgr, nil +} + +func (b *Backend) path(name string) string { + path := b.configData.Get("path").(string) + if name != 
backend.DefaultStateName { + path += fmt.Sprintf("%s%s", keyEnvPrefix, name) + } + + return path +} + +const errStateUnlock = ` +Error unlocking Consul state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +The Consul backend acquires a lock during initialization to ensure +the minimum required key/values are prepared. +` diff --git a/backend/remote-state/consul/backend_test.go b/backend/remote-state/consul/backend_test.go new file mode 100644 index 000000000000..153386c9d62b --- /dev/null +++ b/backend/remote-state/consul/backend_test.go @@ -0,0 +1,103 @@ +package consul + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/terraform/backend" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func newConsulTestServer(t *testing.T) *testutil.TestServer { + if os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == "" { + t.Skipf("consul server tests require setting TF_ACC or TF_CONSUL_TEST") + } + + srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.LogLevel = "warn" + + if !flag.Parsed() { + flag.Parse() + } + + if !testing.Verbose() { + c.Stdout = ioutil.Discard + c.Stderr = ioutil.Discard + } + }) + + if err != nil { + t.Fatalf("failed to create consul test server: %s", err) + } + + srv.WaitForSerfCheck(t) + srv.WaitForLeader(t) + + return srv +} + +func TestBackend(t *testing.T) { + srv := newConsulTestServer(t) + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend. We need two to test locking. 
+ b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + // Test + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) +} + +func TestBackend_lockDisabled(t *testing.T) { + srv := newConsulTestServer(t) + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend. We need two to test locking. + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + "lock": false, + })) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path + "different", // Diff so locking test would fail if it was locking + "lock": false, + })) + + // Test + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) +} + +func TestBackend_gzip(t *testing.T) { + srv := newConsulTestServer(t) + + // Get the backend + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": fmt.Sprintf("tf-unit/%s", time.Now().String()), + "gzip": true, + })) + + // Test + backend.TestBackendStates(t, b) +} diff --git a/backend/remote-state/consul/client.go b/backend/remote-state/consul/client.go new file mode 100644 index 000000000000..5ccc9165981a --- /dev/null +++ b/backend/remote-state/consul/client.go @@ -0,0 +1,682 @@ +package consul + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/json" + "errors" + "fmt" + "log" + "strings" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + lockSuffix = 
"/.lock" + lockInfoSuffix = "/.lockinfo" + + // The Session TTL associated with this lock. + lockSessionTTL = "15s" + + // the delay time from when a session is lost to when the + // lock is released by the server + lockDelay = 5 * time.Second + // interval between attempts to reacquire a lost lock + lockReacquireInterval = 2 * time.Second +) + +var lostLockErr = errors.New("consul lock was lost") + +// RemoteClient is a remote client that stores data in Consul. +type RemoteClient struct { + Client *consulapi.Client + Path string + GZip bool + + mu sync.Mutex + // lockState is true if we're using locks + lockState bool + + // The index of the last state we wrote. + // If this is > 0, Put will perform a CAS to ensure that the state wasn't + // changed during the operation. This is important even with locks, because + // if the client loses the lock for some reason, then reacquires it, we + // need to make sure that the state was not modified. + modifyIndex uint64 + + consulLock *consulapi.Lock + lockCh <-chan struct{} + + info *statemgr.LockInfo + + // cancel our goroutine which is monitoring the lock to automatically + // reacquire it when possible. + monitorCancel context.CancelFunc + monitorWG sync.WaitGroup + + // sessionCancel cancels the Context use for session.RenewPeriodic, and is + // called when unlocking, or before creating a new lock if the lock is + // lost. + sessionCancel context.CancelFunc +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + chunked, hash, chunks, pair, err := c.chunkedMode() + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + c.modifyIndex = pair.ModifyIndex + + var payload []byte + if chunked { + for _, c := range chunks { + pair, _, err := kv.Get(c, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, fmt.Errorf("Key %q could not be found", c) + } + payload = append(payload, pair.Value[:]...) 
+ } + } else { + payload = pair.Value + } + + // If the payload starts with 0x1f, it's gzip, not json + if len(payload) >= 1 && payload[0] == '\x1f' { + payload, err = uncompressState(payload) + if err != nil { + return nil, err + } + } + + md5 := md5.Sum(payload) + + if hash != "" && fmt.Sprintf("%x", md5) != hash { + return nil, fmt.Errorf("The remote state does not match the expected hash") + } + + return &remote.Payload{ + Data: payload, + MD5: md5[:], + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + // The state can be stored in 4 different ways, based on the payload size + // and whether the user enabled gzip: + // - single entry mode with plain JSON: a single JSON is stored at + // "tfstate/my_project" + // - single entry mode gzip: the JSON payload is first gziped and stored at + // "tfstate/my_project" + // - chunked mode with plain JSON: the JSON payload is split in pieces and + // stored like so: + // - "tfstate/my_project" -> a JSON payload that contains the path of + // the chunks and an MD5 sum like so: + // { + // "current-hash": "abcdef1234", + // "chunks": [ + // "tfstate/my_project/tfstate.abcdef1234/0", + // "tfstate/my_project/tfstate.abcdef1234/1", + // "tfstate/my_project/tfstate.abcdef1234/2", + // ] + // } + // - "tfstate/my_project/tfstate.abcdef1234/0" -> The first chunk + // - "tfstate/my_project/tfstate.abcdef1234/1" -> The next one + // - ... + // - chunked mode with gzip: the same system but we gziped the JSON payload + // before splitting it in chunks + // + // When overwritting the current state, we need to clean the old chunks if + // we were in chunked mode (no matter whether we need to use chunks for the + // new one). 
To do so based on the 4 possibilities above we look at the + // value at "tfstate/my_project" and if it is: + // - absent then it's a new state and there will be nothing to cleanup, + // - not a JSON payload we were in single entry mode with gzip so there will + // be nothing to cleanup + // - a JSON payload, then we were either single entry mode with plain JSON + // or in chunked mode. To differentiate between the two we look whether a + // "current-hash" key is present in the payload. If we find one we were + // in chunked mode and we will need to remove the old chunks (whether or + // not we were using gzip does not matter in that case). + + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + // First we determine what mode we were using and to prepare the cleanup + chunked, hash, _, _, err := c.chunkedMode() + if err != nil { + return err + } + cleanupOldChunks := func() {} + if chunked { + cleanupOldChunks = func() { + // We ignore all errors that can happen here because we already + // saved the new state and there is no way to return a warning to + // the user. We may end up with dangling chunks but there is no way + // to be sure we won't. + path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) + kv.DeleteTree(path, nil) + } + } + + payload := data + if c.GZip { + if compressedState, err := compressState(data); err == nil { + payload = compressedState + } else { + return err + } + } + + // default to doing a CAS + verb := consulapi.KVCAS + + // Assume a 0 index doesn't need a CAS for now, since we are either + // creating a new state or purposely overwriting one. + if c.modifyIndex == 0 { + verb = consulapi.KVSet + } + + // The payload may be too large to store in a single KV entry in Consul. 
We + // could try to determine whether it will fit or not before sending the + // request but since we are using the Transaction API and not the KV API, + // it grows by about a 1/3 when it is base64 encoded plus the overhead of + // the fields specific to the Transaction API. + // Rather than trying to calculate the overhead (which could change from + // one version of Consul to another, and between Consul Community Edition + // and Consul Enterprise), we try to send the whole state in one request, if + // it fails because it is too big we then split it in chunks and send each + // chunk separately. + // When splitting in chunks, we make each chunk 524288 bits, which is the + // default max size for raft. If the user changed it, we still may send + // chunks too big and fail but this is not a setting that should be fiddled + // with anyway. + + store := func(payload []byte) error { + // KV.Put doesn't return the new index, so we use a single operation + // transaction to get the new index with a single request. 
+ txOps := consulapi.KVTxnOps{ + &consulapi.KVTxnOp{ + Verb: verb, + Key: c.Path, + Value: payload, + Index: c.modifyIndex, + }, + } + + ok, resp, _, err := kv.Txn(txOps, nil) + if err != nil { + return err + } + // transaction was rolled back + if !ok { + return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors) + } + + if len(resp.Results) != 1 { + // this probably shouldn't happen + return fmt.Errorf("expected on 1 response value, got: %d", len(resp.Results)) + } + + c.modifyIndex = resp.Results[0].ModifyIndex + + // We remove all the old chunks + cleanupOldChunks() + + return nil + } + + if err = store(payload); err == nil { + // The payload was small enough to be stored + return nil + } else if !strings.Contains(err.Error(), "too large") { + // We failed for some other reason, report this to the user + return err + } + + // The payload was too large so we split it in multiple chunks + + md5 := md5.Sum(data) + chunks := split(payload, 524288) + chunkPaths := make([]string, 0) + + // First we write the new chunks + for i, p := range chunks { + path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%x/%d", md5, i) + chunkPaths = append(chunkPaths, path) + _, err := kv.Put(&consulapi.KVPair{ + Key: path, + Value: p, + }, nil) + + if err != nil { + return err + } + } + + // Then we update the link to point to the new chunks + payload, err = json.Marshal(map[string]interface{}{ + "current-hash": fmt.Sprintf("%x", md5), + "chunks": chunkPaths, + }) + if err != nil { + return err + } + return store(payload) +} + +func (c *RemoteClient) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + chunked, hash, _, _, err := c.chunkedMode() + if err != nil { + return err + } + + _, err = kv.Delete(c.Path, nil) + + // If there were chunks we need to remove them + if chunked { + path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) + kv.DeleteTree(path, nil) + } + + return err +} + +func (c 
// lockPath returns the KV path used as the base key for the lock and
// lock-info entries.
func (c *RemoteClient) lockPath() string {
	// we sanitize the path for the lock as Consul does not like having
	// two consecutive slashes for the lock path
	return strings.TrimRight(c.Path, "/")
}

// putLockInfo records the lock metadata in the KV store so that other
// clients can report who currently holds the lock.
func (c *RemoteClient) putLockInfo(info *statemgr.LockInfo) error {
	info.Path = c.Path
	info.Created = time.Now().UTC()

	kv := c.Client.KV()
	_, err := kv.Put(&consulapi.KVPair{
		Key:   c.lockPath() + lockInfoSuffix,
		Value: info.Marshal(),
	}, nil)

	return err
}

// getLockInfo reads the lock metadata back from the KV store. It returns
// (nil, nil) when no lock-info entry exists.
func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
	path := c.lockPath() + lockInfoSuffix
	pair, _, err := c.Client.KV().Get(path, nil)
	if err != nil {
		return nil, err
	}
	if pair == nil {
		return nil, nil
	}

	li := &statemgr.LockInfo{}
	err = json.Unmarshal(pair.Value, li)
	if err != nil {
		return nil, fmt.Errorf("error unmarshaling lock info: %s", err)
	}

	return li, nil
}

// Lock acquires the state lock if locking is enabled for this client.
// It returns the lock ID (the Consul session ID) on success.
func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.lockState {
		return "", nil
	}

	c.info = info

	// These checks only are to ensure we strictly follow the specification.
	// Terraform shouldn't ever re-lock, so provide errors for the 2 possible
	// states if this is called.
	select {
	case <-c.lockCh:
		// We had a lock, but lost it.
		return "", errors.New("lost consul lock, cannot re-lock")
	default:
		if c.lockCh != nil {
			// we have an active lock already
			return "", fmt.Errorf("state %q already locked", c.Path)
		}
	}

	return c.lock()
}

// the lock implementation.
// Only to be called while holding Client.mu
func (c *RemoteClient) lock() (string, error) {
	// We create a new session here, so it can be canceled when the lock is
	// lost or unlocked.
	lockSession, err := c.createSession()
	if err != nil {
		return "", err
	}

	// store the session ID for correlation with consul logs
	c.info.Info = "consul session: " + lockSession

	// A random lock ID has been generated but we override it with the session
	// ID as this will make it easier to manually invalidate the session
	// if needed.
	c.info.ID = lockSession

	opts := &consulapi.LockOptions{
		Key:     c.lockPath() + lockSuffix,
		Session: lockSession,

		// only wait briefly, so terraform has the choice to fail fast or
		// retry as needed.
		LockWaitTime: time.Second,
		LockTryOnce:  true,

		// Don't let the lock monitor give up right away, as it's possible the
		// session is still OK. While the session is refreshed at a rate of
		// TTL/2, the lock monitor is an idle blocking request and is more
		// susceptible to being closed by a lower network layer.
		MonitorRetries: 5,
		//
		// The delay between lock monitor retries.
		// While the session has a 15s TTL plus a 5s wait period on a lost
		// lock, if we can't get our lock back in 10+ seconds something is
		// wrong so we're going to drop the session and start over.
		MonitorRetryTime: 2 * time.Second,
	}

	c.consulLock, err = c.Client.LockOpts(opts)
	if err != nil {
		return "", err
	}

	lockErr := &statemgr.LockError{}

	lockCh, err := c.consulLock.Lock(make(chan struct{}))
	if err != nil {
		lockErr.Err = err
		return "", lockErr
	}

	// A nil channel with no error means someone else holds the lock;
	// report the current holder's info to the user.
	if lockCh == nil {
		lockInfo, e := c.getLockInfo()
		if e != nil {
			lockErr.Err = e
			return "", lockErr
		}

		lockErr.Info = lockInfo

		return "", lockErr
	}

	c.lockCh = lockCh

	err = c.putLockInfo(c.info)
	if err != nil {
		// Writing the lock info failed: release the lock we just took and
		// surface both errors together.
		if unlockErr := c.unlock(c.info.ID); unlockErr != nil {
			err = multierror.Append(err, unlockErr)
		}

		return "", err
	}

	// Start a goroutine to monitor the lock state.
	// If we lose the lock due to communication issues with the consul agent,
	// attempt to immediately reacquire the lock. Put will verify the integrity
	// of the state by using a CAS operation.
	ctx, cancel := context.WithCancel(context.Background())
	c.monitorCancel = cancel
	c.monitorWG.Add(1)
	go func() {
		defer c.monitorWG.Done()
		select {
		case <-c.lockCh:
			log.Println("[ERROR] lost consul lock")
			for {
				c.mu.Lock()
				// We lost our lock, so we need to cancel the session too.
				// The CancelFunc is only replaced while holding Client.mu, so
				// this is safe to call here. This will be replaced by the
				// lock() call below.
				c.sessionCancel()

				c.consulLock = nil
				_, err := c.lock()
				c.mu.Unlock()

				if err != nil {
					// We failed to get the lock, keep trying as long as
					// terraform is running. There may be changes in progress,
					// so there's no use in aborting. Either we eventually
					// reacquire the lock, or a Put will fail on a CAS.
					log.Printf("[ERROR] could not reacquire lock: %s", err)
					time.Sleep(lockReacquireInterval)

					select {
					case <-ctx.Done():
						return
					default:
					}
					continue
				}

				// if the error was nil, the new lock started a new copy of
				// this goroutine.
				return
			}

		case <-ctx.Done():
			return
		}
	}()

	if testLockHook != nil {
		testLockHook()
	}

	return c.info.ID, nil
}

// called after a lock is acquired
var testLockHook func()
+ ctx, cancel := context.WithCancel(context.Background()) + c.sessionCancel = cancel + + session := c.Client.Session() + se := &consulapi.SessionEntry{ + Name: consulapi.DefaultLockSessionName, + TTL: lockSessionTTL, + LockDelay: lockDelay, + } + + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + + log.Println("[INFO] created consul lock session", id) + + // keep the session renewed + go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) + + return id, nil +} + +func (c *RemoteClient) Unlock(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return nil + } + + return c.unlock(id) +} + +// the unlock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) unlock(id string) error { + // This method can be called in two circumstances: + // - when the plan apply or destroy operation finishes and the lock needs to be released, + // the watchdog stopped and the session closed + // - when the user calls `terraform force-unlock ` in which case + // we only need to release the lock. + + if c.consulLock == nil || c.lockCh == nil { + // The user called `terraform force-unlock `, we just destroy + // the session which will release the lock, clean the KV store and quit. + + _, err := c.Client.Session().Destroy(id, nil) + if err != nil { + return err + } + // We ignore the errors that may happen during cleanup + kv := c.Client.KV() + kv.Delete(c.lockPath()+lockSuffix, nil) + kv.Delete(c.lockPath()+lockInfoSuffix, nil) + + return nil + } + + // cancel our monitoring goroutine + c.monitorCancel() + + defer func() { + c.consulLock = nil + + // The consul session is only used for this single lock, so cancel it + // after we unlock. + // The session is only created and replaced holding Client.mu, so the + // CancelFunc must be non-nil. 
// compressState gzips the given state payload so it takes less room in the
// Consul KV store.
func compressState(data []byte) ([]byte, error) {
	b := new(bytes.Buffer)
	gz := gzip.NewWriter(b)
	if _, err := gz.Write(data); err != nil {
		return nil, err
	}
	if err := gz.Flush(); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// uncompressState gunzips a payload previously produced by compressState
// and returns the original state bytes.
func uncompressState(data []byte) ([]byte, error) {
	b := new(bytes.Buffer)
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	// Check the read error: a truncated or corrupted stream would otherwise
	// be returned silently as partial state.
	if _, err := b.ReadFrom(gz); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// split partitions payload into chunks of at most limit bytes each. The
// final chunk holds whatever remains and may be shorter than limit. An
// empty payload yields no chunks.
func split(payload []byte, limit int) [][]byte {
	var chunk []byte
	chunks := make([][]byte, 0, len(payload)/limit+1)
	for len(payload) >= limit {
		chunk, payload = payload[:limit], payload[limit:]
		chunks = append(chunks, chunk)
	}
	if len(payload) > 0 {
		chunks = append(chunks, payload)
	}
	return chunks
}
probably been gziped in single entry mode. + if err == nil { + // If we find the "current-hash" key we were in chunked mode + hash, ok := d["current-hash"] + if ok { + chunks := make([]string, 0) + for _, c := range d["chunks"].([]interface{}) { + chunks = append(chunks, c.(string)) + } + return true, hash.(string), chunks, pair, nil + } + } + } + return false, "", nil, pair, nil +} diff --git a/backend/remote-state/consul/client_test.go b/backend/remote-state/consul/client_test.go new file mode 100644 index 000000000000..3bf9f1ee002a --- /dev/null +++ b/backend/remote-state/consul/client_test.go @@ -0,0 +1,491 @@ +package consul + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math/rand" + "net" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + srv := newConsulTestServer(t) + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // Get the backend + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + // Grab the client + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) + }) + } +} + +// test the gzip functionality of the client +func TestRemoteClient_gzipUpgrade(t *testing.T) { + srv := newConsulTestServer(t) + + statePath := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend + b := backend.TestBackendConfig(t, New(), 
backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": statePath, + })) + + // Grab the client + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) + + // create a new backend with gzip + b = backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": statePath, + "gzip": true, + })) + + // Grab the client + state, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) +} + +// TestConsul_largeState tries to write a large payload using the Consul state +// manager, as there is a limit to the size of the values in the KV store it +// will need to be split up before being saved and put back together when read. +func TestConsul_largeState(t *testing.T) { + srv := newConsulTestServer(t) + + path := "tf-unit/test-large-state" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c := s.(*remote.State).Client.(*RemoteClient) + c.Path = path + + // testPaths fails the test if the keys found at the prefix don't match + // what is expected + testPaths := func(t *testing.T, expected []string) { + kv := c.Client.KV() + pairs, _, err := kv.List(c.Path, nil) + if err != nil { + t.Fatal(err) + } + res := make([]string, 0) + for _, p := range pairs { + res = append(res, p.Key) + } + if !reflect.DeepEqual(res, expected) { + t.Fatalf("Wrong keys: %#v", res) + } + } + + testPayload := func(t *testing.T, data map[string]string, keys []string) { + payload, err := json.Marshal(data) + if err != nil { + t.Fatal(err) + } + err = c.Put(payload) + if err != nil { + t.Fatal("could not put payload", err) + } + + 
remote, err := c.Get() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(payload, remote.Data) { + t.Fatal("the data do not match") + } + + testPaths(t, keys) + } + + // The default limit for the size of the value in Consul is 524288 bytes + testPayload( + t, + map[string]string{ + "foo": strings.Repeat("a", 524288+2), + }, + []string{ + "tf-unit/test-large-state", + "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/0", + "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/1", + }, + ) + + // This payload is just short enough to be stored but will be bigger when + // going through the Transaction API as it will be base64 encoded + testPayload( + t, + map[string]string{ + "foo": strings.Repeat("a", 524288-10), + }, + []string{ + "tf-unit/test-large-state", + "tf-unit/test-large-state/tfstate.4f407ace136a86521fd0d366972fe5c7/0", + }, + ) + + // We try to replace the payload with a small one, the old chunks should be removed + testPayload( + t, + map[string]string{"var": "a"}, + []string{"tf-unit/test-large-state"}, + ) + + // Test with gzip and chunks + b = backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + "gzip": true, + })) + + s, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c = s.(*remote.State).Client.(*RemoteClient) + c.Path = path + + // We need a long random string so it results in multiple chunks even after + // being gziped + + // We use a fixed seed so the test can be reproductible + rand.Seed(1234) + RandStringRunes := func(n int) string { + var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) + } + + testPayload( + t, + map[string]string{ + "bar": RandStringRunes(5 * (524288 + 2)), + }, + []string{ + "tf-unit/test-large-state", + 
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/0", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/1", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/2", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/3", + }, + ) + + // Deleting the state should remove all chunks + err = c.Delete() + if err != nil { + t.Fatal(err) + } + testPaths(t, []string{}) +} + +func TestConsul_stateLock(t *testing.T) { + srv := newConsulTestServer(t) + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // create 2 instances to get 2 remote.Clients + sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client) + }) + } +} + +func TestConsul_destroyLock(t *testing.T) { + srv := newConsulTestServer(t) + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + testLock := func(client *RemoteClient, lockPath string) { + // get the lock val + pair, _, err := client.Client.KV().Get(lockPath, nil) + if err != nil { + t.Fatal(err) + } + if pair != nil { + t.Fatalf("lock key not cleaned up at: %s", pair.Key) + } + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // Get the backend + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + 
// Grab the client + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + clientA := s.(*remote.State).Client.(*RemoteClient) + + info := statemgr.NewLockInfo() + id, err := clientA.Lock(info) + if err != nil { + t.Fatal(err) + } + + lockPath := clientA.Path + lockSuffix + + if err := clientA.Unlock(id); err != nil { + t.Fatal(err) + } + + testLock(clientA, lockPath) + + // The release the lock from a second client to test the + // `terraform force-unlock ` functionnality + s, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + clientB := s.(*remote.State).Client.(*RemoteClient) + + info = statemgr.NewLockInfo() + id, err = clientA.Lock(info) + if err != nil { + t.Fatal(err) + } + + if err := clientB.Unlock(id); err != nil { + t.Fatal(err) + } + + testLock(clientA, lockPath) + + err = clientA.Unlock(id) + + if err == nil { + t.Fatal("consul lock should have been lost") + } + if err.Error() != "consul lock was lost" { + t.Fatal("got wrong error", err) + } + }) + } +} + +func TestConsul_lostLock(t *testing.T) { + srv := newConsulTestServer(t) + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // create 2 instances to get 2 remote.Clients + sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path + "-not-used", + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test-lost-lock" + id, err := sA.Lock(info) + if err != nil { + t.Fatal(err) + } + + reLocked := make(chan struct{}) + testLockHook = func() { + close(reLocked) + testLockHook = nil + } + + // now we use the second client to break the 
lock + kv := sB.(*remote.State).Client.(*RemoteClient).Client.KV() + _, err = kv.Delete(path+lockSuffix, nil) + if err != nil { + t.Fatal(err) + } + + <-reLocked + + if err := sA.Unlock(id); err != nil { + t.Fatal(err) + } +} + +func TestConsul_lostLockConnection(t *testing.T) { + srv := newConsulTestServer(t) + + // create an "unreliable" network by closing all the consul client's + // network connections + conns := &unreliableConns{} + origDialFn := dialContext + defer func() { + dialContext = origDialFn + }() + dialContext = conns.DialContext + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test-lost-lock-connection" + id, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + + // kill the connection a few times + for i := 0; i < 3; i++ { + dialed := conns.dialedDone() + // kill any open connections + conns.Kill() + // wait for a new connection to be dialed, and kill it again + <-dialed + } + + if err := s.Unlock(id); err != nil { + t.Fatal("unlock error:", err) + } +} + +type unreliableConns struct { + sync.Mutex + conns []net.Conn + dialCallback func() +} + +func (u *unreliableConns) DialContext(ctx context.Context, netw, addr string) (net.Conn, error) { + u.Lock() + defer u.Unlock() + + dialer := &net.Dialer{} + conn, err := dialer.DialContext(ctx, netw, addr) + if err != nil { + return nil, err + } + + u.conns = append(u.conns, conn) + + if u.dialCallback != nil { + u.dialCallback() + } + + return conn, nil +} + +func (u *unreliableConns) dialedDone() chan struct{} { + u.Lock() + defer u.Unlock() + dialed := make(chan struct{}) + u.dialCallback = func() { + defer close(dialed) + u.dialCallback = nil + } + + return dialed +} + +// Kill these with a deadline, just to 
make sure we don't end up with any EOFs +// that get ignored. +func (u *unreliableConns) Kill() { + u.Lock() + defer u.Unlock() + + for _, conn := range u.conns { + conn.(*net.TCPConn).SetDeadline(time.Now()) + } + u.conns = nil +} diff --git a/backend/remote-state/cos/backend.go b/backend/remote-state/cos/backend.go new file mode 100644 index 000000000000..92ff6c4f6179 --- /dev/null +++ b/backend/remote-state/cos/backend.go @@ -0,0 +1,335 @@ +package cos + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" + sts "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sts/v20180813" + tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" + "github.com/tencentyun/cos-go-sdk-v5" +) + +// Default value from environment variable +const ( + PROVIDER_SECRET_ID = "TENCENTCLOUD_SECRET_ID" + PROVIDER_SECRET_KEY = "TENCENTCLOUD_SECRET_KEY" + PROVIDER_SECURITY_TOKEN = "TENCENTCLOUD_SECURITY_TOKEN" + PROVIDER_REGION = "TENCENTCLOUD_REGION" + PROVIDER_ASSUME_ROLE_ARN = "TENCENTCLOUD_ASSUME_ROLE_ARN" + PROVIDER_ASSUME_ROLE_SESSION_NAME = "TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME" + PROVIDER_ASSUME_ROLE_SESSION_DURATION = "TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION" +) + +// Backend implements "backend".Backend for tencentCloud cos +type Backend struct { + *schema.Backend + credential *common.Credential + + cosContext context.Context + cosClient *cos.Client + tagClient *tag.Client + stsClient *sts.Client + + region string + bucket string + prefix string + key string + encrypt bool + acl string +} + +// New creates a new backend for TencentCloud cos remote state. 
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "secret_id": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_ID, nil), + Description: "Secret id of Tencent Cloud", + }, + "secret_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_KEY, nil), + Description: "Secret key of Tencent Cloud", + Sensitive: true, + }, + "security_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECURITY_TOKEN, nil), + Description: "TencentCloud Security Token of temporary access credentials. It can be sourced from the `TENCENTCLOUD_SECURITY_TOKEN` environment variable. Notice: for supported products, please refer to: [temporary key supported products](https://intl.cloud.tencent.com/document/product/598/10588).", + Sensitive: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_REGION, nil), + Description: "The region of the COS bucket", + InputDefault: "ap-guangzhou", + }, + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the COS bucket", + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory for saving the state file in bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { + return nil, []error{fmt.Errorf("prefix must not start with '/' or './'")} + } + return nil, nil + }, + }, + "key": { + Type: schema.TypeString, + Optional: true, + Description: "The path for saving the state file in bucket", + Default: "terraform.tfstate", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { + return nil, []error{fmt.Errorf("key can not start and end with 
'/'")} + } + return nil, nil + }, + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: true, + }, + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Object ACL to be applied to the state file", + Default: "private", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + value := v.(string) + if value != "private" && value != "public-read" { + return nil, []error{fmt.Errorf( + "acl value invalid, expected %s or %s, got %s", + "private", "public-read", value)} + } + return nil, nil + }, + }, + "accelerate": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable global Acceleration", + Default: false, + }, + "assume_role": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Description: "The `assume_role` block. If provided, terraform will attempt to assume this role using the supplied credentials.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_ARN, nil), + Description: "The ARN of the role to assume. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_ARN`.", + }, + "session_name": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_SESSION_NAME, nil), + Description: "The session name to use when making the AssumeRole call. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME`.", + }, + "session_duration": { + Type: schema.TypeInt, + Required: true, + DefaultFunc: func() (interface{}, error) { + if v := os.Getenv(PROVIDER_ASSUME_ROLE_SESSION_DURATION); v != "" { + return strconv.Atoi(v) + } + return 7200, nil + }, + ValidateFunc: validateIntegerInRange(0, 43200), + Description: "The duration of the session when making the AssumeRole call. 
Its value ranges from 0 to 43200(seconds), and default is 7200 seconds. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION`.", + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "A more restrictive policy when making the AssumeRole call. Its content must not contains `principal` elements. Notice: more syntax references, please refer to: [policies syntax logic](https://intl.cloud.tencent.com/document/product/598/10603).", + }, + }, + }, + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + + return result +} + +func validateIntegerInRange(min, max int64) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := int64(v.(int)) + if value < min { + errors = append(errors, fmt.Errorf( + "%q cannot be lower than %d: %d", k, min, value)) + } + if value > max { + errors = append(errors, fmt.Errorf( + "%q cannot be higher than %d: %d", k, max, value)) + } + return + } +} + +// configure init cos client +func (b *Backend) configure(ctx context.Context) error { + if b.cosClient != nil { + return nil + } + + b.cosContext = ctx + data := schema.FromContextBackendConfig(b.cosContext) + + b.region = data.Get("region").(string) + b.bucket = data.Get("bucket").(string) + b.prefix = data.Get("prefix").(string) + b.key = data.Get("key").(string) + b.encrypt = data.Get("encrypt").(bool) + b.acl = data.Get("acl").(string) + + var ( + u *url.URL + err error + ) + accelerate := data.Get("accelerate").(bool) + if accelerate { + u, err = url.Parse(fmt.Sprintf("https://%s.cos.accelerate.myqcloud.com", b.bucket)) + } else { + u, err = url.Parse(fmt.Sprintf("https://%s.cos.%s.myqcloud.com", b.bucket, b.region)) + } + if err != nil { + return err + } + + secretId := data.Get("secret_id").(string) + secretKey := data.Get("secret_key").(string) + securityToken := data.Get("security_token").(string) + + // init credential by AKSK & TOKEN + b.credential 
// handleAssumeRole reads the optional assume_role block from the backend
// configuration and, when present, replaces the backend's static credential
// with a temporary STS credential.
func handleAssumeRole(data *schema.ResourceData, b *Backend) error {
	assumeRoleList := data.Get("assume_role").(*schema.Set).List()
	if len(assumeRoleList) == 1 {
		assumeRole := assumeRoleList[0].(map[string]interface{})
		assumeRoleArn := assumeRole["role_arn"].(string)
		assumeRoleSessionName := assumeRole["session_name"].(string)
		assumeRoleSessionDuration := assumeRole["session_duration"].(int)
		assumeRolePolicy := assumeRole["policy"].(string)

		err := b.updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName, assumeRoleSessionDuration, assumeRolePolicy)
		if err != nil {
			return err
		}
	}
	return nil
}

// updateCredentialWithSTS calls STS AssumeRole with the given role settings
// and swaps b.credential for the returned temporary credential.
func (b *Backend) updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName string, assumeRoleSessionDuration int, assumeRolePolicy string) error {
	// assume role by STS
	request := sts.NewAssumeRoleRequest()
	request.RoleArn = &assumeRoleArn
	request.RoleSessionName = &assumeRoleSessionName
	duration := uint64(assumeRoleSessionDuration)
	request.DurationSeconds = &duration
	if assumeRolePolicy != "" {
		// the policy document must be URL-encoded before it is sent
		policy := url.QueryEscape(assumeRolePolicy)
		request.Policy = &policy
	}

	response, err := b.UseStsClient().AssumeRole(request)
	if err != nil {
		return err
	}
	// update credentials by result of assume role
	b.credential = common.NewTokenCredential(
		*response.Response.Credentials.TmpSecretId,
		*response.Response.Credentials.TmpSecretKey,
		*response.Response.Credentials.Token,
	)

	return nil
}

// UseStsClient returns sts client for service, creating and caching it on
// first use.
func (b *Backend) UseStsClient() *sts.Client {
	if b.stsClient != nil {
		return b.stsClient
	}
	cpf := b.NewClientProfile(300)
	b.stsClient, _ = sts.NewClient(b.credential, b.region, cpf)
	b.stsClient.WithHttpTransport(&LogRoundTripper{})

	return b.stsClient
}

// UseTagClient returns tag client for service, creating and caching it on
// first use.
func (b *Backend) UseTagClient() *tag.Client {
	if b.tagClient != nil {
		return b.tagClient
	}
	cpf := b.NewClientProfile(300)
	cpf.Language = "en-US"
	b.tagClient, _ = tag.NewClient(b.credential, b.region, cpf)
	return b.tagClient
}

// NewClientProfile returns a new ClientProfile with the given request
// timeout in seconds.
func (b *Backend) NewClientProfile(timeout int) *profile.ClientProfile {
	cpf := profile.NewClientProfile()

	// all request use method POST
	cpf.HttpProfile.ReqMethod = "POST"
	// request timeout
	cpf.HttpProfile.ReqTimeout = timeout

	return cpf
}
default worksapce + if path.Join(b.prefix, b.key) == vv.Key { + continue + } + // // + prefix := strings.TrimRight(b.prefix, "/") + "/" + parts := strings.Split(strings.TrimPrefix(vv.Key, prefix), "/") + if len(parts) > 0 && parts[0] != "" { + ws = append(ws, parts[0]) + } + } + + sort.Strings(ws[1:]) + log.Printf("[DEBUG] list all workspaces, workspaces: %v", ws) + + return ws, nil +} + +// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted. +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + log.Printf("[DEBUG] delete workspace, workspace: %v", name) + + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("default state is not allow to delete") + } + + c, err := b.client(name) + if err != nil { + return err + } + + return c.Delete() +} + +// StateMgr manage the state, if the named state not exists, a new file will created +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + log.Printf("[DEBUG] state manager, current workspace: %v", name) + + c, err := b.client(name) + if err != nil { + return nil, err + } + stateMgr := &remote.State{Client: c} + + ws, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, candidate := range ws { + if candidate == name { + exists = true + break + } + } + + if !exists { + log.Printf("[DEBUG] workspace %v not exists", name) + + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := c.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("Failed to lock cos state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(e error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(unlockErrMsg, err, lockId) + } + return e + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we 
have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + + return stateMgr, nil +} + +// client returns a remoteClient for the named state. +func (b *Backend) client(name string) (*remoteClient, error) { + if strings.TrimSpace(name) == "" { + return nil, fmt.Errorf("state name not allow to be empty") + } + + return &remoteClient{ + cosContext: b.cosContext, + cosClient: b.cosClient, + tagClient: b.tagClient, + bucket: b.bucket, + stateFile: b.stateFile(name), + lockFile: b.lockFile(name), + encrypt: b.encrypt, + acl: b.acl, + }, nil +} + +// stateFile returns state file path by name +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName { + return path.Join(b.prefix, b.key) + } + return path.Join(b.prefix, name, b.key) +} + +// lockFile returns lock file path by name +func (b *Backend) lockFile(name string) string { + return b.stateFile(name) + lockFileSuffix +} + +// unlockErrMsg is error msg for unlock failed +const unlockErrMsg = ` +Unlocking the state file on TencentCloud cos backend failed: + +Error message: %v +Lock ID (gen): %s + +You may have to force-unlock this state in order to use it again. +The TencentCloud backend acquires a lock during initialization +to ensure the initial state file is created. 
+` diff --git a/backend/remote-state/cos/backend_test.go b/backend/remote-state/cos/backend_test.go new file mode 100644 index 000000000000..fb9894ee8b0f --- /dev/null +++ b/backend/remote-state/cos/backend_test.go @@ -0,0 +1,256 @@ +package cos + +import ( + "crypto/md5" + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" +) + +const ( + defaultPrefix = "" + defaultKey = "terraform.tfstate" +) + +// Testing Thanks to GCS + +func TestStateFile(t *testing.T) { + t.Parallel() + + cases := []struct { + prefix string + stateName string + key string + wantStateFile string + wantLockFile string + }{ + {"", "default", "default.tfstate", "default.tfstate", "default.tfstate.tflock"}, + {"", "default", "test.tfstate", "test.tfstate", "test.tfstate.tflock"}, + {"", "dev", "test.tfstate", "dev/test.tfstate", "dev/test.tfstate.tflock"}, + {"terraform/test", "default", "default.tfstate", "terraform/test/default.tfstate", "terraform/test/default.tfstate.tflock"}, + {"terraform/test", "default", "test.tfstate", "terraform/test/test.tfstate", "terraform/test/test.tfstate.tflock"}, + {"terraform/test", "dev", "test.tfstate", "terraform/test/dev/test.tfstate", "terraform/test/dev/test.tfstate.tflock"}, + } + + for _, c := range cases { + t.Run(fmt.Sprintf("%s %s %s", c.prefix, c.key, c.stateName), func(t *testing.T) { + b := &Backend{ + prefix: c.prefix, + key: c.key, + } + if got, want := b.stateFile(c.stateName), c.wantStateFile; got != want { + t.Errorf("wrong state file name\ngot: %s\nwant: %s", got, want) + } + if got, want := b.lockFile(c.stateName), c.wantLockFile; got != want { + t.Errorf("wrong lock file name\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestRemoteClient(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err 
!= nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteClientWithPrefix(t *testing.T) { + t.Parallel() + + prefix := "prefix/test" + bucket := bucketName(t) + + be := setupBackend(t, bucket, prefix, defaultKey, false) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteClientWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteLocks(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be) + + remoteClient := func() (remote.Client, error) { + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + return nil, err + } + + rs, ok := ss.(*remote.State) + if !ok { + return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + return rs.Client, nil + } + + c0, err := remoteClient() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + c1, err := remoteClient() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + remote.TestRemoteLocks(t, c0, c1) +} + +func TestBackend(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, 
false) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be1) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) + backend.TestBackendStateForceUnlock(t, be0, be1) +} + +func TestBackendWithPrefix(t *testing.T) { + t.Parallel() + + prefix := "prefix/test" + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, prefix, defaultKey, false) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, prefix+"/", defaultKey, false) + defer teardownBackend(t, be1) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func TestBackendWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be1) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func setupBackend(t *testing.T, bucket, prefix, key string, encrypt bool) backend.Backend { + t.Helper() + + skip := os.Getenv("TF_COS_APPID") == "" + if skip { + t.Skip("This test require setting TF_COS_APPID environment variables") + } + + if os.Getenv(PROVIDER_REGION) == "" { + os.Setenv(PROVIDER_REGION, "ap-guangzhou") + } + + appId := os.Getenv("TF_COS_APPID") + region := os.Getenv(PROVIDER_REGION) + + config := map[string]interface{}{ + "region": region, + "bucket": bucket + appId, + "prefix": prefix, + "key": key, + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)) + be := b.(*Backend) + + c, err := be.client("tencentcloud") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + err = c.putBucket() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + return b +} + +func teardownBackend(t *testing.T, b backend.Backend) { + t.Helper() + + c, err := 
b.(*Backend).client("tencentcloud") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + err = c.deleteBucket(true) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func bucketName(t *testing.T) string { + unique := fmt.Sprintf("%s-%x", t.Name(), time.Now().UnixNano()) + return fmt.Sprintf("terraform-test-%s-%s", fmt.Sprintf("%x", md5.Sum([]byte(unique)))[:10], "") +} diff --git a/backend/remote-state/cos/client.go b/backend/remote-state/cos/client.go new file mode 100644 index 000000000000..6921dd576806 --- /dev/null +++ b/backend/remote-state/cos/client.go @@ -0,0 +1,442 @@ +package cos + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" + "github.com/tencentyun/cos-go-sdk-v5" +) + +const ( + lockTagKey = "tencentcloud-terraform-lock" +) + +// RemoteClient implements the client of remote state +type remoteClient struct { + cosContext context.Context + cosClient *cos.Client + tagClient *tag.Client + + bucket string + stateFile string + lockFile string + encrypt bool + acl string +} + +// Get returns remote state file +func (c *remoteClient) Get() (*remote.Payload, error) { + log.Printf("[DEBUG] get remote state file %s", c.stateFile) + + exists, data, checksum, err := c.getObject(c.stateFile) + if err != nil { + return nil, err + } + + if !exists { + return nil, nil + } + + payload := &remote.Payload{ + Data: data, + MD5: []byte(checksum), + } + + return payload, nil +} + +// Put put state file to remote +func (c *remoteClient) Put(data []byte) error { + log.Printf("[DEBUG] put remote state file %s", c.stateFile) + + return c.putObject(c.stateFile, data) +} + +// Delete delete remote state file +func (c *remoteClient) Delete() 
error { + log.Printf("[DEBUG] delete remote state file %s", c.stateFile) + + return c.deleteObject(c.stateFile) +} + +// Lock lock remote state file for writing +func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { + log.Printf("[DEBUG] lock remote state file %s", c.lockFile) + + err := c.cosLock(c.bucket, c.lockFile) + if err != nil { + return "", c.lockError(err) + } + defer c.cosUnlock(c.bucket, c.lockFile) + + exists, _, _, err := c.getObject(c.lockFile) + if err != nil { + return "", c.lockError(err) + } + + if exists { + return "", c.lockError(fmt.Errorf("lock file %s exists", c.lockFile)) + } + + info.Path = c.lockFile + data, err := json.Marshal(info) + if err != nil { + return "", c.lockError(err) + } + + check := fmt.Sprintf("%x", md5.Sum(data)) + err = c.putObject(c.lockFile, data) + if err != nil { + return "", c.lockError(err) + } + + return check, nil +} + +// Unlock unlock remote state file +func (c *remoteClient) Unlock(check string) error { + log.Printf("[DEBUG] unlock remote state file %s", c.lockFile) + + info, err := c.lockInfo() + if err != nil { + return c.lockError(err) + } + + if info.ID != check { + return c.lockError(fmt.Errorf("lock id mismatch, %v != %v", info.ID, check)) + } + + err = c.deleteObject(c.lockFile) + if err != nil { + return c.lockError(err) + } + + err = c.cosUnlock(c.bucket, c.lockFile) + if err != nil { + return c.lockError(err) + } + + return nil +} + +// lockError returns statemgr.LockError +func (c *remoteClient) lockError(err error) *statemgr.LockError { + log.Printf("[DEBUG] failed to lock or unlock %s: %v", c.lockFile, err) + + lockErr := &statemgr.LockError{ + Err: err, + } + + info, infoErr := c.lockInfo() + if infoErr != nil { + lockErr.Err = multierror.Append(lockErr.Err, infoErr) + } else { + lockErr.Info = info + } + + return lockErr +} + +// lockInfo returns LockInfo from lock file +func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) { + exists, data, checksum, err := 
c.getObject(c.lockFile) + if err != nil { + return nil, err + } + + if !exists { + return nil, fmt.Errorf("lock file %s not exists", c.lockFile) + } + + info := &statemgr.LockInfo{} + if err := json.Unmarshal(data, info); err != nil { + return nil, err + } + + info.ID = checksum + + return info, nil +} + +// getObject get remote object +func (c *remoteClient) getObject(cosFile string) (exists bool, data []byte, checksum string, err error) { + rsp, err := c.cosClient.Object.Get(c.cosContext, cosFile, nil) + if rsp == nil { + log.Printf("[DEBUG] getObject %s: error: %v", cosFile, err) + err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) + return + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] getObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if err != nil { + if rsp.StatusCode == 404 { + err = nil + } else { + err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) + } + return + } + + checksum = rsp.Header.Get("X-Cos-Meta-Md5") + log.Printf("[DEBUG] getObject %s: checksum: %s", cosFile, checksum) + if len(checksum) != 32 { + err = fmt.Errorf("failed to open file at %v: checksum %s invalid", cosFile, checksum) + return + } + + exists = true + data, err = ioutil.ReadAll(rsp.Body) + log.Printf("[DEBUG] getObject %s: data length: %d", cosFile, len(data)) + if err != nil { + err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) + return + } + + check := fmt.Sprintf("%x", md5.Sum(data)) + log.Printf("[DEBUG] getObject %s: check: %s", cosFile, check) + if check != checksum { + err = fmt.Errorf("failed to open file at %v: checksum mismatch, %s != %s", cosFile, check, checksum) + return + } + + return +} + +// putObject put object to remote +func (c *remoteClient) putObject(cosFile string, data []byte) error { + opt := &cos.ObjectPutOptions{ + ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{ + XCosMetaXXX: &http.Header{ + "X-Cos-Meta-Md5": []string{fmt.Sprintf("%x", md5.Sum(data))}, + }, + }, + ACLHeaderOptions: 
&cos.ACLHeaderOptions{ + XCosACL: c.acl, + }, + } + + if c.encrypt { + opt.ObjectPutHeaderOptions.XCosServerSideEncryption = "AES256" + } + + r := bytes.NewReader(data) + rsp, err := c.cosClient.Object.Put(c.cosContext, cosFile, r, opt) + if rsp == nil { + log.Printf("[DEBUG] putObject %s: error: %v", cosFile, err) + return fmt.Errorf("failed to save file to %v: %v", cosFile, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] putObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if err != nil { + return fmt.Errorf("failed to save file to %v: %v", cosFile, err) + } + + return nil +} + +// deleteObject delete remote object +func (c *remoteClient) deleteObject(cosFile string) error { + rsp, err := c.cosClient.Object.Delete(c.cosContext, cosFile) + if rsp == nil { + log.Printf("[DEBUG] deleteObject %s: error: %v", cosFile, err) + return fmt.Errorf("failed to delete file %v: %v", cosFile, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] deleteObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to delete file %v: %v", cosFile, err) + } + + return nil +} + +// getBucket list bucket by prefix +func (c *remoteClient) getBucket(prefix string) (obs []cos.Object, err error) { + fs, rsp, err := c.cosClient.Bucket.Get(c.cosContext, &cos.BucketGetOptions{Prefix: prefix}) + if rsp == nil { + log.Printf("[DEBUG] getBucket %s/%s: error: %v", c.bucket, prefix, err) + err = fmt.Errorf("bucket %s not exists", c.bucket) + return + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] getBucket %s/%s: code: %d, error: %v", c.bucket, prefix, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + err = fmt.Errorf("bucket %s not exists", c.bucket) + return + } + + if err != nil { + return + } + + return fs.Contents, nil +} + +// putBucket create cos bucket +func (c *remoteClient) putBucket() error { + rsp, err := c.cosClient.Bucket.Put(c.cosContext, nil) + if rsp 
== nil { + log.Printf("[DEBUG] putBucket %s: error: %v", c.bucket, err) + return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] putBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) + if rsp.StatusCode == 409 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err) + } + + return nil +} + +// deleteBucket delete cos bucket +func (c *remoteClient) deleteBucket(recursive bool) error { + if recursive { + obs, err := c.getBucket("") + if err != nil { + if strings.Contains(err.Error(), "not exists") { + return nil + } + log.Printf("[DEBUG] deleteBucket %s: empty bucket error: %v", c.bucket, err) + return fmt.Errorf("failed to empty bucket %v: %v", c.bucket, err) + } + for _, v := range obs { + c.deleteObject(v.Key) + } + } + + rsp, err := c.cosClient.Bucket.Delete(c.cosContext) + if rsp == nil { + log.Printf("[DEBUG] deleteBucket %s: error: %v", c.bucket, err) + return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] deleteBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err) + } + + return nil +} + +// cosLock lock cos for writing +func (c *remoteClient) cosLock(bucket, cosFile string) error { + log.Printf("[DEBUG] lock cos file %s:%s", bucket, cosFile) + + cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) + lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath))) + + return c.CreateTag(lockTagKey, lockTagValue) +} + +// cosUnlock unlock cos writing +func (c *remoteClient) cosUnlock(bucket, cosFile string) error { + log.Printf("[DEBUG] unlock cos file %s:%s", bucket, cosFile) + + cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) + lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath))) + + var err error + for i := 0; i < 30; i++ { 
+ tagExists, err := c.CheckTag(lockTagKey, lockTagValue) + + if err != nil { + return err + } + + if !tagExists { + return nil + } + + err = c.DeleteTag(lockTagKey, lockTagValue) + if err == nil { + return nil + } + time.Sleep(1 * time.Second) + } + + return err +} + +// CheckTag checks if tag key:value exists +func (c *remoteClient) CheckTag(key, value string) (exists bool, err error) { + request := tag.NewDescribeTagsRequest() + request.TagKey = &key + request.TagValue = &value + + response, err := c.tagClient.DescribeTags(request) + log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) + if err != nil { + return + } + + if len(response.Response.Tags) == 0 { + return + } + + tagKey := response.Response.Tags[0].TagKey + tagValue := response.Response.Tags[0].TagValue + + exists = key == *tagKey && value == *tagValue + + return +} + +// CreateTag create tag by key and value +func (c *remoteClient) CreateTag(key, value string) error { + request := tag.NewCreateTagRequest() + request.TagKey = &key + request.TagValue = &value + + _, err := c.tagClient.CreateTag(request) + log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) + if err != nil { + return fmt.Errorf("failed to create tag: %s -> %s: %s", key, value, err) + } + + return nil +} + +// DeleteTag create tag by key and value +func (c *remoteClient) DeleteTag(key, value string) error { + request := tag.NewDeleteTagRequest() + request.TagKey = &key + request.TagValue = &value + + _, err := c.tagClient.DeleteTag(request) + log.Printf("[DEBUG] delete tag %s:%s: error: %v", key, value, err) + if err != nil { + return fmt.Errorf("failed to delete tag: %s -> %s: %s", key, value, err) + } + + return nil +} diff --git a/internal/backend/remote-state/cos/transport.go b/backend/remote-state/cos/transport.go similarity index 100% rename from internal/backend/remote-state/cos/transport.go rename to backend/remote-state/cos/transport.go diff --git a/backend/remote-state/gcs/backend.go 
b/backend/remote-state/gcs/backend.go new file mode 100644 index 000000000000..4a5b7df8bcd4 --- /dev/null +++ b/backend/remote-state/gcs/backend.go @@ -0,0 +1,249 @@ +// Package gcs implements remote storage of state on Google Cloud Storage (GCS). +package gcs + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "strings" + + "cloud.google.com/go/storage" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/legacy/helper/schema" + "golang.org/x/oauth2" + "google.golang.org/api/impersonate" + "google.golang.org/api/option" +) + +// Backend implements "backend".Backend for GCS. +// Input(), Validate() and Configure() are implemented by embedding *schema.Backend. +// State(), DeleteState() and States() are implemented explicitly. +type Backend struct { + *schema.Backend + + storageClient *storage.Client + storageContext context.Context + + bucketName string + prefix string + + encryptionKey []byte + kmsKeyName string +} + +func New() backend.Backend { + b := &Backend{} + b.Backend = &schema.Backend{ + ConfigureFunc: b.configure, + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the Google Cloud Storage bucket", + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory where state files will be saved inside the bucket", + }, + + "credentials": { + Type: schema.TypeString, + Optional: true, + Description: "Google Cloud JSON Account Key", + Default: "", + }, + + "access_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }, nil), + Description: "An OAuth2 token used for GCP authentication", + }, + + "impersonate_service_account": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BACKEND_IMPERSONATE_SERVICE_ACCOUNT", + 
"GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", + }, nil), + Description: "The service account to impersonate for all Google API Calls", + }, + + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Optional: true, + Description: "The delegation chain for the impersonated service account", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "encryption_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_ENCRYPTION_KEY", + }, nil), + Description: "A 32 byte base64 encoded 'customer supplied encryption key' used when reading and writing state files in the bucket.", + ConflictsWith: []string{"kms_encryption_key"}, + }, + + "kms_encryption_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_KMS_ENCRYPTION_KEY", + }, nil), + Description: "A Cloud KMS key ('customer managed encryption key') used when reading and writing state files in the bucket. Format should be 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}'.", + ConflictsWith: []string{"encryption_key"}, + }, + + "storage_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BACKEND_STORAGE_CUSTOM_ENDPOINT", + "GOOGLE_STORAGE_CUSTOM_ENDPOINT", + }, nil), + }, + }, + } + + return b +} + +func (b *Backend) configure(ctx context.Context) error { + if b.storageClient != nil { + return nil + } + + // ctx is a background context with the backend config added. + // Since no context is passed to remoteClient.Get(), .Lock(), etc. but + // one is required for calling the GCP API, we're holding on to this + // context here and re-use it later. 
+ b.storageContext = ctx + + data := schema.FromContextBackendConfig(b.storageContext) + + b.bucketName = data.Get("bucket").(string) + b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/") + if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") { + b.prefix = b.prefix + "/" + } + + var opts []option.ClientOption + var credOptions []option.ClientOption + + // Add credential source + var creds string + var tokenSource oauth2.TokenSource + + if v, ok := data.GetOk("access_token"); ok { + tokenSource = oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: v.(string), + }) + } else if v, ok := data.GetOk("credentials"); ok { + creds = v.(string) + } else if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { + creds = v + } else { + creds = os.Getenv("GOOGLE_CREDENTIALS") + } + + if tokenSource != nil { + credOptions = append(credOptions, option.WithTokenSource(tokenSource)) + } else if creds != "" { + + // to mirror how the provider works, we accept the file path or the contents + contents, err := backend.ReadPathOrContents(creds) + if err != nil { + return fmt.Errorf("Error loading credentials: %s", err) + } + + if !json.Valid([]byte(contents)) { + return fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") + } + + credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) + } + + // Service Account Impersonation + if v, ok := data.GetOk("impersonate_service_account"); ok { + ServiceAccount := v.(string) + var delegates []string + + if v, ok := data.GetOk("impersonate_service_account_delegates"); ok { + d := v.([]interface{}) + if len(delegates) > 0 { + delegates = make([]string, 0, len(d)) + } + for _, delegate := range d { + delegates = append(delegates, delegate.(string)) + } + } + + ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: ServiceAccount, + Scopes: []string{storage.ScopeReadWrite}, + Delegates: delegates, + }, credOptions...) 
+ + if err != nil { + return err + } + + opts = append(opts, option.WithTokenSource(ts)) + + } else { + opts = append(opts, credOptions...) + } + + opts = append(opts, option.WithUserAgent(httpclient.UserAgentString())) + + // Custom endpoint for storage API + if storageEndpoint, ok := data.GetOk("storage_custom_endpoint"); ok { + endpoint := option.WithEndpoint(storageEndpoint.(string)) + opts = append(opts, endpoint) + } + client, err := storage.NewClient(b.storageContext, opts...) + if err != nil { + return fmt.Errorf("storage.NewClient() failed: %v", err) + } + + b.storageClient = client + + // Customer-supplied encryption + key := data.Get("encryption_key").(string) + if key != "" { + kc, err := backend.ReadPathOrContents(key) + if err != nil { + return fmt.Errorf("Error loading encryption key: %s", err) + } + + // The GCS client expects a customer supplied encryption key to be + // passed in as a 32 byte long byte slice. The byte slice is base64 + // encoded before being passed to the API. We take a base64 encoded key + // to remain consistent with the GCS docs. 
+ // https://cloud.google.com/storage/docs/encryption#customer-supplied + // https://github.com/GoogleCloudPlatform/google-cloud-go/blob/def681/storage/storage.go#L1181 + k, err := base64.StdEncoding.DecodeString(kc) + if err != nil { + return fmt.Errorf("Error decoding encryption key: %s", err) + } + b.encryptionKey = k + } + + // Customer-managed encryption + kmsName := data.Get("kms_encryption_key").(string) + if kmsName != "" { + b.kmsKeyName = kmsName + } + + return nil +} diff --git a/backend/remote-state/gcs/backend_state.go b/backend/remote-state/gcs/backend_state.go new file mode 100644 index 000000000000..e7294e3048a0 --- /dev/null +++ b/backend/remote-state/gcs/backend_state.go @@ -0,0 +1,155 @@ +package gcs + +import ( + "fmt" + "path" + "sort" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + stateFileSuffix = ".tfstate" + lockFileSuffix = ".tflock" +) + +// Workspaces returns a list of names for the workspaces found on GCS. The default +// state is always returned as the first element in the slice. 
+func (b *Backend) Workspaces() ([]string, error) { + states := []string{backend.DefaultStateName} + + bucket := b.storageClient.Bucket(b.bucketName) + objs := bucket.Objects(b.storageContext, &storage.Query{ + Delimiter: "/", + Prefix: b.prefix, + }) + for { + attrs, err := objs.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("querying Cloud Storage failed: %v", err) + } + + name := path.Base(attrs.Name) + if !strings.HasSuffix(name, stateFileSuffix) { + continue + } + st := strings.TrimSuffix(name, stateFileSuffix) + + if st != backend.DefaultStateName { + states = append(states, st) + } + } + + sort.Strings(states[1:]) + return states, nil +} + +// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted. +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName { + return fmt.Errorf("cowardly refusing to delete the %q state", name) + } + + c, err := b.client(name) + if err != nil { + return err + } + + return c.Delete() +} + +// client returns a remoteClient for the named state. +func (b *Backend) client(name string) (*remoteClient, error) { + if name == "" { + return nil, fmt.Errorf("%q is not a valid state name", name) + } + + return &remoteClient{ + storageContext: b.storageContext, + storageClient: b.storageClient, + bucketName: b.bucketName, + stateFilePath: b.stateFile(name), + lockFilePath: b.lockFile(name), + encryptionKey: b.encryptionKey, + kmsKeyName: b.kmsKeyName, + }, nil +} + +// StateMgr reads and returns the named state from GCS. If the named state does +// not yet exist, a new state file is created. 
+func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + c, err := b.client(name) + if err != nil { + return nil, err + } + + st := &remote.State{Client: c} + + // Grab the value + if err := st.RefreshState(); err != nil { + return nil, err + } + + // If we have no state, we have to create an empty state + if v := st.State(); v == nil { + + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := st.Lock(lockInfo) + if err != nil { + return nil, err + } + + // Local helper function so we can call it multiple places + unlock := func(baseErr error) error { + if err := st.Unlock(lockID); err != nil { + const unlockErrMsg = `%v + Additionally, unlocking the state file on Google Cloud Storage failed: + + Error message: %q + Lock ID (gen): %v + Lock file URL: %v + + You may have to force-unlock this state in order to use it again. + The GCloud backend acquires a lock during initialization to ensure + the initial state file is created.` + return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) + } + + return baseErr + } + + if err := st.WriteState(states.NewState()); err != nil { + return nil, unlock(err) + } + if err := st.PersistState(nil); err != nil { + return nil, unlock(err) + } + + // Unlock, the state should now be initialized + if err := unlock(nil); err != nil { + return nil, err + } + + } + + return st, nil +} + +func (b *Backend) stateFile(name string) string { + return path.Join(b.prefix, name+stateFileSuffix) +} + +func (b *Backend) lockFile(name string) string { + return path.Join(b.prefix, name+lockFileSuffix) +} diff --git a/backend/remote-state/gcs/backend_test.go b/backend/remote-state/gcs/backend_test.go new file mode 100644 index 000000000000..6b77bdb86784 --- /dev/null +++ b/backend/remote-state/gcs/backend_test.go @@ -0,0 +1,441 @@ +package gcs + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "strings" + "testing" + "time" + + kms "cloud.google.com/go/kms/apiv1" + 
"cloud.google.com/go/storage" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/states/remote" + "google.golang.org/api/option" + kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" +) + +const ( + noPrefix = "" + noEncryptionKey = "" + noKmsKeyName = "" +) + +// See https://cloud.google.com/storage/docs/using-encryption-keys#generating_your_own_encryption_key +const encryptionKey = "yRyCOikXi1ZDNE0xN3yiFsJjg7LGimoLrGFcLZgQoVk=" + +// KMS key ring name and key name are hardcoded here and re-used because key rings (and keys) cannot be deleted +// Test code asserts their presence and creates them if they're absent. They're not deleted at the end of tests. +// See: https://cloud.google.com/kms/docs/faq#cannot_delete +const ( + keyRingName = "tf-gcs-backend-acc-tests" + keyName = "tf-test-key-1" + kmsRole = "roles/cloudkms.cryptoKeyEncrypterDecrypter" // GCS service account needs this binding on the created key +) + +var keyRingLocation = os.Getenv("GOOGLE_REGION") + +func TestStateFile(t *testing.T) { + t.Parallel() + + cases := []struct { + prefix string + name string + wantStateFile string + wantLockFile string + }{ + {"state", "default", "state/default.tfstate", "state/default.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + } + for _, c := range cases { + b := &Backend{ + prefix: c.prefix, + } + + if got := b.stateFile(c.name); got != c.wantStateFile { + t.Errorf("stateFile(%q) = %q, want %q", c.name, got, c.wantStateFile) + } + + if got := b.lockFile(c.name); got != c.wantLockFile { + t.Errorf("lockFile(%q) = %q, want %q", c.name, got, c.wantLockFile) + } + } +} + +func TestRemoteClient(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer 
teardownBackend(t, be, noPrefix) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + remote.TestClient(t, rs.Client) +} +func TestRemoteClientWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + defer teardownBackend(t, be, noPrefix) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteLocks(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be, noPrefix) + + remoteClient := func() (remote.Client, error) { + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + return nil, err + } + + rs, ok := ss.(*remote.State) + if !ok { + return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + return rs.Client, nil + } + + c0, err := remoteClient() + if err != nil { + t.Fatalf("remoteClient(0) = %v", err) + } + c1, err := remoteClient() + if err != nil { + t.Fatalf("remoteClient(1) = %v", err) + } + + remote.TestRemoteLocks(t, c0, c1) +} + +func TestBackend(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) + backend.TestBackendStateForceUnlock(t, be0, be1) +} + +func TestBackendWithPrefix(t *testing.T) { 
+ t.Parallel() + + prefix := "test/prefix" + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, prefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, prefix) + + be1 := setupBackend(t, bucket, prefix+"/", noEncryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} +func TestBackendWithCustomerSuppliedEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func TestBackendWithCustomerManagedKMSEncryption(t *testing.T) { + t.Parallel() + + projectID := os.Getenv("GOOGLE_PROJECT") + bucket := bucketName(t) + + // Taken from global variables in test file + kmsDetails := map[string]string{ + "project": projectID, + "location": keyRingLocation, + "ringName": keyRingName, + "keyName": keyName, + } + + kmsName := setupKmsKey(t, kmsDetails) + + be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +// setupBackend returns a new GCS backend. +func setupBackend(t *testing.T, bucket, prefix, key, kmsName string) backend.Backend { + t.Helper() + + projectID := os.Getenv("GOOGLE_PROJECT") + if projectID == "" || os.Getenv("TF_ACC") == "" { + t.Skip("This test creates a bucket in GCS and populates it. 
" + + "Since this may incur costs, it will only run if " + + "the TF_ACC and GOOGLE_PROJECT environment variables are set.") + } + + config := map[string]interface{}{ + "bucket": bucket, + "prefix": prefix, + } + // Only add encryption keys to config if non-zero value set + // If not set here, default values are supplied in `TestBackendConfig` by `PrepareConfig` function call + if len(key) > 0 { + config["encryption_key"] = key + } + if len(kmsName) > 0 { + config["kms_encryption_key"] = kmsName + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)) + be := b.(*Backend) + + // create the bucket if it doesn't exist + bkt := be.storageClient.Bucket(bucket) + _, err := bkt.Attrs(be.storageContext) + if err != nil { + if err != storage.ErrBucketNotExist { + t.Fatal(err) + } + + attrs := &storage.BucketAttrs{ + Location: os.Getenv("GOOGLE_REGION"), + } + err := bkt.Create(be.storageContext, projectID, attrs) + if err != nil { + t.Fatal(err) + } + } + + return b +} + +// setupKmsKey asserts that a KMS key chain and key exist and necessary IAM bindings are in place +// If the key ring or key do not exist they are created and permissions are given to the GCS Service account +func setupKmsKey(t *testing.T, keyDetails map[string]string) string { + t.Helper() + + projectID := os.Getenv("GOOGLE_PROJECT") + if projectID == "" || os.Getenv("TF_ACC") == "" { + t.Skip("This test creates a KMS key ring and key in Cloud KMS. " + + "Since this may incur costs, it will only run if " + + "the TF_ACC and GOOGLE_PROJECT environment variables are set.") + } + + // KMS Client + ctx := context.Background() + opts, err := testGetClientOptions(t) + if err != nil { + e := fmt.Errorf("testGetClientOptions() failed: %s", err) + t.Fatal(e) + } + c, err := kms.NewKeyManagementClient(ctx, opts...) 
+ if err != nil { + e := fmt.Errorf("kms.NewKeyManagementClient() failed: %v", err) + t.Fatal(e) + } + defer c.Close() + + // Get KMS key ring, create if doesn't exist + reqGetKeyRing := &kmspb.GetKeyRingRequest{ + Name: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), + } + var keyRing *kmspb.KeyRing + keyRing, err = c.GetKeyRing(ctx, reqGetKeyRing) + if err != nil { + if !strings.Contains(err.Error(), "NotFound") { + // Handle unexpected error that isn't related to the key ring not being made yet + t.Fatal(err) + } + // Create key ring that doesn't exist + t.Logf("Cloud KMS key ring `%s` not found: creating key ring", + fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), + ) + reqCreateKeyRing := &kmspb.CreateKeyRingRequest{ + Parent: fmt.Sprintf("projects/%s/locations/%s", keyDetails["project"], keyDetails["location"]), + KeyRingId: keyDetails["ringName"], + } + keyRing, err = c.CreateKeyRing(ctx, reqCreateKeyRing) + if err != nil { + t.Fatal(err) + } + t.Logf("Cloud KMS key ring `%s` created successfully", keyRing.Name) + } + + // Get KMS key, create if doesn't exist (and give GCS service account permission to use) + reqGetKey := &kmspb.GetCryptoKeyRequest{ + Name: fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), + } + var key *kmspb.CryptoKey + key, err = c.GetCryptoKey(ctx, reqGetKey) + if err != nil { + if !strings.Contains(err.Error(), "NotFound") { + // Handle unexpected error that isn't related to the key not being made yet + t.Fatal(err) + } + // Create key that doesn't exist + t.Logf("Cloud KMS key `%s` not found: creating key", + fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), + ) + reqCreateKey := &kmspb.CreateCryptoKeyRequest{ + Parent: keyRing.Name, + CryptoKeyId: keyDetails["keyName"], + CryptoKey: &kmspb.CryptoKey{ + Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT, + 
}, + } + key, err = c.CreateCryptoKey(ctx, reqCreateKey) + if err != nil { + t.Fatal(err) + } + t.Logf("Cloud KMS key `%s` created successfully", key.Name) + } + + // Get GCS Service account email, check has necessary permission on key + // Note: we cannot reuse the backend's storage client (like in the setupBackend function) + // because the KMS key needs to exist before the backend buckets are made in the test. + sc, err := storage.NewClient(ctx, opts...) //reuse opts from KMS client + if err != nil { + e := fmt.Errorf("storage.NewClient() failed: %v", err) + t.Fatal(e) + } + defer sc.Close() + gcsServiceAccount, err := sc.ServiceAccount(ctx, keyDetails["project"]) + if err != nil { + t.Fatal(err) + } + + // Assert Cloud Storage service account has permission to use this key. + member := fmt.Sprintf("serviceAccount:%s", gcsServiceAccount) + iamHandle := c.ResourceIAM(key.Name) + policy, err := iamHandle.Policy(ctx) + if err != nil { + t.Fatal(err) + } + if ok := policy.HasRole(member, kmsRole); !ok { + // Add the missing permissions + t.Logf("Granting GCS service account %s %s role on key %s", gcsServiceAccount, kmsRole, key.Name) + policy.Add(member, kmsRole) + err = iamHandle.SetPolicy(ctx, policy) + if err != nil { + t.Fatal(err) + } + } + return key.Name +} + +// teardownBackend deletes all states from be except the default state. +func teardownBackend(t *testing.T, be backend.Backend, prefix string) { + t.Helper() + gcsBE, ok := be.(*Backend) + if !ok { + t.Fatalf("be is a %T, want a *gcsBackend", be) + } + ctx := gcsBE.storageContext + + bucket := gcsBE.storageClient.Bucket(gcsBE.bucketName) + objs := bucket.Objects(ctx, nil) + + for o, err := objs.Next(); err == nil; o, err = objs.Next() { + if err := bucket.Object(o.Name).Delete(ctx); err != nil { + log.Printf("Error trying to delete object: %s %s\n\n", o.Name, err) + } else { + log.Printf("Object deleted: %s", o.Name) + } + } + + // Delete the bucket itself. 
+ if err := bucket.Delete(ctx); err != nil { + t.Errorf("deleting bucket %q failed, manual cleanup may be required: %v", gcsBE.bucketName, err) + } +} + +// bucketName returns a valid bucket name for this test. +func bucketName(t *testing.T) string { + name := fmt.Sprintf("tf-%x-%s", time.Now().UnixNano(), t.Name()) + + // Bucket names must contain 3 to 63 characters. + if len(name) > 63 { + name = name[:63] + } + + return strings.ToLower(name) +} + +// getClientOptions returns the []option.ClientOption needed to configure Google API clients +// that are required in acceptance tests but are not part of the gcs backend itself +func testGetClientOptions(t *testing.T) ([]option.ClientOption, error) { + t.Helper() + + var creds string + if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { + creds = v + } else { + creds = os.Getenv("GOOGLE_CREDENTIALS") + } + if creds == "" { + t.Skip("This test required credentials to be supplied via" + + "the GOOGLE_CREDENTIALS or GOOGLE_BACKEND_CREDENTIALS environment variables.") + } + + var opts []option.ClientOption + var credOptions []option.ClientOption + + contents, err := backend.ReadPathOrContents(creds) + if err != nil { + return nil, fmt.Errorf("error loading credentials: %s", err) + } + if !json.Valid([]byte(contents)) { + return nil, fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") + } + credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) + opts = append(opts, credOptions...) 
+ opts = append(opts, option.WithUserAgent(httpclient.UserAgentString())) + + return opts, nil +} diff --git a/backend/remote-state/gcs/client.go b/backend/remote-state/gcs/client.go new file mode 100644 index 000000000000..83bc89e9e742 --- /dev/null +++ b/backend/remote-state/gcs/client.go @@ -0,0 +1,190 @@ +package gcs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strconv" + + "cloud.google.com/go/storage" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + "golang.org/x/net/context" +) + +// remoteClient is used by "state/remote".State to read and write +// blobs representing state. +// Implements "state/remote".ClientLocker +type remoteClient struct { + storageContext context.Context + storageClient *storage.Client + bucketName string + stateFilePath string + lockFilePath string + encryptionKey []byte + kmsKeyName string +} + +func (c *remoteClient) Get() (payload *remote.Payload, err error) { + stateFileReader, err := c.stateFile().NewReader(c.storageContext) + if err != nil { + if err == storage.ErrObjectNotExist { + return nil, nil + } else { + return nil, fmt.Errorf("Failed to open state file at %v: %v", c.stateFileURL(), err) + } + } + defer stateFileReader.Close() + + stateFileContents, err := ioutil.ReadAll(stateFileReader) + if err != nil { + return nil, fmt.Errorf("Failed to read state file from %v: %v", c.stateFileURL(), err) + } + + stateFileAttrs, err := c.stateFile().Attrs(c.storageContext) + if err != nil { + return nil, fmt.Errorf("Failed to read state file attrs from %v: %v", c.stateFileURL(), err) + } + + result := &remote.Payload{ + Data: stateFileContents, + MD5: stateFileAttrs.MD5, + } + + return result, nil +} + +func (c *remoteClient) Put(data []byte) error { + err := func() error { + stateFileWriter := c.stateFile().NewWriter(c.storageContext) + if len(c.kmsKeyName) > 0 { + stateFileWriter.KMSKeyName = c.kmsKeyName + } + if _, err 
:= stateFileWriter.Write(data); err != nil {
			return err
		}
		return stateFileWriter.Close()
	}()
	if err != nil {
		return fmt.Errorf("Failed to upload state to %v: %v", c.stateFileURL(), err)
	}

	return nil
}

// Delete removes the state object from the bucket.
func (c *remoteClient) Delete() error {
	err := c.stateFile().Delete(c.storageContext)
	if err == nil {
		return nil
	}
	return fmt.Errorf("Failed to delete state file %v: %v", c.stateFileURL(), err)
}

// Lock writes to a lock file, ensuring file creation. Returns the generation
// number, which must be passed to Unlock().
func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) {
	// Record where the lock lives; the ID cannot be filled in until after the
	// object is written, because it is derived from the GCS generation.
	info.Path = c.lockFileURL()

	serialized, err := json.Marshal(info)
	if err != nil {
		return "", err
	}

	// DoesNotExist makes the write conditional: if another process already
	// holds the lock object, the write fails rather than overwriting it.
	w := c.lockFile().If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext)
	writeErr := func() error {
		if _, err := w.Write(serialized); err != nil {
			return err
		}
		return w.Close()
	}()
	if writeErr != nil {
		return "", c.lockError(fmt.Errorf("writing %q failed: %v", c.lockFileURL(), writeErr))
	}

	// The object generation assigned by GCS is the lock ID.
	info.ID = strconv.FormatInt(w.Attrs().Generation, 10)

	return info.ID, nil
}

// Unlock deletes the lock object, but only if its generation still matches
// the ID returned by Lock, so a stale caller cannot release someone else's lock.
func (c *remoteClient) Unlock(id string) error {
	gen, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		return fmt.Errorf("Lock ID should be numerical value, got '%s'", id)
	}

	cond := storage.Conditions{GenerationMatch: gen}
	if err := c.lockFile().If(cond).Delete(c.storageContext); err != nil {
		return c.lockError(err)
	}

	return nil
}

// lockError wraps err in a statemgr.LockError, attaching the current lock
// holder's info when it can be read.
func (c *remoteClient) lockError(err error) *statemgr.LockError {
	lockErr := &statemgr.LockError{Err: err}

	if info, infoErr := c.lockInfo(); infoErr != nil {
		lockErr.Err = multierror.Append(lockErr.Err, infoErr)
	} else {
		lockErr.Info = info
	}
	return lockErr
}

// lockInfo reads the lock file, parses its contents and returns the parsed
// LockInfo struct.
+func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) { + r, err := c.lockFile().NewReader(c.storageContext) + if err != nil { + return nil, err + } + defer r.Close() + + rawData, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + info := &statemgr.LockInfo{} + if err := json.Unmarshal(rawData, info); err != nil { + return nil, err + } + + // We use the Generation as the ID, so overwrite the ID in the json. + // This can't be written into the Info, since the generation isn't known + // until it's written. + attrs, err := c.lockFile().Attrs(c.storageContext) + if err != nil { + return nil, err + } + info.ID = strconv.FormatInt(attrs.Generation, 10) + + return info, nil +} + +func (c *remoteClient) stateFile() *storage.ObjectHandle { + h := c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath) + if len(c.encryptionKey) > 0 { + return h.Key(c.encryptionKey) + } + return h +} + +func (c *remoteClient) stateFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath) +} + +func (c *remoteClient) lockFile() *storage.ObjectHandle { + return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath) +} + +func (c *remoteClient) lockFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath) +} diff --git a/backend/remote-state/http/backend.go b/backend/remote-state/http/backend.go new file mode 100644 index 000000000000..addc74ca7071 --- /dev/null +++ b/backend/remote-state/http/backend.go @@ -0,0 +1,257 @@ +package http + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "time" + + "github.com/hashicorp/go-retryablehttp" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +func New() backend.Backend { + s := &schema.Backend{ + Schema: 
map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_ADDRESS", nil), + Description: "The address of the REST endpoint", + }, + "update_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UPDATE_METHOD", "POST"), + Description: "HTTP method to use when updating state", + }, + "lock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_ADDRESS", nil), + Description: "The address of the lock REST endpoint", + }, + "unlock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_ADDRESS", nil), + Description: "The address of the unlock REST endpoint", + }, + "lock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_METHOD", "LOCK"), + Description: "The HTTP method to use when locking", + }, + "unlock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_METHOD", "UNLOCK"), + Description: "The HTTP method to use when unlocking", + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_USERNAME", nil), + Description: "The username for HTTP basic authentication", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_PASSWORD", nil), + Description: "The password for HTTP basic authentication", + }, + "skip_cert_verification": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to skip TLS verification.", + }, + "retry_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_MAX", 2), + Description: "The 
number of HTTP request retries.", + }, + "retry_wait_min": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MIN", 1), + Description: "The minimum time in seconds to wait between HTTP request attempts.", + }, + "retry_wait_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MAX", 30), + Description: "The maximum time in seconds to wait between HTTP request attempts.", + }, + "client_ca_certificate_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CA_CERTIFICATE_PEM", ""), + Description: "A PEM-encoded CA certificate chain used by the client to verify server certificates during TLS authentication.", + }, + "client_certificate_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CERTIFICATE_PEM", ""), + Description: "A PEM-encoded certificate used by the server to verify the client during mutual TLS (mTLS) authentication.", + }, + "client_private_key_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_PRIVATE_KEY_PEM", ""), + Description: "A PEM-encoded private key, required if client_certificate_pem is specified.", + }, + }, + } + + b := &Backend{Backend: s} + b.Backend.ConfigureFunc = b.configure + return b +} + +type Backend struct { + *schema.Backend + + client *httpClient +} + +// configureTLS configures TLS when needed; if there are no conditions requiring TLS, no change is made. 
+func (b *Backend) configureTLS(client *retryablehttp.Client, data *schema.ResourceData) error { + // If there are no conditions needing to configure TLS, leave the client untouched + skipCertVerification := data.Get("skip_cert_verification").(bool) + clientCACertificatePem := data.Get("client_ca_certificate_pem").(string) + clientCertificatePem := data.Get("client_certificate_pem").(string) + clientPrivateKeyPem := data.Get("client_private_key_pem").(string) + if !skipCertVerification && clientCACertificatePem == "" && clientCertificatePem == "" && clientPrivateKeyPem == "" { + return nil + } + if clientCertificatePem != "" && clientPrivateKeyPem == "" { + return fmt.Errorf("client_certificate_pem is set but client_private_key_pem is not") + } + if clientPrivateKeyPem != "" && clientCertificatePem == "" { + return fmt.Errorf("client_private_key_pem is set but client_certificate_pem is not") + } + + // TLS configuration is needed; create an object and configure it + var tlsConfig tls.Config + client.HTTPClient.Transport.(*http.Transport).TLSClientConfig = &tlsConfig + + if skipCertVerification { + // ignores TLS verification + tlsConfig.InsecureSkipVerify = true + } + if clientCACertificatePem != "" { + // trust servers based on a CA + tlsConfig.RootCAs = x509.NewCertPool() + if !tlsConfig.RootCAs.AppendCertsFromPEM([]byte(clientCACertificatePem)) { + return errors.New("failed to append certs") + } + } + if clientCertificatePem != "" && clientPrivateKeyPem != "" { + // attach a client certificate to the TLS handshake (aka mTLS) + certificate, err := tls.X509KeyPair([]byte(clientCertificatePem), []byte(clientPrivateKeyPem)) + if err != nil { + return fmt.Errorf("cannot load client certificate: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{certificate} + } + + return nil +} + +func (b *Backend) configure(ctx context.Context) error { + data := schema.FromContextBackendConfig(ctx) + + address := data.Get("address").(string) + updateURL, err := 
url.Parse(address) + if err != nil { + return fmt.Errorf("failed to parse address URL: %s", err) + } + if updateURL.Scheme != "http" && updateURL.Scheme != "https" { + return fmt.Errorf("address must be HTTP or HTTPS") + } + + updateMethod := data.Get("update_method").(string) + + var lockURL *url.URL + if v, ok := data.GetOk("lock_address"); ok && v.(string) != "" { + var err error + lockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse lockAddress URL: %s", err) + } + if lockURL.Scheme != "http" && lockURL.Scheme != "https" { + return fmt.Errorf("lockAddress must be HTTP or HTTPS") + } + } + + lockMethod := data.Get("lock_method").(string) + + var unlockURL *url.URL + if v, ok := data.GetOk("unlock_address"); ok && v.(string) != "" { + var err error + unlockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse unlockAddress URL: %s", err) + } + if unlockURL.Scheme != "http" && unlockURL.Scheme != "https" { + return fmt.Errorf("unlockAddress must be HTTP or HTTPS") + } + } + + unlockMethod := data.Get("unlock_method").(string) + + rClient := retryablehttp.NewClient() + rClient.RetryMax = data.Get("retry_max").(int) + rClient.RetryWaitMin = time.Duration(data.Get("retry_wait_min").(int)) * time.Second + rClient.RetryWaitMax = time.Duration(data.Get("retry_wait_max").(int)) * time.Second + rClient.Logger = log.New(logging.LogOutput(), "", log.Flags()) + if err = b.configureTLS(rClient, data); err != nil { + return err + } + + b.client = &httpClient{ + URL: updateURL, + UpdateMethod: updateMethod, + + LockURL: lockURL, + LockMethod: lockMethod, + UnlockURL: unlockURL, + UnlockMethod: unlockMethod, + + Username: data.Get("username").(string), + Password: data.Get("password").(string), + + // accessible only for testing use + Client: rClient, + } + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + if name != backend.DefaultStateName { + return nil, 
backend.ErrWorkspacesNotSupported + } + + return &remote.State{Client: b.client}, nil +} + +func (b *Backend) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *Backend) DeleteWorkspace(string, bool) error { + return backend.ErrWorkspacesNotSupported +} diff --git a/backend/remote-state/http/backend_test.go b/backend/remote-state/http/backend_test.go new file mode 100644 index 000000000000..a03ebb319dab --- /dev/null +++ b/backend/remote-state/http/backend_test.go @@ -0,0 +1,164 @@ +package http + +import ( + "os" + "testing" + "time" + + "github.com/hashicorp/terraform/configs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/backend" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestHTTPClientFactory(t *testing.T) { + // defaults + + conf := map[string]cty.Value{ + "address": cty.StringVal("http://127.0.0.1:8888/foo"), + } + b := backend.TestBackendConfig(t, New(), configs.SynthBody("synth", conf)).(*Backend) + client := b.client + + if client == nil { + t.Fatal("Unexpected failure, address") + } + if client.URL.String() != "http://127.0.0.1:8888/foo" { + t.Fatalf("Expected address \"%s\", got \"%s\"", conf["address"], client.URL.String()) + } + if client.UpdateMethod != "POST" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "POST", client.UpdateMethod) + } + if client.LockURL != nil || client.LockMethod != "LOCK" { + t.Fatal("Unexpected lock_address or lock_method") + } + if client.UnlockURL != nil || client.UnlockMethod != "UNLOCK" { + t.Fatal("Unexpected unlock_address or unlock_method") + } + if client.Username != "" || client.Password != "" { + t.Fatal("Unexpected username or password") + } + + // custom + conf = map[string]cty.Value{ + "address": cty.StringVal("http://127.0.0.1:8888/foo"), + "update_method": cty.StringVal("BLAH"), + "lock_address": cty.StringVal("http://127.0.0.1:8888/bar"), + "lock_method": cty.StringVal("BLIP"), + 
"unlock_address": cty.StringVal("http://127.0.0.1:8888/baz"), + "unlock_method": cty.StringVal("BLOOP"), + "username": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "retry_max": cty.StringVal("999"), + "retry_wait_min": cty.StringVal("15"), + "retry_wait_max": cty.StringVal("150"), + } + + b = backend.TestBackendConfig(t, New(), configs.SynthBody("synth", conf)).(*Backend) + client = b.client + + if client == nil { + t.Fatal("Unexpected failure, update_method") + } + if client.UpdateMethod != "BLAH" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) + } + if client.LockURL.String() != conf["lock_address"].AsString() || client.LockMethod != "BLIP" { + t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", client.LockURL.String(), + conf["lock_address"].AsString(), client.LockMethod, conf["lock_method"]) + } + if client.UnlockURL.String() != conf["unlock_address"].AsString() || client.UnlockMethod != "BLOOP" { + t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), + conf["unlock_address"].AsString(), client.UnlockMethod, conf["unlock_method"]) + } + if client.Username != "user" || client.Password != "pass" { + t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], + client.Password, conf["password"]) + } + if client.Client.RetryMax != 999 { + t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) + } + if client.Client.RetryWaitMin != 15*time.Second { + t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) + } + if client.Client.RetryWaitMax != 150*time.Second { + t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) + } +} + +func TestHTTPClientFactoryWithEnv(t *testing.T) { + // env + conf := map[string]string{ + "address": "http://127.0.0.1:8888/foo", 
+ "update_method": "BLAH", + "lock_address": "http://127.0.0.1:8888/bar", + "lock_method": "BLIP", + "unlock_address": "http://127.0.0.1:8888/baz", + "unlock_method": "BLOOP", + "username": "user", + "password": "pass", + "retry_max": "999", + "retry_wait_min": "15", + "retry_wait_max": "150", + } + + defer testWithEnv(t, "TF_HTTP_ADDRESS", conf["address"])() + defer testWithEnv(t, "TF_HTTP_UPDATE_METHOD", conf["update_method"])() + defer testWithEnv(t, "TF_HTTP_LOCK_ADDRESS", conf["lock_address"])() + defer testWithEnv(t, "TF_HTTP_UNLOCK_ADDRESS", conf["unlock_address"])() + defer testWithEnv(t, "TF_HTTP_LOCK_METHOD", conf["lock_method"])() + defer testWithEnv(t, "TF_HTTP_UNLOCK_METHOD", conf["unlock_method"])() + defer testWithEnv(t, "TF_HTTP_USERNAME", conf["username"])() + defer testWithEnv(t, "TF_HTTP_PASSWORD", conf["password"])() + defer testWithEnv(t, "TF_HTTP_RETRY_MAX", conf["retry_max"])() + defer testWithEnv(t, "TF_HTTP_RETRY_WAIT_MIN", conf["retry_wait_min"])() + defer testWithEnv(t, "TF_HTTP_RETRY_WAIT_MAX", conf["retry_wait_max"])() + + b := backend.TestBackendConfig(t, New(), nil).(*Backend) + client := b.client + + if client == nil { + t.Fatal("Unexpected failure, EnvDefaultFunc") + } + if client.UpdateMethod != "BLAH" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) + } + if client.LockURL.String() != conf["lock_address"] || client.LockMethod != "BLIP" { + t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", client.LockURL.String(), + conf["lock_address"], client.LockMethod, conf["lock_method"]) + } + if client.UnlockURL.String() != conf["unlock_address"] || client.UnlockMethod != "BLOOP" { + t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), + conf["unlock_address"], client.UnlockMethod, conf["unlock_method"]) + } + if client.Username != "user" || client.Password != "pass" { + t.Fatalf("Unexpected username \"%s\" 
vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], + client.Password, conf["password"]) + } + if client.Client.RetryMax != 999 { + t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) + } + if client.Client.RetryWaitMin != 15*time.Second { + t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) + } + if client.Client.RetryWaitMax != 150*time.Second { + t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) + } +} + +// testWithEnv sets an environment variable and returns a deferable func to clean up +func testWithEnv(t *testing.T, key string, value string) func() { + if err := os.Setenv(key, value); err != nil { + t.Fatalf("err: %v", err) + } + + return func() { + if err := os.Unsetenv(key); err != nil { + t.Fatalf("err: %v", err) + } + } +} diff --git a/backend/remote-state/http/client.go b/backend/remote-state/http/client.go new file mode 100644 index 000000000000..3fe75de13684 --- /dev/null +++ b/backend/remote-state/http/client.go @@ -0,0 +1,256 @@ +package http + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +// httpClient is a remote client that stores data in Consul or HTTP REST. 
+type httpClient struct { + // Update & Retrieve + URL *url.URL + UpdateMethod string + + // Locking + LockURL *url.URL + LockMethod string + UnlockURL *url.URL + UnlockMethod string + + // HTTP + Client *retryablehttp.Client + Username string + Password string + + lockID string + jsonLockInfo []byte +} + +func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what string) (*http.Response, error) { + // If we have data we need a reader + var reader io.Reader = nil + if data != nil { + reader = bytes.NewReader(*data) + } + + // Create the request + req, err := retryablehttp.NewRequest(method, url.String(), reader) + if err != nil { + return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) + } + // Set up basic auth + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + // Work with data/body + if data != nil { + req.Header.Set("Content-Type", "application/json") + req.ContentLength = int64(len(*data)) + + // Generate the MD5 + hash := md5.Sum(*data) + b64 := base64.StdEncoding.EncodeToString(hash[:]) + req.Header.Set("Content-MD5", b64) + } + + // Make the request + resp, err := c.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to %s: %v", what, err) + } + + return resp, nil +} + +func (c *httpClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.LockURL == nil { + return "", nil + } + c.lockID = "" + + jsonLockInfo := info.Marshal() + resp, err := c.httpRequest(c.LockMethod, c.LockURL, &jsonLockInfo, "lock") + if err != nil { + return "", err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + c.lockID = info.ID + c.jsonLockInfo = jsonLockInfo + return info.ID, nil + case http.StatusUnauthorized: + return "", fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return "", fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusConflict, http.StatusLocked: + defer resp.Body.Close() + body, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked, failed to read body"), + } + } + existing := statemgr.LockInfo{} + err = json.Unmarshal(body, &existing) + if err != nil { + return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked, failed to unmarshal body"), + } + } + return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID), + } + default: + return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Unlock(id string) error { + if c.UnlockURL == nil { + return nil + } + + resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, &c.jsonLockInfo, "unlock") + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Get() (*remote.Payload, error) { + resp, err := c.httpRequest("GET", c.URL, nil, "get state") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + // Create the 
payload + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf( + "Failed to decode Content-MD5 '%s': %s", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := md5.Sum(payload.Data) + payload.MD5 = hash[:] + } + + return payload, nil +} + +func (c *httpClient) Put(data []byte) error { + // Copy the target URL + base := *c.URL + + if c.lockID != "" { + query := base.Query() + query.Set("ID", c.lockID) + base.RawQuery = query.Encode() + } + + /* + // Set the force query parameter if needed + if force { + values := base.Query() + values.Set("force", "true") + base.RawQuery = values.Encode() + } + */ + + var method string = "POST" + if c.UpdateMethod != "" { + method = c.UpdateMethod + } + resp, err := c.httpRequest(method, &base, &data, "upload state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} + +func (c *httpClient) Delete() error { + // Make the request + resp, err := c.httpRequest("DELETE", c.URL, nil, "delete state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} diff --git a/backend/remote-state/http/client_test.go b/backend/remote-state/http/client_test.go new file mode 100644 index 000000000000..c9bce3b78c2c --- /dev/null +++ b/backend/remote-state/http/client_test.go @@ -0,0 +1,175 @@ +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/http/httptest" + 
"net/url" + "reflect" + "testing" + + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform/states/remote" +) + +func TestHTTPClient_impl(t *testing.T) { + var _ remote.Client = new(httpClient) + var _ remote.ClientLocker = new(httpClient) +} + +func TestHTTPClient(t *testing.T) { + handler := new(testHTTPHandler) + ts := httptest.NewServer(http.HandlerFunc(handler.Handle)) + defer ts.Close() + + url, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Parse: %s", err) + } + + // Test basic get/update + client := &httpClient{URL: url, Client: retryablehttp.NewClient()} + remote.TestClient(t, client) + + // test just a single PUT + p := &httpClient{ + URL: url, + UpdateMethod: "PUT", + Client: retryablehttp.NewClient(), + } + remote.TestClient(t, p) + + // Test locking and alternative UpdateMethod + a := &httpClient{ + URL: url, + UpdateMethod: "PUT", + LockURL: url, + LockMethod: "LOCK", + UnlockURL: url, + UnlockMethod: "UNLOCK", + Client: retryablehttp.NewClient(), + } + b := &httpClient{ + URL: url, + UpdateMethod: "PUT", + LockURL: url, + LockMethod: "LOCK", + UnlockURL: url, + UnlockMethod: "UNLOCK", + Client: retryablehttp.NewClient(), + } + remote.TestRemoteLocks(t, a, b) + + // test a WebDAV-ish backend + davhandler := new(testHTTPHandler) + ts = httptest.NewServer(http.HandlerFunc(davhandler.HandleWebDAV)) + defer ts.Close() + + url, err = url.Parse(ts.URL) + client = &httpClient{ + URL: url, + UpdateMethod: "PUT", + Client: retryablehttp.NewClient(), + } + if err != nil { + t.Fatalf("Parse: %s", err) + } + + remote.TestClient(t, client) // first time through: 201 + remote.TestClient(t, client) // second time, with identical data: 204 + + // test a broken backend + brokenHandler := new(testBrokenHTTPHandler) + brokenHandler.handler = new(testHTTPHandler) + ts = httptest.NewServer(http.HandlerFunc(brokenHandler.Handle)) + defer ts.Close() + + url, err = url.Parse(ts.URL) + if err != nil { + t.Fatalf("Parse: %s", err) + } + client = 
&httpClient{URL: url, Client: retryablehttp.NewClient()} + remote.TestClient(t, client) +} + +type testHTTPHandler struct { + Data []byte + Locked bool +} + +func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + w.Write(h.Data) + case "PUT": + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + w.WriteHeader(201) + h.Data = buf.Bytes() + case "POST": + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + h.Data = buf.Bytes() + case "LOCK": + if h.Locked { + w.WriteHeader(423) + } else { + h.Locked = true + } + case "UNLOCK": + h.Locked = false + case "DELETE": + h.Data = nil + w.WriteHeader(200) + default: + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) + } +} + +// mod_dav-ish behavior +func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + w.Write(h.Data) + case "PUT": + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + if reflect.DeepEqual(h.Data, buf.Bytes()) { + h.Data = buf.Bytes() + w.WriteHeader(204) + } else { + h.Data = buf.Bytes() + w.WriteHeader(201) + } + case "DELETE": + h.Data = nil + w.WriteHeader(200) + default: + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) + } +} + +type testBrokenHTTPHandler struct { + lastRequestWasBroken bool + handler *testHTTPHandler +} + +func (h *testBrokenHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { + if h.lastRequestWasBroken { + h.lastRequestWasBroken = false + h.handler.Handle(w, r) + } else { + h.lastRequestWasBroken = true + w.WriteHeader(500) + } +} diff --git a/internal/backend/remote-state/http/mock_server_test.go b/backend/remote-state/http/mock_server_test.go similarity index 100% rename from internal/backend/remote-state/http/mock_server_test.go rename to 
backend/remote-state/http/mock_server_test.go diff --git a/internal/backend/remote-state/http/server_test.go b/backend/remote-state/http/server_test.go similarity index 98% rename from internal/backend/remote-state/http/server_test.go rename to backend/remote-state/http/server_test.go index ff793708aa08..151234ba040a 100644 --- a/internal/backend/remote-state/http/server_test.go +++ b/backend/remote-state/http/server_test.go @@ -20,10 +20,10 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/backend/remote-state/http/testdata/certs/ca.cert.pem b/backend/remote-state/http/testdata/certs/ca.cert.pem similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/ca.cert.pem rename to backend/remote-state/http/testdata/certs/ca.cert.pem diff --git a/internal/backend/remote-state/http/testdata/certs/ca.key b/backend/remote-state/http/testdata/certs/ca.key similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/ca.key rename to backend/remote-state/http/testdata/certs/ca.key diff --git a/internal/backend/remote-state/http/testdata/certs/client.crt b/backend/remote-state/http/testdata/certs/client.crt similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/client.crt rename to backend/remote-state/http/testdata/certs/client.crt diff --git a/internal/backend/remote-state/http/testdata/certs/client.csr b/backend/remote-state/http/testdata/certs/client.csr similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/client.csr rename to 
backend/remote-state/http/testdata/certs/client.csr diff --git a/internal/backend/remote-state/http/testdata/certs/client.key b/backend/remote-state/http/testdata/certs/client.key similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/client.key rename to backend/remote-state/http/testdata/certs/client.key diff --git a/internal/backend/remote-state/http/testdata/certs/server.crt b/backend/remote-state/http/testdata/certs/server.crt similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/server.crt rename to backend/remote-state/http/testdata/certs/server.crt diff --git a/internal/backend/remote-state/http/testdata/certs/server.csr b/backend/remote-state/http/testdata/certs/server.csr similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/server.csr rename to backend/remote-state/http/testdata/certs/server.csr diff --git a/internal/backend/remote-state/http/testdata/certs/server.key b/backend/remote-state/http/testdata/certs/server.key similarity index 100% rename from internal/backend/remote-state/http/testdata/certs/server.key rename to backend/remote-state/http/testdata/certs/server.key diff --git a/internal/backend/remote-state/http/testdata/gencerts.sh b/backend/remote-state/http/testdata/gencerts.sh similarity index 100% rename from internal/backend/remote-state/http/testdata/gencerts.sh rename to backend/remote-state/http/testdata/gencerts.sh diff --git a/backend/remote-state/inmem/backend.go b/backend/remote-state/inmem/backend.go new file mode 100644 index 000000000000..4689b6971c70 --- /dev/null +++ b/backend/remote-state/inmem/backend.go @@ -0,0 +1,208 @@ +package inmem + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + statespkg "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + 
"github.com/hashicorp/terraform/states/statemgr" +) + +// we keep the states and locks in package-level variables, so that they can be +// accessed from multiple instances of the backend. This better emulates +// backend instances accessing a single remote data store. +var ( + states stateMap + locks lockMap +) + +func init() { + Reset() +} + +// Reset clears out all existing state and lock data. +// This is used to initialize the package during init, as well as between +// tests. +func Reset() { + states = stateMap{ + m: map[string]*remote.State{}, + } + + locks = lockMap{ + m: map[string]*statemgr.LockInfo{}, + } +} + +// New creates a new backend for Inmem remote state. +func New() backend.Backend { + // Set the schema + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "lock_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "initializes the state in a locked configuration", + }, + }, + } + backend := &Backend{Backend: s} + backend.Backend.ConfigureFunc = backend.configure + return backend +} + +type Backend struct { + *schema.Backend +} + +func (b *Backend) configure(ctx context.Context) error { + states.Lock() + defer states.Unlock() + + defaultClient := &RemoteClient{ + Name: backend.DefaultStateName, + } + + states.m[backend.DefaultStateName] = &remote.State{ + Client: defaultClient, + } + + // set the default client lock info per the test config + data := schema.FromContextBackendConfig(ctx) + if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { + info := statemgr.NewLockInfo() + info.ID = v.(string) + info.Operation = "test" + info.Info = "test config" + + locks.lock(backend.DefaultStateName, info) + } + + return nil +} + +func (b *Backend) Workspaces() ([]string, error) { + states.Lock() + defer states.Unlock() + + var workspaces []string + + for s := range states.m { + workspaces = append(workspaces, s) + } + + sort.Strings(workspaces) + return workspaces, nil +} + +func (b *Backend) DeleteWorkspace(name 
string, _ bool) error { + states.Lock() + defer states.Unlock() + + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + delete(states.m, name) + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + states.Lock() + defer states.Unlock() + + s := states.m[name] + if s == nil { + s = &remote.State{ + Client: &RemoteClient{ + Name: name, + }, + } + states.m[name] = s + + // to most closely replicate other implementations, we are going to + // take a lock and create a new state if it doesn't exist. + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := s.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock inmem state: %s", err) + } + defer s.Unlock(lockID) + + // If we have no state, we have to create an empty state + if v := s.State(); v == nil { + if err := s.WriteState(statespkg.NewState()); err != nil { + return nil, err + } + if err := s.PersistState(nil); err != nil { + return nil, err + } + } + } + + return s, nil +} + +type stateMap struct { + sync.Mutex + m map[string]*remote.State +} + +// Global level locks for inmem backends. 
+type lockMap struct { + sync.Mutex + m map[string]*statemgr.LockInfo +} + +func (l *lockMap) lock(name string, info *statemgr.LockInfo) (string, error) { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + if lockInfo != nil { + lockErr := &statemgr.LockError{ + Info: lockInfo, + } + + lockErr.Err = errors.New("state locked") + // make a copy of the lock info to avoid any testing shenanigans + *lockErr.Info = *lockInfo + return "", lockErr + } + + info.Created = time.Now().UTC() + l.m[name] = info + + return info.ID, nil +} + +func (l *lockMap) unlock(name, id string) error { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + + if lockInfo == nil { + return errors.New("state not locked") + } + + lockErr := &statemgr.LockError{ + Info: &statemgr.LockInfo{}, + } + + if id != lockInfo.ID { + lockErr.Err = errors.New("invalid lock id") + *lockErr.Info = *lockInfo + return lockErr + } + + delete(l.m, name) + return nil +} diff --git a/backend/remote-state/inmem/backend_test.go b/backend/remote-state/inmem/backend_test.go new file mode 100644 index 000000000000..f401b793d61f --- /dev/null +++ b/backend/remote-state/inmem/backend_test.go @@ -0,0 +1,92 @@ +package inmem + +import ( + "flag" + "os" + "testing" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform/backend" + statespkg "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + + _ "github.com/hashicorp/terraform/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + defer Reset() + testID := "test_lock_id" + + config := map[string]interface{}{ + "lock_id": testID, + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c := s.(*remote.State).Client.(*RemoteClient) + if 
c.Name != backend.DefaultStateName { + t.Fatal("client name is not configured") + } + + if err := locks.unlock(backend.DefaultStateName, testID); err != nil { + t.Fatalf("default state should have been locked: %s", err) + } +} + +func TestBackend(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) + backend.TestBackendStates(t, b) +} + +func TestBackendLocked(t *testing.T) { + defer Reset() + b1 := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) + b2 := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) +} + +// use the this backen to test the remote.State implementation +func TestRemoteState(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()) + + workspace := "workspace" + + // create a new workspace in this backend + s, err := b.StateMgr(workspace) + if err != nil { + t.Fatal(err) + } + + // force overwriting the remote state + newState := statespkg.NewState() + + if err := s.WriteState(newState); err != nil { + t.Fatal(err) + } + + if err := s.PersistState(nil); err != nil { + t.Fatal(err) + } + + if err := s.RefreshState(); err != nil { + t.Fatal(err) + } +} diff --git a/backend/remote-state/inmem/client.go b/backend/remote-state/inmem/client.go new file mode 100644 index 000000000000..21f229bdacf2 --- /dev/null +++ b/backend/remote-state/inmem/client.go @@ -0,0 +1,47 @@ +package inmem + +import ( + "crypto/md5" + + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +// RemoteClient is a remote client that stores data in memory for testing. 
+type RemoteClient struct { + Data []byte + MD5 []byte + Name string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + if c.Data == nil { + return nil, nil + } + + return &remote.Payload{ + Data: c.Data, + MD5: c.MD5, + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + md5 := md5.Sum(data) + + c.Data = data + c.MD5 = md5[:] + return nil +} + +func (c *RemoteClient) Delete() error { + c.Data = nil + c.MD5 = nil + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + return locks.lock(c.Name, info) +} +func (c *RemoteClient) Unlock(id string) error { + return locks.unlock(c.Name, id) +} diff --git a/backend/remote-state/inmem/client_test.go b/backend/remote-state/inmem/client_test.go new file mode 100644 index 000000000000..765eac9ee1d2 --- /dev/null +++ b/backend/remote-state/inmem/client_test.go @@ -0,0 +1,36 @@ +package inmem + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, s.(*remote.State).Client) +} + +func TestInmemLocks(t *testing.T) { + defer Reset() + s, err := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s.(*remote.State).Client, s.(*remote.State).Client) +} diff --git a/backend/remote-state/kubernetes/backend.go b/backend/remote-state/kubernetes/backend.go new file mode 100644 index 000000000000..3a08d299cab2 --- /dev/null +++ b/backend/remote-state/kubernetes/backend.go @@ -0,0 +1,405 @@ +package kubernetes + +import ( + 
"bytes" + "context" + "fmt" + "log" + "os" + "path/filepath" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/hashicorp/terraform/version" + "github.com/mitchellh/go-homedir" + k8sSchema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// Modified from github.com/terraform-providers/terraform-provider-kubernetes + +const ( + noConfigError = ` + +[Kubernetes backend] Neither service_account nor load_config_file were set to true, +this could cause issues connecting to your Kubernetes cluster. +` +) + +var ( + secretResource = k8sSchema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", + } +) + +// New creates a new backend for kubernetes remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "secret_suffix": { + Type: schema.TypeString, + Required: true, + Description: "Suffix used when creating the secret. 
The secret will be named in the format: `tfstate-{workspace}-{secret_suffix}`.", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Map of additional labels to be applied to the secret.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_NAMESPACE", "default"), + Description: "Namespace to store the secret in.", + }, + "in_cluster_config": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_IN_CLUSTER_CONFIG", false), + Description: "Used to authenticate to the cluster from inside a pod.", + }, + "load_config_file": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_LOAD_CONFIG_FILE", true), + Description: "Load local kubeconfig.", + }, + "host": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_HOST", ""), + Description: "The hostname (in form of URI) of Kubernetes master.", + }, + "username": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_USER", ""), + Description: "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", + }, + "password": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_PASSWORD", ""), + Description: "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", + }, + "insecure": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_INSECURE", false), + Description: "Whether server should be accessed without verifying the TLS certificate.", + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_CERT_DATA", ""), + Description: "PEM-encoded client certificate for TLS authentication.", + }, + "client_key": { + Type: schema.TypeString, + 
Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_KEY_DATA", ""), + Description: "PEM-encoded client certificate key for TLS authentication.", + }, + "cluster_ca_certificate": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLUSTER_CA_CERT_DATA", ""), + Description: "PEM-encoded root certificates bundle for TLS authentication.", + }, + "config_paths": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Description: "A list of paths to kube config files. Can be set with KUBE_CONFIG_PATHS environment variable.", + }, + "config_path": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CONFIG_PATH", ""), + Description: "Path to the kube config file. Can be set with KUBE_CONFIG_PATH environment variable.", + }, + "config_context": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX", ""), + }, + "config_context_auth_info": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_AUTH_INFO", ""), + Description: "", + }, + "config_context_cluster": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_CLUSTER", ""), + Description: "", + }, + "token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_TOKEN", ""), + Description: "Token to authentifcate a service account.", + }, + "exec": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "api_version": { + Type: schema.TypeString, + Required: true, + }, + "command": { + Type: schema.TypeString, + Required: true, + }, + "env": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Description: 
"Use a credential plugin to authenticate.", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + kubernetesSecretClient dynamic.ResourceInterface + kubernetesLeaseClient coordinationv1.LeaseInterface + config *restclient.Config + namespace string + labels map[string]string + nameSuffix string +} + +func (b Backend) KubernetesSecretClient() (dynamic.ResourceInterface, error) { + if b.kubernetesSecretClient != nil { + return b.kubernetesSecretClient, nil + } + + client, err := dynamic.NewForConfig(b.config) + if err != nil { + return nil, fmt.Errorf("Failed to configure: %s", err) + } + + b.kubernetesSecretClient = client.Resource(secretResource).Namespace(b.namespace) + return b.kubernetesSecretClient, nil +} + +func (b Backend) KubernetesLeaseClient() (coordinationv1.LeaseInterface, error) { + if b.kubernetesLeaseClient != nil { + return b.kubernetesLeaseClient, nil + } + + client, err := kubernetes.NewForConfig(b.config) + if err != nil { + return nil, err + } + + b.kubernetesLeaseClient = client.CoordinationV1().Leases(b.namespace) + return b.kubernetesLeaseClient, nil +} + +func (b *Backend) configure(ctx context.Context) error { + if b.config != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + cfg, err := getInitialConfig(data) + if err != nil { + return err + } + + // Overriding with static configuration + cfg.UserAgent = fmt.Sprintf("HashiCorp/1.0 Terraform/%s", version.String()) + + if v, ok := data.GetOk("host"); ok { + cfg.Host = v.(string) + } + if v, ok := data.GetOk("username"); ok { + cfg.Username = v.(string) + } + if v, ok := data.GetOk("password"); ok { + cfg.Password = v.(string) + } + if v, ok := data.GetOk("insecure"); ok { + cfg.Insecure = v.(bool) + } + if v, ok := data.GetOk("cluster_ca_certificate"); ok { + cfg.CAData = 
bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("client_certificate"); ok { + cfg.CertData = bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("client_key"); ok { + cfg.KeyData = bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("token"); ok { + cfg.BearerToken = v.(string) + } + + if v, ok := data.GetOk("labels"); ok { + labels := map[string]string{} + for k, vv := range v.(map[string]interface{}) { + labels[k] = vv.(string) + } + b.labels = labels + } + + ns := data.Get("namespace").(string) + b.namespace = ns + b.nameSuffix = data.Get("secret_suffix").(string) + b.config = cfg + + return nil +} + +func getInitialConfig(data *schema.ResourceData) (*restclient.Config, error) { + var cfg *restclient.Config + var err error + + inCluster := data.Get("in_cluster_config").(bool) + if inCluster { + cfg, err = restclient.InClusterConfig() + if err != nil { + return nil, err + } + } else { + cfg, err = tryLoadingConfigFile(data) + if err != nil { + return nil, err + } + } + + if cfg == nil { + cfg = &restclient.Config{} + } + return cfg, err +} + +func tryLoadingConfigFile(d *schema.ResourceData) (*restclient.Config, error) { + loader := &clientcmd.ClientConfigLoadingRules{} + + configPaths := []string{} + if v, ok := d.Get("config_path").(string); ok && v != "" { + configPaths = []string{v} + } else if v, ok := d.Get("config_paths").([]interface{}); ok && len(v) > 0 { + for _, p := range v { + configPaths = append(configPaths, p.(string)) + } + } else if v := os.Getenv("KUBE_CONFIG_PATHS"); v != "" { + configPaths = filepath.SplitList(v) + } + + expandedPaths := []string{} + for _, p := range configPaths { + path, err := homedir.Expand(p) + if err != nil { + log.Printf("[DEBUG] Could not expand path: %s", err) + return nil, err + } + log.Printf("[DEBUG] Using kubeconfig: %s", path) + expandedPaths = append(expandedPaths, path) + } + + if len(expandedPaths) == 1 { + loader.ExplicitPath = expandedPaths[0] + } 
else { + loader.Precedence = expandedPaths + } + + overrides := &clientcmd.ConfigOverrides{} + ctxSuffix := "; default context" + + ctx, ctxOk := d.GetOk("config_context") + authInfo, authInfoOk := d.GetOk("config_context_auth_info") + cluster, clusterOk := d.GetOk("config_context_cluster") + if ctxOk || authInfoOk || clusterOk { + ctxSuffix = "; overriden context" + if ctxOk { + overrides.CurrentContext = ctx.(string) + ctxSuffix += fmt.Sprintf("; config ctx: %s", overrides.CurrentContext) + log.Printf("[DEBUG] Using custom current context: %q", overrides.CurrentContext) + } + + overrides.Context = clientcmdapi.Context{} + if authInfoOk { + overrides.Context.AuthInfo = authInfo.(string) + ctxSuffix += fmt.Sprintf("; auth_info: %s", overrides.Context.AuthInfo) + } + if clusterOk { + overrides.Context.Cluster = cluster.(string) + ctxSuffix += fmt.Sprintf("; cluster: %s", overrides.Context.Cluster) + } + log.Printf("[DEBUG] Using overidden context: %#v", overrides.Context) + } + + if v, ok := d.GetOk("exec"); ok { + exec := &clientcmdapi.ExecConfig{} + if spec, ok := v.([]interface{})[0].(map[string]interface{}); ok { + exec.APIVersion = spec["api_version"].(string) + exec.Command = spec["command"].(string) + exec.Args = expandStringSlice(spec["args"].([]interface{})) + for kk, vv := range spec["env"].(map[string]interface{}) { + exec.Env = append(exec.Env, clientcmdapi.ExecEnvVar{Name: kk, Value: vv.(string)}) + } + } else { + return nil, fmt.Errorf("Failed to parse exec") + } + overrides.AuthInfo.Exec = exec + } + + cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides) + cfg, err := cc.ClientConfig() + if err != nil { + if pathErr, ok := err.(*os.PathError); ok && os.IsNotExist(pathErr.Err) { + log.Printf("[INFO] Unable to load config file as it doesn't exist at %q", pathErr.Path) + return nil, nil + } + return nil, fmt.Errorf("Failed to initialize kubernetes configuration: %s", err) + } + + log.Printf("[INFO] Successfully initialized 
config") + return cfg, nil +} + +func expandStringSlice(s []interface{}) []string { + result := make([]string, len(s), len(s)) + for k, v := range s { + // Handle the Terraform parser bug which turns empty strings in lists to nil. + if v == nil { + result[k] = "" + } else { + result[k] = v.(string) + } + } + return result +} diff --git a/backend/remote-state/kubernetes/backend_state.go b/backend/remote-state/kubernetes/backend_state.go new file mode 100644 index 000000000000..85a9e23d9547 --- /dev/null +++ b/backend/remote-state/kubernetes/backend_state.go @@ -0,0 +1,170 @@ +package kubernetes + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Workspaces returns a list of names for the workspaces found in k8s. The default +// workspace is always returned as the first element in the slice. 
+func (b *Backend) Workspaces() ([]string, error) { + secretClient, err := b.KubernetesSecretClient() + if err != nil { + return nil, err + } + + secrets, err := secretClient.List( + context.Background(), + metav1.ListOptions{ + LabelSelector: tfstateKey + "=true", + }, + ) + if err != nil { + return nil, err + } + + // Use a map so there aren't duplicate workspaces + m := make(map[string]struct{}) + for _, secret := range secrets.Items { + sl := secret.GetLabels() + ws, ok := sl[tfstateWorkspaceKey] + if !ok { + continue + } + + key, ok := sl[tfstateSecretSuffixKey] + if !ok { + continue + } + + // Make sure it isn't default and the key matches + if ws != backend.DefaultStateName && key == b.nameSuffix { + m[ws] = struct{}{} + } + } + + states := []string{backend.DefaultStateName} + for k := range m { + states = append(states, k) + } + + sort.Strings(states[1:]) + return states, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + + return client.Delete() +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + c, err := b.remoteClient(name) + if err != nil { + return nil, err + } + + stateMgr := &remote.State{Client: c} + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, err + } + + secretName, err := c.createSecretName() + if err != nil { + return nil, err + } + + // Local helper function so we can call it multiple places + unlock := func(baseErr error) error { + if err := stateMgr.Unlock(lockID); err != nil { + const unlockErrMsg = `%v + Additionally, unlocking the 
state in Kubernetes failed: + + Error message: %q + Lock ID (gen): %v + Secret Name: %v + + You may have to force-unlock this state in order to use it again. + The Kubernetes backend acquires a lock during initialization to ensure + the initial state file is created.` + return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, secretName) + } + + return baseErr + } + + if err := stateMgr.WriteState(states.NewState()); err != nil { + return nil, unlock(err) + } + if err := stateMgr.PersistState(nil); err != nil { + return nil, unlock(err) + } + + // Unlock, the state should now be initialized + if err := unlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + secretClient, err := b.KubernetesSecretClient() + if err != nil { + return nil, err + } + + leaseClient, err := b.KubernetesLeaseClient() + if err != nil { + return nil, err + } + + client := &RemoteClient{ + kubernetesSecretClient: secretClient, + kubernetesLeaseClient: leaseClient, + namespace: b.namespace, + labels: b.labels, + nameSuffix: b.nameSuffix, + workspace: name, + } + + return client, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} diff --git a/backend/remote-state/kubernetes/backend_test.go b/backend/remote-state/kubernetes/backend_test.go new file mode 100644 index 000000000000..9aa88ecaf321 --- /dev/null +++ b/backend/remote-state/kubernetes/backend_test.go @@ -0,0 +1,196 @@ +package kubernetes + +import ( + "context" + "fmt" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/statemgr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + secretSuffix = "test-state" +) + +var namespace string + +// verify that we are doing ACC tests or the k8s 
tests specifically +func testACC(t *testing.T) { + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_K8S_TEST") == "" + if skip { + t.Log("k8s backend tests require setting TF_ACC or TF_K8S_TEST") + t.Skip() + } + + ns := os.Getenv("KUBE_NAMESPACE") + + if ns != "" { + namespace = ns + } else { + namespace = "default" + } + + cleanupK8sResources(t) +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackend(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // Test + backend.TestBackendStates(t, b1) +} + +func TestBackendLocks(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + // Get the backend. We need two to test locking. + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // Test + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func TestBackendLocksSoak(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + clientCount := 100 + lockAttempts := 100 + + lockers := []statemgr.Locker{} + for i := 0; i < clientCount; i++ { + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("Error creating state manager: %v", err) + } + + lockers = append(lockers, s.(statemgr.Locker)) + } + + wg := sync.WaitGroup{} + for i, l := range lockers { + wg.Add(1) + go func(locker statemgr.Locker, n int) { + defer wg.Done() + + li := statemgr.NewLockInfo() + li.Operation = "test" + li.Who = fmt.Sprintf("client-%v", n) + + for i := 0; i < lockAttempts; i++ { + id, err 
:= locker.Lock(li) + if err != nil { + continue + } + + // hold onto the lock for a little bit + time.Sleep(time.Duration(rand.Intn(10)) * time.Microsecond) + + err = locker.Unlock(id) + if err != nil { + t.Errorf("failed to unlock: %v", err) + } + } + }(l, i) + } + + wg.Wait() +} + +func cleanupK8sResources(t *testing.T) { + ctx := context.Background() + // Get a backend to use the k8s client + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b := b1.(*Backend) + + sClient, err := b.KubernetesSecretClient() + if err != nil { + t.Fatal(err) + } + + // Delete secrets + opts := metav1.ListOptions{LabelSelector: tfstateKey + "=true"} + secrets, err := sClient.List(ctx, opts) + if err != nil { + t.Fatal(err) + } + + delProp := metav1.DeletePropagationBackground + delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} + var errs []error + + for _, secret := range secrets.Items { + labels := secret.GetLabels() + key, ok := labels[tfstateSecretSuffixKey] + if !ok { + continue + } + + if key == secretSuffix { + err = sClient.Delete(ctx, secret.GetName(), delOps) + if err != nil { + errs = append(errs, err) + } + } + } + + leaseClient, err := b.KubernetesLeaseClient() + if err != nil { + t.Fatal(err) + } + + // Delete leases + leases, err := leaseClient.List(ctx, opts) + if err != nil { + t.Fatal(err) + } + + for _, lease := range leases.Items { + labels := lease.GetLabels() + key, ok := labels[tfstateSecretSuffixKey] + if !ok { + continue + } + + if key == secretSuffix { + err = leaseClient.Delete(ctx, lease.GetName(), delOps) + if err != nil { + errs = append(errs, err) + } + } + } + + if len(errs) > 0 { + t.Fatal(errs) + } +} diff --git a/backend/remote-state/kubernetes/client.go b/backend/remote-state/kubernetes/client.go new file mode 100644 index 000000000000..143fe61c6b34 --- /dev/null +++ b/backend/remote-state/kubernetes/client.go @@ -0,0 +1,413 @@ +package kubernetes + +import 
( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/client-go/dynamic" + _ "k8s.io/client-go/plugin/pkg/client/auth" // Import to initialize client auth plugins. + "k8s.io/utils/pointer" + + coordinationv1 "k8s.io/api/coordination/v1" + coordinationclientv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" +) + +const ( + tfstateKey = "tfstate" + tfstateSecretSuffixKey = "tfstateSecretSuffix" + tfstateWorkspaceKey = "tfstateWorkspace" + tfstateLockInfoAnnotation = "app.terraform.io/lock-info" + managedByKey = "app.kubernetes.io/managed-by" +) + +type RemoteClient struct { + kubernetesSecretClient dynamic.ResourceInterface + kubernetesLeaseClient coordinationclientv1.LeaseInterface + namespace string + labels map[string]string + nameSuffix string + workspace string +} + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + secretName, err := c.createSecretName() + if err != nil { + return nil, err + } + secret, err := c.kubernetesSecretClient.Get(context.Background(), secretName, metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + secretData := getSecretData(secret) + stateRaw, ok := secretData[tfstateKey] + if !ok { + // The secret exists but there is no state in it + return nil, nil + } + + stateRawString := stateRaw.(string) + + state, err := uncompressState(stateRawString) + if err != nil { + return nil, err + } + + md5 := md5.Sum(state) + + p := &remote.Payload{ + Data: state, + MD5: md5[:], + } + return p, nil +} + +func (c *RemoteClient) Put(data []byte) error { + ctx := context.Background() + 
secretName, err := c.createSecretName() + if err != nil { + return err + } + + payload, err := compressState(data) + if err != nil { + return err + } + + secret, err := c.getSecret(secretName) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + + secret = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": metav1.ObjectMeta{ + Name: secretName, + Namespace: c.namespace, + Labels: c.getLabels(), + Annotations: map[string]string{"encoding": "gzip"}, + }, + }, + } + + secret, err = c.kubernetesSecretClient.Create(ctx, secret, metav1.CreateOptions{}) + if err != nil { + return err + } + } + + setState(secret, payload) + _, err = c.kubernetesSecretClient.Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// Delete the state secret +func (c *RemoteClient) Delete() error { + secretName, err := c.createSecretName() + if err != nil { + return err + } + + err = c.deleteSecret(secretName) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + + leaseName, err := c.createLeaseName() + if err != nil { + return err + } + + err = c.deleteLease(leaseName) + if err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + ctx := context.Background() + leaseName, err := c.createLeaseName() + if err != nil { + return "", err + } + + lease, err := c.getLease(leaseName) + if err != nil { + if !k8serrors.IsNotFound(err) { + return "", err + } + + labels := c.getLabels() + lease = &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: leaseName, + Labels: labels, + Annotations: map[string]string{ + tfstateLockInfoAnnotation: string(info.Marshal()), + }, + }, + Spec: coordinationv1.LeaseSpec{ + HolderIdentity: pointer.StringPtr(info.ID), + }, + } + + _, err = c.kubernetesLeaseClient.Create(ctx, lease, metav1.CreateOptions{}) + if err != nil { + return "", err + } else { + return info.ID, nil + } + } + + if 
lease.Spec.HolderIdentity != nil { + if *lease.Spec.HolderIdentity == info.ID { + return info.ID, nil + } + + currentLockInfo, err := c.getLockInfo(lease) + if err != nil { + return "", err + } + + lockErr := &statemgr.LockError{ + Info: currentLockInfo, + Err: errors.New("the state is already locked by another terraform client"), + } + return "", lockErr + } + + lease.Spec.HolderIdentity = pointer.StringPtr(info.ID) + setLockInfo(lease, info.Marshal()) + _, err = c.kubernetesLeaseClient.Update(ctx, lease, metav1.UpdateOptions{}) + if err != nil { + return "", err + } + + return info.ID, err +} + +func (c *RemoteClient) Unlock(id string) error { + leaseName, err := c.createLeaseName() + if err != nil { + return err + } + + lease, err := c.getLease(leaseName) + if err != nil { + return err + } + + if lease.Spec.HolderIdentity == nil { + return fmt.Errorf("state is already unlocked") + } + + lockInfo, err := c.getLockInfo(lease) + if err != nil { + return err + } + + lockErr := &statemgr.LockError{Info: lockInfo} + if *lease.Spec.HolderIdentity != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + lease.Spec.HolderIdentity = nil + removeLockInfo(lease) + + _, err = c.kubernetesLeaseClient.Update(context.Background(), lease, metav1.UpdateOptions{}) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} + +func (c *RemoteClient) getLockInfo(lease *coordinationv1.Lease) (*statemgr.LockInfo, error) { + lockData, ok := getLockInfo(lease) + if len(lockData) == 0 || !ok { + return nil, nil + } + + lockInfo := &statemgr.LockInfo{} + err := json.Unmarshal(lockData, lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +func (c *RemoteClient) getLabels() map[string]string { + l := map[string]string{ + tfstateKey: "true", + tfstateSecretSuffixKey: c.nameSuffix, + tfstateWorkspaceKey: c.workspace, + managedByKey: "terraform", + } + + if len(c.labels) != 0 { + for k, v := range 
c.labels { + l[k] = v + } + } + + return l +} + +func (c *RemoteClient) getSecret(name string) (*unstructured.Unstructured, error) { + return c.kubernetesSecretClient.Get(context.Background(), name, metav1.GetOptions{}) +} + +func (c *RemoteClient) getLease(name string) (*coordinationv1.Lease, error) { + return c.kubernetesLeaseClient.Get(context.Background(), name, metav1.GetOptions{}) +} + +func (c *RemoteClient) deleteSecret(name string) error { + secret, err := c.getSecret(name) + if err != nil { + return err + } + + labels := secret.GetLabels() + v, ok := labels[tfstateKey] + if !ok || v != "true" { + return fmt.Errorf("Secret does does not have %q label", tfstateKey) + } + + delProp := metav1.DeletePropagationBackground + delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} + return c.kubernetesSecretClient.Delete(context.Background(), name, delOps) +} + +func (c *RemoteClient) deleteLease(name string) error { + secret, err := c.getLease(name) + if err != nil { + return err + } + + labels := secret.GetLabels() + v, ok := labels[tfstateKey] + if !ok || v != "true" { + return fmt.Errorf("Lease does does not have %q label", tfstateKey) + } + + delProp := metav1.DeletePropagationBackground + delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} + return c.kubernetesLeaseClient.Delete(context.Background(), name, delOps) +} + +func (c *RemoteClient) createSecretName() (string, error) { + secretName := strings.Join([]string{tfstateKey, c.workspace, c.nameSuffix}, "-") + + errs := validation.IsDNS1123Subdomain(secretName) + if len(errs) > 0 { + k8sInfo := ` +This is a requirement for Kubernetes secret names. 
+The workspace name and key must adhere to Kubernetes naming conventions.` + msg := fmt.Sprintf("the secret name %v is invalid, ", secretName) + return "", errors.New(msg + strings.Join(errs, ",") + k8sInfo) + } + + return secretName, nil +} + +func (c *RemoteClient) createLeaseName() (string, error) { + n, err := c.createSecretName() + if err != nil { + return "", err + } + return "lock-" + n, nil +} + +func compressState(data []byte) ([]byte, error) { + b := new(bytes.Buffer) + gz := gzip.NewWriter(b) + if _, err := gz.Write(data); err != nil { + return nil, err + } + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func uncompressState(data string) ([]byte, error) { + decode, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return nil, err + } + + b := new(bytes.Buffer) + gz, err := gzip.NewReader(bytes.NewReader(decode)) + if err != nil { + return nil, err + } + b.ReadFrom(gz) + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func getSecretData(secret *unstructured.Unstructured) map[string]interface{} { + if m, ok := secret.Object["data"].(map[string]interface{}); ok { + return m + } + return map[string]interface{}{} +} + +func getLockInfo(lease *coordinationv1.Lease) ([]byte, bool) { + info, ok := lease.ObjectMeta.GetAnnotations()[tfstateLockInfoAnnotation] + if !ok { + return nil, false + } + return []byte(info), true +} + +func setLockInfo(lease *coordinationv1.Lease, l []byte) { + annotations := lease.ObjectMeta.GetAnnotations() + if annotations != nil { + annotations[tfstateLockInfoAnnotation] = string(l) + } else { + annotations = map[string]string{ + tfstateLockInfoAnnotation: string(l), + } + } + lease.ObjectMeta.SetAnnotations(annotations) +} + +func removeLockInfo(lease *coordinationv1.Lease) { + annotations := lease.ObjectMeta.GetAnnotations() + delete(annotations, tfstateLockInfoAnnotation) + lease.ObjectMeta.SetAnnotations(annotations) +} + +func 
setState(secret *unstructured.Unstructured, t []byte) { + secretData := getSecretData(secret) + secretData[tfstateKey] = t + secret.Object["data"] = secretData +} diff --git a/backend/remote-state/kubernetes/client_test.go b/backend/remote-state/kubernetes/client_test.go new file mode 100644 index 000000000000..3026afc2d8ad --- /dev/null +++ b/backend/remote-state/kubernetes/client_test.go @@ -0,0 +1,119 @@ +package kubernetes + +import ( + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestForceUnlock(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(), 
backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal("failed to get default state to force unlock:", err) + } + + if err := s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock default state") + } + + // now try the same thing with a named state + // first test with default + s1, err = b1.StateMgr("test") + if err != nil { + t.Fatal(err) + } + + info = statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err = s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err = b2.StateMgr("test") + if err != nil { + t.Fatal("failed to get named state to force unlock:", err) + } + + if err = s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock named state") + } +} diff --git a/backend/remote-state/oss/backend.go b/backend/remote-state/oss/backend.go new file mode 100644 index 000000000000..013f5485a18b --- /dev/null +++ b/backend/remote-state/oss/backend.go @@ -0,0 +1,706 @@ +package oss + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + 
"github.com/aliyun/alibaba-cloud-sdk-go/services/location" + "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-cleanhttp" + "github.com/jmespath/go-jmespath" + "github.com/mitchellh/go-homedir" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/hashicorp/terraform/version" +) + +// Deprecated in favor of flattening assume_role_* options +func deprecatedAssumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Deprecated: "use assume_role_* options instead", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of a RAM role to assume prior to making API calls.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), + }, + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role. 
You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", + }, + "session_expiration": { + Type: schema.TypeInt, + Optional: true, + Description: "The time after which the established session for assuming role expires.", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + min := 900 + max := 3600 + value, ok := v.(int) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be int", k)} + } + + if value < min || value > max { + return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} + } + + return nil, nil + }, + }, + }, + }, + } +} + +// New creates a new backend for OSS remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Key ID", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")), + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Secret Key", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")), + }, + + "security_token": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Security Token", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""), + }, + + "ecs_role_name": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ECS_ROLE_NAME", os.Getenv("ALICLOUD_ECS_ROLE_NAME")), + Description: "The RAM Role Name attached on a ECS instance for API operations. 
You can retrieve this from the 'Access Control' section of the Alibaba Cloud console.", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The region of the OSS bucket.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")), + }, + "sts_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the STS API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_STS_ENDPOINT", ""), + }, + "tablestore_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the TableStore API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""), + }, + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the OSS API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")), + }, + + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the OSS bucket", + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory where state files will be saved inside the bucket", + Default: "env:", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { + return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")} + } + return nil, nil + }, + }, + + "key": { + Type: schema.TypeString, + Optional: true, + Description: "The path of the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { + return nil, []error{fmt.Errorf("key can not start and end with '/'")} + } + return nil, nil + }, + Default: "terraform.tfstate", + }, + + "tablestore_table": { + Type: schema.TypeString, + Optional: true, + Description: "TableStore table for state locking and 
consistency", + Default: "", + }, + + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: false, + }, + + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Object ACL to be applied to the state file", + Default: "", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + if value := v.(string); value != "" { + acls := oss.ACLType(value) + if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite { + return nil, []error{fmt.Errorf( + "%q must be a valid ACL value , expected %s, %s or %s, got %q", + k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)} + } + } + return nil, nil + }, + }, + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SHARED_CREDENTIALS_FILE", ""), + Description: "This is the path to the shared credentials file. If this is not set and a profile is specified, `~/.aliyun/config.json` will be used.", + }, + "profile": { + Type: schema.TypeString, + Optional: true, + Description: "This is the Alibaba Cloud profile name as set in the shared credentials file. 
It can also be sourced from the `ALICLOUD_PROFILE` environment variable.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_PROFILE", ""), + }, + "assume_role": deprecatedAssumeRoleSchema(), + "assume_role_role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "The ARN of a RAM role to assume prior to making API calls.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), + }, + "assume_role_session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), + }, + "assume_role_policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role. You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", + }, + "assume_role_session_expiration": { + Type: schema.TypeInt, + Optional: true, + Description: "The time after which the established session for assuming role expires.", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + min := 900 + max := 3600 + value, ok := v.(int) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be int", k)} + } + + if value < min || value > max { + return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} + } + + return nil, nil + }, + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + + bucketName string + statePrefix string + stateKey string + serverSideEncryption bool + acl string + otsEndpoint string + otsTable string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.ossClient != nil { + return nil + } + + // Grab the resource data + d := 
schema.FromContextBackendConfig(ctx) + + b.bucketName = d.Get("bucket").(string) + b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./") + b.stateKey = d.Get("key").(string) + b.serverSideEncryption = d.Get("encrypt").(bool) + b.acl = d.Get("acl").(string) + + var getBackendConfig = func(str string, key string) string { + if str == "" { + value, err := getConfigFromProfile(d, key) + if err == nil && value != nil { + str = value.(string) + } + } + return str + } + + accessKey := getBackendConfig(d.Get("access_key").(string), "access_key_id") + secretKey := getBackendConfig(d.Get("secret_key").(string), "access_key_secret") + securityToken := getBackendConfig(d.Get("security_token").(string), "sts_token") + region := getBackendConfig(d.Get("region").(string), "region_id") + + stsEndpoint := d.Get("sts_endpoint").(string) + endpoint := d.Get("endpoint").(string) + schma := "https" + + roleArn := getBackendConfig("", "ram_role_arn") + sessionName := getBackendConfig("", "ram_session_name") + var policy string + var sessionExpiration int + expiredSeconds, err := getConfigFromProfile(d, "expired_seconds") + if err == nil && expiredSeconds != nil { + sessionExpiration = (int)(expiredSeconds.(float64)) + } + + if v, ok := d.GetOk("assume_role_role_arn"); ok && v.(string) != "" { + roleArn = v.(string) + if v, ok := d.GetOk("assume_role_session_name"); ok { + sessionName = v.(string) + } + if v, ok := d.GetOk("assume_role_policy"); ok { + policy = v.(string) + } + if v, ok := d.GetOk("assume_role_session_expiration"); ok { + sessionExpiration = v.(int) + } + } else if v, ok := d.GetOk("assume_role"); ok { + // deprecated assume_role block + for _, v := range v.(*schema.Set).List() { + assumeRole := v.(map[string]interface{}) + if assumeRole["role_arn"].(string) != "" { + roleArn = assumeRole["role_arn"].(string) + } + if assumeRole["session_name"].(string) != "" { + sessionName = assumeRole["session_name"].(string) + } + policy = 
assumeRole["policy"].(string) + sessionExpiration = assumeRole["session_expiration"].(int) + } + } + + if sessionName == "" { + sessionName = "terraform" + } + if sessionExpiration == 0 { + if v := os.Getenv("ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"); v != "" { + if expiredSeconds, err := strconv.Atoi(v); err == nil { + sessionExpiration = expiredSeconds + } + } + if sessionExpiration == 0 { + sessionExpiration = 3600 + } + } + + if accessKey == "" { + ecsRoleName := getBackendConfig(d.Get("ecs_role_name").(string), "ram_role_name") + subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAuthCredentialByEcsRoleName(ecsRoleName) + if err != nil { + return err + } + accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken + } + + if roleArn != "" { + subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAssumeRoleAK(accessKey, secretKey, securityToken, region, roleArn, sessionName, policy, stsEndpoint, sessionExpiration) + if err != nil { + return err + } + accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken + } + + if endpoint == "" { + endpointsResponse, err := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region) + if err != nil { + log.Printf("[WARN] getting oss endpoint failed and using oss-%s.aliyuncs.com instead. 
Error: %#v.", region, err) + } else { + for _, endpointItem := range endpointsResponse.Endpoints.Endpoint { + if endpointItem.Type == "openAPI" { + endpoint = endpointItem.Endpoint + break + } + } + } + if endpoint == "" { + endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) + } + } + if !strings.HasPrefix(endpoint, "http") { + endpoint = fmt.Sprintf("%s://%s", schma, endpoint) + } + log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint) + var options []oss.ClientOption + if securityToken != "" { + options = append(options, oss.SecurityToken(securityToken)) + } + options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion))) + + proxyUrl := getHttpProxyUrl() + if proxyUrl != nil { + options = append(options, oss.Proxy(proxyUrl.String())) + } + + client, err := oss.New(endpoint, accessKey, secretKey, options...) + b.ossClient = client + otsEndpoint := d.Get("tablestore_endpoint").(string) + if otsEndpoint != "" { + if !strings.HasPrefix(otsEndpoint, "http") { + otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint) + } + b.otsEndpoint = otsEndpoint + parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".") + b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig()) + } + b.otsTable = d.Get("tablestore_table").(string) + + return err +} + +func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointsResponse, error) { + args := location.CreateDescribeEndpointsRequest() + args.ServiceCode = "oss" + args.Id = region + args.Domain = "location-readonly.aliyuncs.com" + + locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token)) + if err != nil { + return nil, fmt.Errorf("unable to initialize the location client: %#v", err) + + } + 
locationClient.AppendUserAgent(TerraformUA, TerraformVersion) + endpointsResponse, err := locationClient.DescribeEndpoints(args) + if err != nil { + return nil, fmt.Errorf("describe oss endpoint using region: %#v got an error: %#v", region, err) + } + return endpointsResponse, nil +} + +func getAssumeRoleAK(accessKey, secretKey, stsToken, region, roleArn, sessionName, policy, stsEndpoint string, sessionExpiration int) (string, string, string, error) { + request := sts.CreateAssumeRoleRequest() + request.RoleArn = roleArn + request.RoleSessionName = sessionName + request.DurationSeconds = requests.NewInteger(sessionExpiration) + request.Policy = policy + request.Scheme = "https" + + var client *sts.Client + var err error + if stsToken == "" { + client, err = sts.NewClientWithAccessKey(region, accessKey, secretKey) + } else { + client, err = sts.NewClientWithStsToken(region, accessKey, secretKey, stsToken) + } + if err != nil { + return "", "", "", err + } + if stsEndpoint != "" { + endpoints.AddEndpointMapping(region, "STS", stsEndpoint) + } + response, err := client.AssumeRole(request) + if err != nil { + return "", "", "", err + } + return response.Credentials.AccessKeyId, response.Credentials.AccessKeySecret, response.Credentials.SecurityToken, nil +} + +func getSdkConfig() *sdk.Config { + return sdk.NewConfig(). + WithMaxRetryTime(5). + WithTimeout(time.Duration(30) * time.Second). + WithGoRoutinePoolSize(10). + WithDebug(false). + WithHttpTransport(getTransport()). 
+ WithScheme("HTTPS") +} + +func getTransport() *http.Transport { + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 120 + } + transport := cleanhttp.DefaultTransport() + transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second + transport.Proxy = http.ProxyFromEnvironment + return transport +} + +type Invoker struct { + catchers []*Catcher +} + +type Catcher struct { + Reason string + RetryCount int + RetryWaitSeconds int +} + +const TerraformUA = "HashiCorp-Terraform" + +var TerraformVersion = strings.TrimSuffix(version.String(), "-dev") +var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3} +var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3} + +func NewInvoker() Invoker { + i := Invoker{} + i.AddCatcher(ClientErrorCatcher) + i.AddCatcher(ServiceBusyCatcher) + return i +} + +func (a *Invoker) AddCatcher(catcher Catcher) { + a.catchers = append(a.catchers, &catcher) +} + +func (a *Invoker) Run(f func() error) error { + err := f() + + if err == nil { + return nil + } + + for _, catcher := range a.catchers { + if strings.Contains(err.Error(), catcher.Reason) { + catcher.RetryCount-- + + if catcher.RetryCount <= 0 { + return fmt.Errorf("retry timeout and got an error: %#v", err) + } else { + time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second) + return a.Run(f) + } + } + } + return err +} + +var providerConfig map[string]interface{} + +func getConfigFromProfile(d *schema.ResourceData, ProfileKey string) (interface{}, error) { + + if providerConfig == nil { + if v, ok := d.GetOk("profile"); !ok || v.(string) == "" { + return nil, nil + } + current := d.Get("profile").(string) + // Set CredsFilename, expanding home directory + profilePath, err := homedir.Expand(d.Get("shared_credentials_file").(string)) + if err != nil { + return nil, err + } + if profilePath == "" { + profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("HOME")) + if 
runtime.GOOS == "windows" { + profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("USERPROFILE")) + } + } + providerConfig = make(map[string]interface{}) + _, err = os.Stat(profilePath) + if !os.IsNotExist(err) { + data, err := ioutil.ReadFile(profilePath) + if err != nil { + return nil, err + } + config := map[string]interface{}{} + err = json.Unmarshal(data, &config) + if err != nil { + return nil, err + } + for _, v := range config["profiles"].([]interface{}) { + if current == v.(map[string]interface{})["name"] { + providerConfig = v.(map[string]interface{}) + } + } + } + } + + mode := "" + if v, ok := providerConfig["mode"]; ok { + mode = v.(string) + } else { + return v, nil + } + switch ProfileKey { + case "access_key_id", "access_key_secret": + if mode == "EcsRamRole" { + return "", nil + } + case "ram_role_name": + if mode != "EcsRamRole" { + return "", nil + } + case "sts_token": + if mode != "StsToken" { + return "", nil + } + case "ram_role_arn", "ram_session_name": + if mode != "RamRoleArn" { + return "", nil + } + case "expired_seconds": + if mode != "RamRoleArn" { + return float64(0), nil + } + } + + return providerConfig[ProfileKey], nil +} + +var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +// getAuthCredentialByEcsRoleName aims to access meta to get sts credential +// Actually, the job should be done by sdk, but currently not all resources and products support alibaba-cloud-sdk-go, +// and their go sdk does support ecs role name. 
+// This method is a temporary solution and it should be removed after all go sdk support ecs role name +// The related PR: https://github.com/terraform-providers/terraform-provider-alicloud/pull/731 +func getAuthCredentialByEcsRoleName(ecsRoleName string) (accessKey, secretKey, token string, err error) { + + if ecsRoleName == "" { + return + } + requestUrl := securityCredURL + ecsRoleName + httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader("")) + if err != nil { + err = fmt.Errorf("build sts requests err: %s", err.Error()) + return + } + httpClient := &http.Client{} + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + err = fmt.Errorf("get Ecs sts token err : %s", err.Error()) + return + } + + response := responses.NewCommonResponse() + err = responses.Unmarshal(response, httpResponse, "") + if err != nil { + err = fmt.Errorf("unmarshal Ecs sts token response err : %s", err.Error()) + return + } + + if response.GetHttpStatus() != http.StatusOK { + err = fmt.Errorf("get Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString()) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + code, err := jmespath.Search("Code", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get Code: %s", err.Error()) + return + } + if code.(string) != "Success" { + err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") + return + } + accessKeyId, err := jmespath.Search("AccessKeyId", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %s", err.Error()) + return + } + accessKeySecret, err := jmespath.Search("AccessKeySecret", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %s", err.Error()) + return 
+ } + securityToken, err := jmespath.Search("SecurityToken", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %s", err.Error()) + return + } + + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil { + err = fmt.Errorf("there is no any available accesskey, secret and security token for Ecs role %s", ecsRoleName) + return + } + + return accessKeyId.(string), accessKeySecret.(string), securityToken.(string), nil +} + +func getHttpProxyUrl() *url.URL { + for _, v := range []string{"HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"} { + value := strings.Trim(os.Getenv(v), " ") + if value != "" { + if !regexp.MustCompile(`^http(s)?://`).MatchString(value) { + value = fmt.Sprintf("https://%s", value) + } + proxyUrl, err := url.Parse(value) + if err == nil { + return proxyUrl + } + break + } + } + return nil +} diff --git a/backend/remote-state/oss/backend_state.go b/backend/remote-state/oss/backend_state.go new file mode 100644 index 000000000000..0091100edea0 --- /dev/null +++ b/backend/remote-state/oss/backend_state.go @@ -0,0 +1,197 @@ +package oss + +import ( + "errors" + "fmt" + "log" + "path" + "sort" + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + lockFileSuffix = ".tflock" +) + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + ossClient: b.ossClient, + bucketName: b.bucketName, + stateFile: b.stateFile(name), + lockFile: b.lockFile(name), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + otsTable: b.otsTable, + otsClient: b.otsClient, + } + if 
b.otsEndpoint != "" && b.otsTable != "" {
		// Fail fast if the configured lock table does not exist or is unreachable.
		_, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{
			TableName: b.otsTable,
		})
		if err != nil {
			return client, fmt.Errorf("error describing table store %s: %#v", b.otsTable, err)
		}
	}

	return client, nil
}

// Workspaces lists the workspace names found under the state prefix in the
// OSS bucket. The default workspace is always first; the remainder are
// sorted. Listing is paginated 1000 keys at a time via the OSS Marker option.
func (b *Backend) Workspaces() ([]string, error) {
	bucket, err := b.ossClient.Bucket(b.bucketName)
	if err != nil {
		return []string{""}, fmt.Errorf("error getting bucket: %#v", err)
	}

	var options []oss.Option
	options = append(options, oss.Prefix(b.statePrefix+"/"), oss.MaxKeys(1000))
	resp, err := bucket.ListObjects(options...)
	if err != nil {
		return nil, err
	}

	result := []string{backend.DefaultStateName}
	prefix := b.statePrefix
	// lastObj tracks the last key seen so it can seed the next page's Marker.
	lastObj := ""
	for {
		for _, obj := range resp.Objects {
			// we have 3 parts, the state prefix, the workspace name, and the state file: //
			if path.Join(b.statePrefix, b.stateKey) == obj.Key {
				// filter the default workspace
				continue
			}
			lastObj = obj.Key
			// The first path segment after the prefix is the workspace name.
			parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/")
			if len(parts) > 0 && parts[0] != "" {
				result = append(result, parts[0])
			}
		}
		if resp.IsTruncated {
			// Reuse the Marker slot (index 2) on subsequent pages; append it
			// the first time through.
			if len(options) == 3 {
				options[2] = oss.Marker(lastObj)
			} else {
				options = append(options, oss.Marker(lastObj))
			}
			resp, err = bucket.ListObjects(options...)
			if err != nil {
				return nil, err
			}
		} else {
			break
		}
	}
	// Keep DefaultStateName at index 0; sort only the discovered workspaces.
	sort.Strings(result[1:])
	return result, nil
}

// DeleteWorkspace removes the named workspace's state object from the bucket.
// The default workspace cannot be deleted.
func (b *Backend) DeleteWorkspace(name string, _ bool) error {
	if name == backend.DefaultStateName || name == "" {
		return fmt.Errorf("can't delete default state")
	}

	client, err := b.remoteClient(name)
	if err != nil {
		return err
	}
	return client.Delete()
}

// StateMgr returns a state manager for the named workspace, creating and
// initializing an empty state (under a lock) if the workspace is new.
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
	client, err := b.remoteClient(name)
	if err != nil {
		return nil, err
	}
	stateMgr := &remote.State{Client: client}

	// Check to see if this state already exists.
+ existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + log.Printf("[DEBUG] Current workspace name: %s. All workspaces:%#v", name, existing) + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + // We need to create the object so it's listed by States. + if !exists { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock OSS state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(e error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err) + } + return e + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + return stateMgr, nil +} + +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName { + return path.Join(b.statePrefix, b.stateKey) + } + return path.Join(b.statePrefix, name, b.stateKey) +} + +func (b *Backend) lockFile(name string) string { + return b.stateFile(name) + lockFileSuffix +} + +const stateUnlockError = ` +Error unlocking Alibaba Cloud OSS state file: + +Lock ID: %s +Error message: %#v + +You may have to force-unlock this state in order to use it again. +The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created. 
+` diff --git a/backend/remote-state/oss/backend_test.go b/backend/remote-state/oss/backend_test.go new file mode 100644 index 000000000000..7144837e4c79 --- /dev/null +++ b/backend/remote-state/oss/backend_test.go @@ -0,0 +1,250 @@ +package oss + +import ( + "fmt" + "math/rand" + "os" + "testing" + "time" + + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +// verify that we are doing ACC tests or the OSS tests specifically +func testACC(t *testing.T) { + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == "" + if skip { + t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST") + t.Skip() + } + if skip { + t.Fatal("oss backend tests require setting ALICLOUD_ACCESS_KEY or ALICLOUD_ACCESS_KEY_ID") + } + if os.Getenv("ALICLOUD_REGION") == "" { + os.Setenv("ALICLOUD_REGION", "cn-beijing") + } +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != "terraform-backend-oss-test" { + t.Fatalf("Incorrect bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + 
if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfigWorkSpace(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-backend-oss-test-%d", rand.Intn(1000)) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": bucketName, + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + if _, err := b.Workspaces(); err != nil { + t.Fatal(err.Error()) + } + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != bucketName { + t.Fatalf("Incorrect bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfigProfile(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + "profile": "default", + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != "terraform-backend-oss-test" { + t.Fatalf("Incorrect 
bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfig_invalidKey(t *testing.T) { + testACC(t) + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "/leading-slash", + "name": "/test.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + }) + + _, results := New().PrepareConfig(cfg) + if !results.HasErrors() { + t.Fatal("expected config validation error") + } +} + +func TestBackend(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix()) + statePrefix := "multi/level/path/" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": statePrefix, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": statePrefix, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { + // Be clear about what we're doing in case the user needs to clean this up later. 
+ if err := ossClient.CreateBucket(bucketName); err != nil { + t.Fatal("failed to create test OSS bucket:", err) + } +} + +func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { + warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alibaba Cloud account and may incur storage charges. (error was %s)" + + // first we have to get rid of the env objects, or we can't delete the bucket + bucket, err := ossClient.Bucket(bucketName) + if err != nil { + t.Fatal("Error getting bucket:", err) + return + } + objects, err := bucket.ListObjects() + if err != nil { + t.Logf(warning, err) + return + } + for _, obj := range objects.Objects { + if err := bucket.DeleteObject(obj.Key); err != nil { + // this will need cleanup no matter what, so just warn and exit + t.Logf(warning, err) + return + } + } + + if err := ossClient.DeleteBucket(bucketName); err != nil { + t.Logf(warning, err) + } +} + +// create the tablestore table, and wait until we can query it. +func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { + tableMeta := new(tablestore.TableMeta) + tableMeta.TableName = tableName + tableMeta.AddPrimaryKeyColumn(pkName, tablestore.PrimaryKeyType_STRING) + + tableOption := new(tablestore.TableOption) + tableOption.TimeToAlive = -1 + tableOption.MaxVersion = 1 + + reservedThroughput := new(tablestore.ReservedThroughput) + + _, err := otsClient.CreateTable(&tablestore.CreateTableRequest{ + TableMeta: tableMeta, + TableOption: tableOption, + ReservedThroughput: reservedThroughput, + }) + if err != nil { + t.Fatal(err) + } +} + +func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { + params := &tablestore.DeleteTableRequest{ + TableName: tableName, + } + _, err := otsClient.DeleteTable(params) + if err != nil { + t.Logf("WARNING: Failed to delete the test TableStore table %q. 
It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err) + } +} diff --git a/backend/remote-state/oss/client.go b/backend/remote-state/oss/client.go new file mode 100644 index 000000000000..09a96b1897bd --- /dev/null +++ b/backend/remote-state/oss/client.go @@ -0,0 +1,449 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/pkg/errors" + + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + // Store the last saved serial in tablestore with this suffix for consistency checks. + stateIDSuffix = "-md5" + + pkName = "LockID" +) + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. + consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +type RemoteClient struct { + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + bucketName string + stateFile string + lockFile string + serverSideEncryption bool + acl string + otsTable string +} + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.getObj() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. 
+ var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying OSS RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("error getting bucket: %#v", err) + } + + body := bytes.NewReader(data) + + var options []oss.Option + if c.acl != "" { + options = append(options, oss.ACL(oss.ACLType(c.acl))) + } + options = append(options, oss.ContentType("application/json")) + if c.serverSideEncryption { + options = append(options, oss.ServerSideEncryption("AES256")) + } + options = append(options, oss.ContentLength(int64(len(data)))) + + if body != nil { + if err := bucket.PutObject(c.stateFile, body, options...); err != nil { + return fmt.Errorf("failed to upload state %s: %#v", c.stateFile, err) + } + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
+ return fmt.Errorf("failed to store state MD5: %s", err) + } + return nil +} + +func (c *RemoteClient) Delete() error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("error getting bucket %s: %#v", c.bucketName, err) + } + + log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile) + + if err := bucket.DeleteObject(c.stateFile); err != nil { + return fmt.Errorf("error deleting state %s: %#v", c.stateFile, err) + } + + if err := c.deleteMD5(); err != nil { + log.Printf("[WARN] Error deleting state MD5: %s", err) + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.otsTable == "" { + return "", nil + } + + info.Path = c.lockPath() + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath(), + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "Info", + Value: string(info.Marshal()), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST, + }, + } + + log.Printf("[DEBUG] Recording state lock in tablestore: %#v; LOCKID:%s", putParams, c.lockPath()) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + if err != nil { + err = fmt.Errorf("invoking PutRow got an error: %#v", err) + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, fmt.Errorf("\ngetting lock info got an error: %#v", infoErr)) + } + lockErr := &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + log.Printf("[ERROR] state lock error: %s", lockErr.Error()) + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5() ([]byte, error) { + if c.otsTable 
== "" { + return nil, nil + } + + getParams := &tablestore.SingleRowQueryCriteria{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath() + stateIDSuffix, + }, + }, + }, + ColumnsToGet: []string{pkName, "Digest"}, + MaxVersion: 1, + } + + log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams) + + object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ + SingleRowQueryCriteria: getParams, + }) + + if err != nil { + return nil, err + } + + var val string + if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 { + val = v[0].Value.(string) + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state to that clients can check for stale state files. +func (c *RemoteClient) putMD5(sum []byte) error { + if c.otsTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath() + stateIDSuffix, + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "Digest", + Value: hex.EncodeToString(sum), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE, + }, + } + + log.Printf("[DEBUG] Recoring state serial in tablestore: %#v", putParams) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + + if err != nil { + log.Printf("[WARN] failed to record state serial in tablestore: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.otsTable == "" { + return nil + } + + params := 
&tablestore.DeleteRowRequest{ + DeleteRowChange: &tablestore.DeleteRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath() + stateIDSuffix, + }, + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST, + }, + }, + } + + log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params) + + if _, err := c.otsClient.DeleteRow(params); err != nil { + return err + } + + return nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + getParams := &tablestore.SingleRowQueryCriteria{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath(), + }, + }, + }, + ColumnsToGet: []string{pkName, "Info"}, + MaxVersion: 1, + } + + log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams) + + object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ + SingleRowQueryCriteria: getParams, + }) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 { + infoData = v[0].Value.(string) + } + lockInfo := &statemgr.LockInfo{} + err = json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + return lockInfo, nil +} +func (c *RemoteClient) Unlock(id string) error { + if c.otsTable == "" { + return nil + } + + lockErr := &statemgr.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + params := &tablestore.DeleteRowRequest{ + DeleteRowChange: &tablestore.DeleteRowChange{ + TableName: c.otsTable, + PrimaryKey: 
&tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath(), + }, + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE, + }, + }, + } + + _, err = c.otsClient.DeleteRow(params) + + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile) +} + +func (c *RemoteClient) getObj() (*remote.Payload, error) { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return nil, fmt.Errorf("error getting bucket %s: %#v", c.bucketName, err) + } + + if exist, err := bucket.IsObjectExist(c.stateFile); err != nil { + return nil, fmt.Errorf("estimating object %s is exist got an error: %#v", c.stateFile, err) + } else if !exist { + return nil, nil + } + + var options []oss.Option + output, err := bucket.GetObject(c.stateFile, options...) + if err != nil { + return nil, fmt.Errorf("error getting object: %#v", err) + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output); err != nil { + return nil, fmt.Errorf("failed to read remote state: %s", err) + } + sum := md5.Sum(buf.Bytes()) + payload := &remote.Payload{ + Data: buf.Bytes(), + MD5: sum[:], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +const errBadChecksumFmt = `state data in OSS does not have the expected content. + +This may be caused by unusually long delays in OSS processing a previous state +update. Please wait for a minute or two and try again. 
If this problem +persists, and neither OSS nor TableStore are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +TableStore table to the following value: %x` diff --git a/backend/remote-state/oss/client_test.go b/backend/remote-state/oss/client_test.go new file mode 100644 index 000000000000..49a2d63b61b5 --- /dev/null +++ b/backend/remote-state/oss/client_test.go @@ -0,0 +1,377 @@ +package oss + +import ( + "fmt" + "strings" + "testing" + "time" + + "bytes" + "crypto/md5" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +// NOTE: Before running this testcase, please create a OTS instance called 'tf-oss-remote' +var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com" + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + path := "testState" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + })).(*Backend) + + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + 
"encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +// verify that the backend can handle more than one state in the same table +func TestRemoteClientLocks_multipleStates(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr("s1") + if err != nil { + t.Fatal(err) + } + if _, err := 
s1.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatal("failed to get lock for s1:", err) + } + + // s1 is now locked, s2 should not be locked as it's a different state file + s2, err := b2.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if _, err := s2.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatal("failed to get lock for s2:", err) + } +} + +// verify that we can unlock a state with an existing lock +func TestRemoteForceUnlock(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal("failed to get default state to force unlock:", err) + } + + if err := s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock default state") + } + + // now try the 
same thing with a named state + // first test with default + s1, err = b1.StateMgr("test") + if err != nil { + t.Fatal(err) + } + + info = statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err = s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err = b2.StateMgr("test") + if err != nil { + t.Fatal("failed to get named state to force unlock:", err) + } + + if err = s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock named state") + } +} + +func TestRemoteClient_clientMD5(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + createTablestoreTable(t, b.otsClient, tableName) + defer deleteTablestoreTable(t, b.otsClient, tableName) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client := s.(*remote.State).Client.(*RemoteClient) + + sum := md5.Sum([]byte("test")) + + if err := client.putMD5(sum[:]); err != nil { + t.Fatal(err) + } + + getSum, err := client.getMD5() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(getSum, sum[:]) { + t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum) + } + + if err := client.deleteMD5(); err != nil { + t.Fatal(err) + } + + if getSum, err := client.getMD5(); err == nil { + t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum) + } +} + +// verify that a client won't return a state with an incorrect checksum. 
+func TestRemoteClient_stateChecksum(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client1 := s1.(*remote.State).Client + + // create an old and new state version to persist + s := statemgr.TestFullInitialState() + sf := &statefile.File{State: s} + var oldState bytes.Buffer + if err := statefile.Write(sf, &oldState); err != nil { + t.Fatal(err) + } + sf.Serial++ + var newState bytes.Buffer + if err := statefile.Write(sf, &newState); err != nil { + t.Fatal(err) + } + + // Use b2 without a tablestore_table to bypass the lock table to write the state directly. 
+ // client2 will write the "incorrect" state, simulating oss eventually consistency delays + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + })).(*Backend) + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client2 := s2.(*remote.State).Client + + // write the new state through client2 so that there is no checksum yet + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // verify that we can pull a state without a checksum + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } + + // write the new state back with its checksum + if err := client1.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // put an empty state in place to check for panics during get + if err := client2.Put([]byte{}); err != nil { + t.Fatal(err) + } + + // remove the timeouts so we can fail immediately + origTimeout := consistencyRetryTimeout + origInterval := consistencyRetryPollInterval + defer func() { + consistencyRetryTimeout = origTimeout + consistencyRetryPollInterval = origInterval + }() + consistencyRetryTimeout = 0 + consistencyRetryPollInterval = 0 + + // fetching an empty state through client1 should now error out due to a + // mismatched checksum. + if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // put the old state in place of the new, without updating the checksum + if err := client2.Put(oldState.Bytes()); err != nil { + t.Fatal(err) + } + + // fetching the wrong state through client1 should now error out due to a + // mismatched checksum. 
+ if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // update the state with the correct one after we Get again + testChecksumHook = func() { + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + testChecksumHook = nil + } + + consistencyRetryTimeout = origTimeout + + // this final Get will fail to fail the checksum verification, the above + // callback will update the state with the correct version, and Get should + // retry automatically. + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } +} diff --git a/backend/remote-state/pg/backend.go b/backend/remote-state/pg/backend.go new file mode 100644 index 000000000000..8f1780a90a35 --- /dev/null +++ b/backend/remote-state/pg/backend.go @@ -0,0 +1,133 @@ +package pg + +import ( + "context" + "database/sql" + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/lib/pq" +) + +const ( + statesTableName = "states" + statesIndexName = "states_by_name" +) + +// New creates a new backend for Postgres remote state. 
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "conn_str": { + Type: schema.TypeString, + Required: true, + Description: "Postgres connection string; a `postgres://` URL", + }, + + "schema_name": { + Type: schema.TypeString, + Optional: true, + Description: "Name of the automatically managed Postgres schema to store state", + Default: "terraform_remote_state", + }, + + "skip_schema_creation": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, Terraform won't try to create the Postgres schema", + Default: false, + }, + + "skip_table_creation": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, Terraform won't try to create the Postgres table", + }, + + "skip_index_creation": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to `true`, Terraform won't try to create the Postgres index", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + db *sql.DB + configData *schema.ResourceData + connStr string + schemaName string +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + data := b.configData + + b.connStr = data.Get("conn_str").(string) + b.schemaName = pq.QuoteIdentifier(data.Get("schema_name").(string)) + + db, err := sql.Open("postgres", b.connStr) + if err != nil { + return err + } + + // Prepare database schema, tables, & indexes. 
+ var query string + + if !data.Get("skip_schema_creation").(bool) { + // list all schemas to see if it exists + var count int + query = `select count(1) from information_schema.schemata where schema_name = $1` + if err := db.QueryRow(query, data.Get("schema_name").(string)).Scan(&count); err != nil { + return err + } + + // skip schema creation if schema already exists + // `CREATE SCHEMA IF NOT EXISTS` is to be avoided if ever + // a user hasn't been granted the `CREATE SCHEMA` privilege + if count < 1 { + // tries to create the schema + query = `CREATE SCHEMA IF NOT EXISTS %s` + if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil { + return err + } + } + } + + if !data.Get("skip_table_creation").(bool) { + if _, err := db.Exec("CREATE SEQUENCE IF NOT EXISTS public.global_states_id_seq AS bigint"); err != nil { + return err + } + + query = `CREATE TABLE IF NOT EXISTS %s.%s ( + id bigint NOT NULL DEFAULT nextval('public.global_states_id_seq') PRIMARY KEY, + name text UNIQUE, + data text + )` + if _, err := db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName)); err != nil { + return err + } + } + + if !data.Get("skip_index_creation").(bool) { + query = `CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)` + if _, err := db.Exec(fmt.Sprintf(query, statesIndexName, b.schemaName, statesTableName)); err != nil { + return err + } + } + + // Assign db after its schema is prepared. 
+ b.db = db + + return nil +} diff --git a/backend/remote-state/pg/backend_state.go b/backend/remote-state/pg/backend_state.go new file mode 100644 index 000000000000..5c364a737ace --- /dev/null +++ b/backend/remote-state/pg/backend_state.go @@ -0,0 +1,115 @@ +package pg + +import ( + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +func (b *Backend) Workspaces() ([]string, error) { + query := `SELECT name FROM %s.%s WHERE name != 'default' ORDER BY name` + rows, err := b.db.Query(fmt.Sprintf(query, b.schemaName, statesTableName)) + if err != nil { + return nil, err + } + defer rows.Close() + + result := []string{ + backend.DefaultStateName, + } + + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + result = append(result, name) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + query := `DELETE FROM %s.%s WHERE name = $1` + _, err := b.db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName), name) + if err != nil { + return err + } + + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + // Build the state client + var stateMgr statemgr.Full = &remote.State{ + Client: &RemoteClient{ + Client: b.db, + Name: name, + SchemaName: b.schemaName, + }, + } + + // Check to see if this state already exists. + // If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. 
+ existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so Workspaces() knows it exists. + if !exists { + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Postgres: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(`error unlocking Postgres state: %s`, err) + } + return parent + } + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + + return stateMgr, nil +} diff --git a/backend/remote-state/pg/backend_test.go b/backend/remote-state/pg/backend_test.go new file mode 100644 index 000000000000..f7ff018719f7 --- /dev/null +++ b/backend/remote-state/pg/backend_test.go @@ -0,0 +1,376 @@ +package pg + +// Create the test database: createdb terraform_backend_pg_test +// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/hashicorp/terraform/backend/remote-state/pg + +import ( + "database/sql" + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/lib/pq" + _ "github.com/lib/pq" +) + +// Function to skip a test unless in ACCeptance test mode. 
+// +// A running Postgres server identified by env variable +// DATABASE_URL is required for acceptance tests. +func testACC(t *testing.T) { + skip := os.Getenv("TF_ACC") == "" + if skip { + t.Log("pg backend tests require setting TF_ACC") + t.Skip() + } + if os.Getenv("DATABASE_URL") == "" { + os.Setenv("DATABASE_URL", "postgres://localhost/terraform_backend_pg_test?sslmode=disable") + } +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := pq.QuoteIdentifier(fmt.Sprintf("terraform_%s", t.Name())) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + schemaName = pq.QuoteIdentifier(schemaName) + + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + + _, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + c := s.(*remote.State).Client.(*RemoteClient) + if c.Name != backend.DefaultStateName { + t.Fatal("RemoteClient name is not configured") + } + + backend.TestBackendStates(t, b) +} + +func TestBackendConfigSkipOptions(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + + testCases := []struct { + Name string + SkipSchemaCreation bool + SkipTableCreation bool + SkipIndexCreation bool + TestIndexIsPresent bool + Setup func(t *testing.T, db *sql.DB, schemaName string) + }{ + { + Name: "skip_schema_creation", + SkipSchemaCreation: true, + TestIndexIsPresent: 
true, + Setup: func(t *testing.T, db *sql.DB, schemaName string) { + // create the schema as a prerequisites + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "skip_table_creation", + SkipTableCreation: true, + TestIndexIsPresent: true, + Setup: func(t *testing.T, db *sql.DB, schemaName string) { + // since the table needs to be already created the schema must be too + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( + id SERIAL PRIMARY KEY, + name TEXT, + data TEXT + )`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "skip_index_creation", + SkipIndexCreation: true, + TestIndexIsPresent: true, + Setup: func(t *testing.T, db *sql.DB, schemaName string) { + // Everything need to exists for the index to be created + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( + id SERIAL PRIMARY KEY, + name TEXT, + data TEXT + )`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Exec(fmt.Sprintf(`CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)`, statesIndexName, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "missing_index", + SkipIndexCreation: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + schemaName := tc.Name + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + "skip_schema_creation": tc.SkipSchemaCreation, + "skip_table_creation": tc.SkipTableCreation, + "skip_index_creation": tc.SkipIndexCreation, + }) + schemaName = pq.QuoteIdentifier(schemaName) + db, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + + if tc.Setup != nil { + 
tc.Setup(t, db, schemaName) + } + defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + // Make sure everything has been created + + // This tests that both the schema and the table have been created + _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + if tc.TestIndexIsPresent { + // Make sure that the index exists + query := `select count(*) from pg_indexes where schemaname=$1 and tablename=$2 and indexname=$3;` + var count int + if err := b.db.QueryRow(query, tc.Name, statesTableName, statesIndexName).Scan(&count); err != nil { + t.Fatal(err) + } + if count != 1 { + t.Fatalf("The index has not been created (%d)", count) + } + } + + _, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + c := s.(*remote.State).Client.(*RemoteClient) + if c.Name != backend.DefaultStateName { + t.Fatal("RemoteClient name is not configured") + } + + // Make sure that all workspace must have a unique name + _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (100, 'unique_name_test', '')`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (101, 'unique_name_test', '')`, schemaName, statesTableName)) + if err == nil { + t.Fatal("Creating two workspaces with the same name did not raise an error") + } + }) + } + +} + +func TestBackendStates(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + + testCases := []string{ + fmt.Sprintf("terraform_%s", t.Name()), + fmt.Sprintf("test with spaces: %s", t.Name()), + } + for _, schemaName := range testCases { + t.Run(schemaName, func(t *testing.T) { + dbCleaner, err := sql.Open("postgres", connStr) + if err 
!= nil { + t.Fatal(err) + } + defer dbCleaner.Query("DROP SCHEMA IF EXISTS %s CASCADE", pq.QuoteIdentifier(schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + backend.TestBackendStates(t, b) + }) + } +} + +func TestBackendStateLocks(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + bb := backend.TestBackendConfig(t, New(), config).(*Backend) + + if bb == nil { + t.Fatal("Backend could not be configured") + } + + backend.TestBackendStateLocks(t, b, bb) +} + +func TestBackendConcurrentLock(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + + getStateMgr := func(schemaName string) (statemgr.Full, *statemgr.LockInfo) { + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + stateMgr, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("Failed to get the state manager: %v", err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = schemaName + + return stateMgr, info + } + + 
s1, i1 := getStateMgr(fmt.Sprintf("terraform_%s_1", t.Name())) + s2, i2 := getStateMgr(fmt.Sprintf("terraform_%s_2", t.Name())) + + // First we need to create the workspace as the lock for creating them is + // global + lockID1, err := s1.Lock(i1) + if err != nil { + t.Fatalf("failed to lock first state: %v", err) + } + + if err = s1.PersistState(nil); err != nil { + t.Fatalf("failed to persist state: %v", err) + } + + if err := s1.Unlock(lockID1); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + lockID2, err := s2.Lock(i2) + if err != nil { + t.Fatalf("failed to lock second state: %v", err) + } + + if err = s2.PersistState(nil); err != nil { + t.Fatalf("failed to persist state: %v", err) + } + + if err := s2.Unlock(lockID2); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + // Now we can test concurrent lock + lockID1, err = s1.Lock(i1) + if err != nil { + t.Fatalf("failed to lock first state: %v", err) + } + + lockID2, err = s2.Lock(i2) + if err != nil { + t.Fatalf("failed to lock second state: %v", err) + } + + if err := s1.Unlock(lockID1); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + if err := s2.Unlock(lockID2); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } +} + +func getDatabaseUrl() string { + return os.Getenv("DATABASE_URL") +} diff --git a/backend/remote-state/pg/client.go b/backend/remote-state/pg/client.go new file mode 100644 index 000000000000..2c6db19f13cf --- /dev/null +++ b/backend/remote-state/pg/client.go @@ -0,0 +1,142 @@ +package pg + +import ( + "crypto/md5" + "database/sql" + "fmt" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + _ "github.com/lib/pq" +) + +// RemoteClient is a remote client that stores data in a Postgres database +type RemoteClient struct { + Client *sql.DB + Name string + SchemaName string + + info *statemgr.LockInfo +} + +func (c 
*RemoteClient) Get() (*remote.Payload, error) { + query := `SELECT data FROM %s.%s WHERE name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + var data []byte + err := row.Scan(&data) + switch { + case err == sql.ErrNoRows: + // No existing state returns empty. + return nil, nil + case err != nil: + return nil, err + default: + md5 := md5.Sum(data) + return &remote.Payload{ + Data: data, + MD5: md5[:], + }, nil + } +} + +func (c *RemoteClient) Put(data []byte) error { + query := `INSERT INTO %s.%s (name, data) VALUES ($1, $2) + ON CONFLICT (name) DO UPDATE + SET data = $2 WHERE %s.name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName, statesTableName), c.Name, data) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Delete() error { + query := `DELETE FROM %s.%s WHERE name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + var err error + var lockID string + + if info.ID == "" { + lockID, err = uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + // Local helper function so we can call it multiple places + // + lockUnlock := func(pgLockId string) error { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, pgLockId)) + var didUnlock []byte + err := row.Scan(&didUnlock) + if err != nil { + return &statemgr.LockError{Info: info, Err: err} + } + return nil + } + + // Try to acquire locks for the existing row `id` and the creation lock `-1`. 
+ query := `SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(-1) FROM %s.%s WHERE %s.name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, statesTableName, statesTableName, c.SchemaName, statesTableName, statesTableName), c.Name) + var pgLockId, didLock, didLockForCreate []byte + err = row.Scan(&pgLockId, &didLock, &didLockForCreate) + switch { + case err == sql.ErrNoRows: + // No rows means we're creating the workspace. Take the creation lock. + innerRow := c.Client.QueryRow(`SELECT pg_try_advisory_lock(-1)`) + var innerDidLock []byte + err := innerRow.Scan(&innerDidLock) + if err != nil { + return "", &statemgr.LockError{Info: info, Err: err} + } + if string(innerDidLock) == "false" { + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Already locked for workspace creation: %s", c.Name)} + } + info.Path = "-1" + case err != nil: + return "", &statemgr.LockError{Info: info, Err: err} + case string(didLock) == "false": + // Existing workspace is already locked. Release the attempted creation lock. + lockUnlock("-1") + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Workspace is already locked: %s", c.Name)} + case string(didLockForCreate) == "false": + // Someone has the creation lock already. Release the existing workspace because it might not be safe to touch. + lockUnlock(string(pgLockId)) + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Cannot lock workspace; already locked for workspace creation: %s", c.Name)} + default: + // Existing workspace is now locked. Release the attempted creation lock. 
+ lockUnlock("-1") + info.Path = string(pgLockId) + } + c.info = info + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + return c.info, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.info != nil && c.info.Path != "" { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, c.info.Path)) + var didUnlock []byte + err := row.Scan(&didUnlock) + if err != nil { + return &statemgr.LockError{Info: c.info, Err: err} + } + c.info = nil + } + return nil +} diff --git a/backend/remote-state/pg/client_test.go b/backend/remote-state/pg/client_test.go new file mode 100644 index 000000000000..ded47a5d120d --- /dev/null +++ b/backend/remote-state/pg/client_test.go @@ -0,0 +1,76 @@ +package pg + +// Create the test database: createdb terraform_backend_pg_test +// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/hashicorp/terraform/backend/remote-state/pg + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, s.(*remote.State).Client) +} + +func TestRemoteLocks(t 
*testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + + b1 := backend.TestBackendConfig(t, New(), config).(*Backend) + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + b2 := backend.TestBackendConfig(t, New(), config).(*Backend) + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} diff --git a/backend/remote-state/s3/backend.go b/backend/remote-state/s3/backend.go new file mode 100644 index 000000000000..2e41e6be5ff2 --- /dev/null +++ b/backend/remote-state/s3/backend.go @@ -0,0 +1,413 @@ +package s3 + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + awsbase "github.com/hashicorp/aws-sdk-go-base" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/legacy/helper/schema" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/version" +) + +// New creates a new backend for S3 remote state. 
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the S3 bucket", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The path to the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + // s3 will strip leading slashes from an object, so while this will + // technically be accepted by s3, it will break our workspace hierarchy. + if strings.HasPrefix(v.(string), "/") { + return nil, []error{errors.New("key must not start with '/'")} + } + // s3 will recognize objects with a trailing slash as a directory + // so they should not be valid keys + if strings.HasSuffix(v.(string), "/") { + return nil, []error{errors.New("key must not end with '/'")} + } + return nil, nil + }, + }, + + "region": { + Type: schema.TypeString, + Required: true, + Description: "AWS region of the S3 Bucket and DynamoDB Table (if used).", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", + }, nil), + }, + + "dynamodb_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the DynamoDB API", + DefaultFunc: schema.EnvDefaultFunc("AWS_DYNAMODB_ENDPOINT", ""), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the S3 API", + DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""), + }, + + "iam_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the IAM API", + DefaultFunc: schema.EnvDefaultFunc("AWS_IAM_ENDPOINT", ""), + }, + + "sts_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the STS API", + DefaultFunc: schema.EnvDefaultFunc("AWS_STS_ENDPOINT", ""), + }, + + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side 
encryption of the state file", + Default: false, + }, + + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Canned ACL to be applied to the state file", + Default: "", + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS access key", + Default: "", + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS secret key", + Default: "", + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "The ARN of a KMS Key to use for encrypting the state", + Default: "", + }, + + "dynamodb_table": { + Type: schema.TypeString, + Optional: true, + Description: "DynamoDB table for state locking and consistency", + Default: "", + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Description: "AWS profile name", + Default: "", + }, + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Description: "Path to a shared credentials file", + Default: "", + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Description: "MFA token", + Default: "", + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the credentials validation via STS API.", + Default: false, + }, + + "skip_region_validation": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip static validation of region name.", + Default: false, + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the AWS Metadata API check.", + Default: false, + }, + + "sse_customer_key": { + Type: schema.TypeString, + Optional: true, + Description: "The base64-encoded encryption key to use for server-side encryption with customer-provided keys (SSE-C).", + DefaultFunc: schema.EnvDefaultFunc("AWS_SSE_CUSTOMER_KEY", ""), + Sensitive: true, + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + key := v.(string) + if key != "" && len(key) != 44 { + return 
nil, []error{errors.New("sse_customer_key must be 44 characters in length (256 bits, base64 encoded)")} + } + return nil, nil + }, + }, + + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "The role to be assumed", + Default: "", + }, + + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + Default: "", + }, + + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: "The external ID to use when assuming the role", + Default: "", + }, + + "assume_role_duration_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: "Seconds to restrict the assume role session duration.", + }, + + "assume_role_policy": { + Type: schema.TypeString, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + Default: "", + }, + + "assume_role_policy_arns": { + Type: schema.TypeSet, + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "assume_role_tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Assume role session tags.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "assume_role_transitive_tag_keys": { + Type: schema.TypeSet, + Optional: true, + Description: "Assume role session tag keys to pass to any subsequent sessions.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "workspace_key_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The prefix applied to the non-default state path inside the bucket.", + Default: "env:", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasSuffix(prefix, "/") { + return nil, []error{errors.New("workspace_key_prefix must not start or end with 
'/'")} + } + return nil, nil + }, + }, + + "force_path_style": { + Type: schema.TypeBool, + Optional: true, + Description: "Force s3 to use path style api.", + Default: false, + }, + + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Description: "The maximum number of times an AWS API request is retried on retryable failure.", + Default: 5, + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + s3Client *s3.S3 + dynClient *dynamodb.DynamoDB + + bucketName string + keyName string + serverSideEncryption bool + customerEncryptionKey []byte + acl string + kmsKeyID string + ddbTable string + workspaceKeyPrefix string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.s3Client != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + if !data.Get("skip_region_validation").(bool) { + if err := awsbase.ValidateRegion(data.Get("region").(string)); err != nil { + return err + } + } + + b.bucketName = data.Get("bucket").(string) + b.keyName = data.Get("key").(string) + b.acl = data.Get("acl").(string) + b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string) + b.serverSideEncryption = data.Get("encrypt").(bool) + b.kmsKeyID = data.Get("kms_key_id").(string) + b.ddbTable = data.Get("dynamodb_table").(string) + + customerKeyString := data.Get("sse_customer_key").(string) + if customerKeyString != "" { + if b.kmsKeyID != "" { + return errors.New(encryptionKeyConflictError) + } + + var err error + b.customerEncryptionKey, err = base64.StdEncoding.DecodeString(customerKeyString) + if err != nil { + return fmt.Errorf("Failed to decode sse_customer_key: %s", err.Error()) + } + } + + cfg := &awsbase.Config{ + AccessKey: data.Get("access_key").(string), + AssumeRoleARN: data.Get("role_arn").(string), + AssumeRoleDurationSeconds: 
data.Get("assume_role_duration_seconds").(int), + AssumeRoleExternalID: data.Get("external_id").(string), + AssumeRolePolicy: data.Get("assume_role_policy").(string), + AssumeRoleSessionName: data.Get("session_name").(string), + CallerDocumentationURL: "https://www.terraform.io/docs/language/settings/backends/s3.html", + CallerName: "S3 Backend", + CredsFilename: data.Get("shared_credentials_file").(string), + DebugLogging: logging.IsDebugOrHigher(), + IamEndpoint: data.Get("iam_endpoint").(string), + MaxRetries: data.Get("max_retries").(int), + Profile: data.Get("profile").(string), + Region: data.Get("region").(string), + SecretKey: data.Get("secret_key").(string), + SkipCredsValidation: data.Get("skip_credentials_validation").(bool), + SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool), + StsEndpoint: data.Get("sts_endpoint").(string), + Token: data.Get("token").(string), + UserAgentProducts: []*awsbase.UserAgentProduct{ + {Name: "APN", Version: "1.0"}, + {Name: "HashiCorp", Version: "1.0"}, + {Name: "Terraform", Version: version.String()}, + }, + } + + if policyARNSet := data.Get("assume_role_policy_arns").(*schema.Set); policyARNSet.Len() > 0 { + for _, policyARNRaw := range policyARNSet.List() { + policyARN, ok := policyARNRaw.(string) + + if !ok { + continue + } + + cfg.AssumeRolePolicyARNs = append(cfg.AssumeRolePolicyARNs, policyARN) + } + } + + if tagMap := data.Get("assume_role_tags").(map[string]interface{}); len(tagMap) > 0 { + cfg.AssumeRoleTags = make(map[string]string) + + for k, vRaw := range tagMap { + v, ok := vRaw.(string) + + if !ok { + continue + } + + cfg.AssumeRoleTags[k] = v + } + } + + if transitiveTagKeySet := data.Get("assume_role_transitive_tag_keys").(*schema.Set); transitiveTagKeySet.Len() > 0 { + for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() { + transitiveTagKey, ok := transitiveTagKeyRaw.(string) + + if !ok { + continue + } + + cfg.AssumeRoleTransitiveTagKeys = append(cfg.AssumeRoleTransitiveTagKeys, 
transitiveTagKey) + } + } + + sess, err := awsbase.GetSession(cfg) + if err != nil { + return fmt.Errorf("error configuring S3 Backend: %w", err) + } + + b.dynClient = dynamodb.New(sess.Copy(&aws.Config{ + Endpoint: aws.String(data.Get("dynamodb_endpoint").(string)), + })) + b.s3Client = s3.New(sess.Copy(&aws.Config{ + Endpoint: aws.String(data.Get("endpoint").(string)), + S3ForcePathStyle: aws.Bool(data.Get("force_path_style").(bool)), + })) + + return nil +} + +const encryptionKeyConflictError = `Cannot have both kms_key_id and sse_customer_key set. + +The kms_key_id is used for encryption with KMS-Managed Keys (SSE-KMS) +while sse_customer_key is used for encryption with customer-managed keys (SSE-C). +Please choose one or the other.` diff --git a/backend/remote-state/s3/backend_state.go b/backend/remote-state/s3/backend_state.go new file mode 100644 index 000000000000..65f4f8363b25 --- /dev/null +++ b/backend/remote-state/s3/backend_state.go @@ -0,0 +1,221 @@ +package s3 + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +func (b *Backend) Workspaces() ([]string, error) { + const maxKeys = 1000 + + prefix := "" + + if b.workspaceKeyPrefix != "" { + prefix = b.workspaceKeyPrefix + "/" + } + + params := &s3.ListObjectsInput{ + Bucket: &b.bucketName, + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(maxKeys), + } + + wss := []string{backend.DefaultStateName} + err := b.s3Client.ListObjectsPages(params, func(page *s3.ListObjectsOutput, lastPage bool) bool { + for _, obj := range page.Contents { + ws := b.keyEnv(*obj.Key) + if ws != "" { + wss = append(wss, ws) + } + } + return !lastPage + }) + + if awsErr, ok := err.(awserr.Error); ok && 
awsErr.Code() == s3.ErrCodeNoSuchBucket { + return nil, fmt.Errorf(errS3NoSuchBucket, err) + } + + sort.Strings(wss[1:]) + return wss, nil +} + +func (b *Backend) keyEnv(key string) string { + prefix := b.workspaceKeyPrefix + + if prefix == "" { + parts := strings.SplitN(key, "/", 2) + if len(parts) > 1 && parts[1] == b.keyName { + return parts[0] + } else { + return "" + } + } + + // add a slash to treat this as a directory + prefix += "/" + + parts := strings.SplitAfterN(key, prefix, 2) + if len(parts) < 2 { + return "" + } + + // shouldn't happen since we listed by prefix + if parts[0] != prefix { + return "" + } + + parts = strings.SplitN(parts[1], "/", 2) + + if len(parts) < 2 { + return "" + } + + // not our key, so don't include it in our listing + if parts[1] != b.keyName { + return "" + } + + return parts[0] +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + + return client.Delete() +} + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path(name), + serverSideEncryption: b.serverSideEncryption, + customerEncryptionKey: b.customerEncryptionKey, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + } + + return client, nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + + stateMgr := &remote.State{Client: client} + // Check to see if this state already exists. + // If we're trying to force-unlock a state, we can't take the lock before + // fetching the state. 
If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. + // + // If we need to force-unlock, but for some reason the state no longer + // exists, the user will have to use aws tools to manually fix the + // situation. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // We need to create the object so it's listed by States. + if !exists { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock s3 state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + // This is to ensure that no one beat us to writing a state between + // the `exists` check and taking the lock. 
+ if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return path.Join(b.workspaceKeyPrefix, name, b.keyName) +} + +const errStateUnlock = ` +Error unlocking S3 state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +` diff --git a/backend/remote-state/s3/backend_test.go b/backend/remote-state/s3/backend_test.go new file mode 100644 index 000000000000..918aeb4e6e88 --- /dev/null +++ b/backend/remote-state/s3/backend_test.go @@ -0,0 +1,795 @@ +package s3 + +import ( + "fmt" + "net/url" + "os" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + awsbase "github.com/hashicorp/aws-sdk-go-base" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" +) + +var ( + mockStsGetCallerIdentityRequestBody = url.Values{ + "Action": []string{"GetCallerIdentity"}, + "Version": []string{"2011-06-15"}, + }.Encode() +) + +// verify that we are doing ACC tests or the S3 tests specifically +func testACC(t *testing.T) { + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_S3_TEST") == "" + if skip { + t.Log("s3 backend tests 
require setting TF_ACC or TF_S3_TEST") + t.Skip() + } + if os.Getenv("AWS_DEFAULT_REGION") == "" { + os.Setenv("AWS_DEFAULT_REGION", "us-west-2") + } +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "key": "state", + "encrypt": true, + "dynamodb_table": "dynamoTable", + } + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) + + if *b.s3Client.Config.Region != "us-west-1" { + t.Fatalf("Incorrect region was populated") + } + if b.bucketName != "tf-test" { + t.Fatalf("Incorrect bucketName was populated") + } + if b.keyName != "state" { + t.Fatalf("Incorrect keyName was populated") + } + + credentials, err := b.s3Client.Config.Credentials.Get() + if err != nil { + t.Fatalf("Error when requesting credentials") + } + if credentials.AccessKeyID == "" { + t.Fatalf("No Access Key Id was populated") + } + if credentials.SecretAccessKey == "" { + t.Fatalf("No Secret Access Key was populated") + } +} + +func TestBackendConfig_AssumeRole(t *testing.T) { + testACC(t) + + testCases := []struct { + Config map[string]interface{} + Description string + MockStsEndpoints []*awsbase.MockEndpoint + }{ + { + Config: map[string]interface{}{ + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "role_arn", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: 
awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_duration_seconds": 3600, + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_duration_seconds", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"3600"}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "bucket": "tf-test", + "external_id": awsbase.MockStsAssumeRoleExternalId, + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "external_id", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "ExternalId": []string{awsbase.MockStsAssumeRoleExternalId}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + 
"RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_policy": awsbase.MockStsAssumeRolePolicy, + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_policy", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "Policy": []string{awsbase.MockStsAssumeRolePolicy}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_policy_arns": []interface{}{awsbase.MockStsAssumeRolePolicyArn}, + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_policy_arns", + MockStsEndpoints: []*awsbase.MockEndpoint{ 
+ { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "PolicyArns.member.1.arn": []string{awsbase.MockStsAssumeRolePolicyArn}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_tags": map[string]interface{}{ + awsbase.MockStsAssumeRoleTagKey: awsbase.MockStsAssumeRoleTagValue, + }, + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_tags", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Tags.member.1.Key": []string{awsbase.MockStsAssumeRoleTagKey}, + "Tags.member.1.Value": []string{awsbase.MockStsAssumeRoleTagValue}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: 
awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_tags": map[string]interface{}{ + awsbase.MockStsAssumeRoleTagKey: awsbase.MockStsAssumeRoleTagValue, + }, + "assume_role_transitive_tag_keys": []interface{}{awsbase.MockStsAssumeRoleTagKey}, + "bucket": "tf-test", + "key": "state", + "region": "us-west-1", + "role_arn": awsbase.MockStsAssumeRoleArn, + "session_name": awsbase.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_transitive_tag_keys", + MockStsEndpoints: []*awsbase.MockEndpoint{ + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, + "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, + "Tags.member.1.Key": []string{awsbase.MockStsAssumeRoleTagKey}, + "Tags.member.1.Value": []string{awsbase.MockStsAssumeRoleTagValue}, + "TransitiveTagKeys.member.1": []string{awsbase.MockStsAssumeRoleTagKey}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.Description, func(t *testing.T) { + closeSts, mockStsSession, err := awsbase.GetMockedAwsApiSession("STS", testCase.MockStsEndpoints) + defer closeSts() + + if err != nil { + t.Fatalf("unexpected error creating mock STS server: %s", err) + } + + if mockStsSession != nil && mockStsSession.Config != nil { + testCase.Config["sts_endpoint"] = 
aws.StringValue(mockStsSession.Config.Endpoint) + } + + diags := New().Configure(hcl2shim.HCL2ValueFromConfigValue(testCase.Config)) + + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected error: %s", diag.Description().Summary) + } + } + }) + } +} + +func TestBackendConfig_invalidKey(t *testing.T) { + testACC(t) + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "key": "/leading-slash", + "encrypt": true, + "dynamodb_table": "dynamoTable", + }) + + _, diags := New().PrepareConfig(cfg) + if !diags.HasErrors() { + t.Fatal("expected config validation error") + } + + cfg = hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "key": "trailing-slash/", + "encrypt": true, + "dynamodb_table": "dynamoTable", + }) + + _, diags = New().PrepareConfig(cfg) + if !diags.HasErrors() { + t.Fatal("expected config validation error") + } +} + +func TestBackendConfig_invalidSSECustomerKeyLength(t *testing.T) { + testACC(t) + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "encrypt": true, + "key": "state", + "dynamodb_table": "dynamoTable", + "sse_customer_key": "key", + }) + + _, diags := New().PrepareConfig(cfg) + if !diags.HasErrors() { + t.Fatal("expected error for invalid sse_customer_key length") + } +} + +func TestBackendConfig_invalidSSECustomerKeyEncoding(t *testing.T) { + testACC(t) + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "encrypt": true, + "key": "state", + "dynamodb_table": "dynamoTable", + "sse_customer_key": "====CT70aTYB2JGff7AjQtwbiLkwH4npICay1PWtmdka", + }) + + diags := New().Configure(cfg) + if !diags.HasErrors() { + t.Fatal("expected error for failing to decode sse_customer_key") + } +} + +func TestBackendConfig_conflictingEncryptionSchema(t *testing.T) { + testACC(t) + cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "us-west-1", + "bucket": "tf-test", + "key": "state", + "encrypt": true, + "dynamodb_table": "dynamoTable", + "sse_customer_key": "1hwbcNPGWL+AwDiyGmRidTWAEVmCWMKbEHA+Es8w75o=", + "kms_key_id": "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + }) + + diags := New().Configure(cfg) + if !diags.HasErrors() { + t.Fatal("expected error for simultaneous usage of kms_key_id and sse_customer_key") + } +} + +func TestBackend(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "testState" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + backend.TestBackendStates(t, b) +} + +func TestBackendLocked(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "test/state" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + createS3Bucket(t, b1.s3Client, bucketName) + defer deleteS3Bucket(t, b1.s3Client, bucketName) + createDynamoDBTable(t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(t, b1.dynClient, bucketName) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func TestBackendSSECustomerKey(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + + b := 
backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "encrypt": true, + "key": "test-SSE-C", + "sse_customer_key": "4Dm1n4rphuFgawxuzY/bEfvLf6rYK0gIjfaDSLlfXNk=", + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + backend.TestBackendStates(t, b) +} + +// add some extra junk in S3 to try and confuse the env listing. +func TestBackendExtraPaths(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "test/state/tfstate" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + // put multiple states in old env paths. + s1 := states.NewState() + s2 := states.NewState() + + // RemoteClient to Put things in various paths + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path("s1"), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + } + + // Write the first state + stateMgr := &remote.State{Client: client} + stateMgr.WriteState(s1) + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + + // Write the second state + // Note a new state manager - otherwise, because these + // states are equal, the state will not Put to the remote + client.path = b.path("s2") + stateMgr2 := &remote.State{Client: client} + stateMgr2.WriteState(s2) + if err := stateMgr2.PersistState(nil); err != nil { + t.Fatal(err) + } + + s2Lineage := stateMgr2.StateSnapshotMeta().Lineage + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // put a state in an env directory name + client.path = b.workspaceKeyPrefix + "/error" 
+ stateMgr.WriteState(states.NewState()) + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // add state with the wrong key for an existing env + client.path = b.workspaceKeyPrefix + "/s2/notTestState" + stateMgr.WriteState(states.NewState()) + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // remove the state with extra subkey + if err := client.Delete(); err != nil { + t.Fatal(err) + } + + // delete the real workspace + if err := b.DeleteWorkspace("s2", true); err != nil { + t.Fatal(err) + } + + if err := checkStateList(b, []string{"default", "s1"}); err != nil { + t.Fatal(err) + } + + // fetch that state again, which should produce a new lineage + s2Mgr, err := b.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if s2Mgr.(*remote.State).StateSnapshotMeta().Lineage == s2Lineage { + t.Fatal("state s2 was not deleted") + } + s2 = s2Mgr.State() + s2Lineage = stateMgr.StateSnapshotMeta().Lineage + + // add a state with a key that matches an existing environment dir name + client.path = b.workspaceKeyPrefix + "/s2/" + stateMgr.WriteState(states.NewState()) + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + + // make sure s2 is OK + s2Mgr, err = b.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if stateMgr.StateSnapshotMeta().Lineage != s2Lineage { + t.Fatal("we got the wrong state for s2") + } + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } +} + +// ensure we can separate the workspace prefix when it also matches the prefix +// of the workspace name itself. 
+func TestBackendPrefixInWorkspace(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": "test-env.tfstate", + "workspace_key_prefix": "env", + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + // get a state that contains the prefix as a substring + sMgr, err := b.StateMgr("env-1") + if err != nil { + t.Fatal(err) + } + if err := sMgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if err := checkStateList(b, []string{"default", "env-1"}); err != nil { + t.Fatal(err) + } +} + +func TestKeyEnv(t *testing.T) { + testACC(t) + keyName := "some/paths/tfstate" + + bucket0Name := fmt.Sprintf("terraform-remote-s3-test-%x-0", time.Now().Unix()) + b0 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucket0Name, + "key": keyName, + "encrypt": true, + "workspace_key_prefix": "", + })).(*Backend) + + createS3Bucket(t, b0.s3Client, bucket0Name) + defer deleteS3Bucket(t, b0.s3Client, bucket0Name) + + bucket1Name := fmt.Sprintf("terraform-remote-s3-test-%x-1", time.Now().Unix()) + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucket1Name, + "key": keyName, + "encrypt": true, + "workspace_key_prefix": "project/env:", + })).(*Backend) + + createS3Bucket(t, b1.s3Client, bucket1Name) + defer deleteS3Bucket(t, b1.s3Client, bucket1Name) + + bucket2Name := fmt.Sprintf("terraform-remote-s3-test-%x-2", time.Now().Unix()) + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucket2Name, + "key": keyName, + "encrypt": true, + })).(*Backend) + + createS3Bucket(t, b2.s3Client, bucket2Name) + defer deleteS3Bucket(t, b2.s3Client, bucket2Name) + + if err := testGetWorkspaceForKey(b0, 
"some/paths/tfstate", ""); err != nil { + t.Fatal(err) + } + + if err := testGetWorkspaceForKey(b0, "ws1/some/paths/tfstate", "ws1"); err != nil { + t.Fatal(err) + } + + if err := testGetWorkspaceForKey(b1, "project/env:/ws1/some/paths/tfstate", "ws1"); err != nil { + t.Fatal(err) + } + + if err := testGetWorkspaceForKey(b1, "project/env:/ws2/some/paths/tfstate", "ws2"); err != nil { + t.Fatal(err) + } + + if err := testGetWorkspaceForKey(b2, "env:/ws3/some/paths/tfstate", "ws3"); err != nil { + t.Fatal(err) + } + + backend.TestBackendStates(t, b0) + backend.TestBackendStates(t, b1) + backend.TestBackendStates(t, b2) +} + +func testGetWorkspaceForKey(b *Backend, key string, expected string) error { + if actual := b.keyEnv(key); actual != expected { + return fmt.Errorf("incorrect workspace for key[%q]. Expected[%q]: Actual[%q]", key, expected, actual) + } + return nil +} + +func checkStateList(b backend.Backend, expected []string) error { + states, err := b.Workspaces() + if err != nil { + return err + } + + if !reflect.DeepEqual(states, expected) { + return fmt.Errorf("incorrect states listed: %q", states) + } + return nil +} + +func createS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) { + createBucketReq := &s3.CreateBucketInput{ + Bucket: &bucketName, + } + + // Be clear about what we're doing in case the user needs to clean + // this up later. + t.Logf("creating S3 bucket %s in %s", bucketName, *s3Client.Config.Region) + _, err := s3Client.CreateBucket(createBucketReq) + if err != nil { + t.Fatal("failed to create test S3 bucket:", err) + } +} + +func deleteS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) { + warning := "WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. 
(error was %s)" + + // first we have to get rid of the env objects, or we can't delete the bucket + resp, err := s3Client.ListObjects(&s3.ListObjectsInput{Bucket: &bucketName}) + if err != nil { + t.Logf(warning, err) + return + } + for _, obj := range resp.Contents { + if _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{Bucket: &bucketName, Key: obj.Key}); err != nil { + // this will need cleanup no matter what, so just warn and exit + t.Logf(warning, err) + return + } + } + + if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: &bucketName}); err != nil { + t.Logf(warning, err) + } +} + +// create the dynamoDB table, and wait until we can query it. +func createDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) { + createInput := &dynamodb.CreateTableInput{ + AttributeDefinitions: []*dynamodb.AttributeDefinition{ + { + AttributeName: aws.String("LockID"), + AttributeType: aws.String("S"), + }, + }, + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String("LockID"), + KeyType: aws.String("HASH"), + }, + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(5), + WriteCapacityUnits: aws.Int64(5), + }, + TableName: aws.String(tableName), + } + + _, err := dynClient.CreateTable(createInput) + if err != nil { + t.Fatal(err) + } + + // now wait until it's ACTIVE + start := time.Now() + time.Sleep(time.Second) + + describeInput := &dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + } + + for { + resp, err := dynClient.DescribeTable(describeInput) + if err != nil { + t.Fatal(err) + } + + if *resp.Table.TableStatus == "ACTIVE" { + return + } + + if time.Since(start) > time.Minute { + t.Fatalf("timed out creating DynamoDB table %s", tableName) + } + + time.Sleep(3 * time.Second) + } + +} + +func deleteDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) { + params := &dynamodb.DeleteTableInput{ + TableName: aws.String(tableName), + } + _, 
err := dynClient.DeleteTable(params) + if err != nil { + t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err) + } +} diff --git a/backend/remote-state/s3/client.go b/backend/remote-state/s3/client.go new file mode 100644 index 000000000000..e1624f04ccc6 --- /dev/null +++ b/backend/remote-state/s3/client.go @@ -0,0 +1,422 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" +) + +// Store the last saved serial in dynamo with this suffix for consistency checks. +const ( + s3EncryptionAlgorithm = "AES256" + stateIDSuffix = "-md5" + s3ErrCodeInternalError = "InternalError" +) + +type RemoteClient struct { + s3Client *s3.S3 + dynClient *dynamodb.DynamoDB + bucketName string + path string + serverSideEncryption bool + customerEncryptionKey []byte + acl string + kmsKeyID string + ddbTable string +} + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. + consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. 
+ for { + payload, err = c.get() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. + var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying S3 RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + + return payload, err +} + +func (c *RemoteClient) get() (*remote.Payload, error) { + var output *s3.GetObjectOutput + var err error + + input := &s3.GetObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption && c.customerEncryptionKey != nil { + input.SetSSECustomerKey(string(c.customerEncryptionKey)) + input.SetSSECustomerAlgorithm(s3EncryptionAlgorithm) + input.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5()) + } + + output, err = c.s3Client.GetObject(input) + + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + switch awserr.Code() { + case s3.ErrCodeNoSuchBucket: + return nil, fmt.Errorf(errS3NoSuchBucket, err) + case s3.ErrCodeNoSuchKey: + return nil, nil + } + } + return nil, err + } + + defer output.Body.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + sum := md5.Sum(buf.Bytes()) + payload := &remote.Payload{ + Data: buf.Bytes(), + MD5: sum[:], + } + + // If there was no data, then return nil + if 
len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + contentType := "application/json" + contentLength := int64(len(data)) + + i := &s3.PutObjectInput{ + ContentType: &contentType, + ContentLength: &contentLength, + Body: bytes.NewReader(data), + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption { + if c.kmsKeyID != "" { + i.SSEKMSKeyId = &c.kmsKeyID + i.ServerSideEncryption = aws.String("aws:kms") + } else if c.customerEncryptionKey != nil { + i.SetSSECustomerKey(string(c.customerEncryptionKey)) + i.SetSSECustomerAlgorithm(s3EncryptionAlgorithm) + i.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5()) + } else { + i.ServerSideEncryption = aws.String(s3EncryptionAlgorithm) + } + } + + if c.acl != "" { + i.ACL = aws.String(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + + _, err := c.s3Client.PutObject(i) + if err != nil { + return fmt.Errorf("failed to upload state: %s", err) + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
+ return fmt.Errorf("failed to store state MD5: %s", err) + + } + + return nil +} + +func (c *RemoteClient) Delete() error { + _, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + }) + + if err != nil { + return err + } + + if err := c.deleteMD5(); err != nil { + log.Printf("error deleting state md5: %s", err) + } + + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.ddbTable == "" { + return "", nil + } + + info.Path = c.lockPath() + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + "Info": {S: aws.String(string(info.Marshal()))}, + }, + TableName: aws.String(c.ddbTable), + ConditionExpression: aws.String("attribute_not_exists(LockID)"), + } + _, err := c.dynClient.PutItem(putParams) + + if err != nil { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + lockErr := &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5() ([]byte, error) { + if c.ddbTable == "" { + return nil, nil + } + + getParams := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + }, + ProjectionExpression: aws.String("LockID, Digest"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(getParams) + if err != nil { + return nil, err + } + + var val string + if v, ok := resp.Item["Digest"]; ok && v.S != nil { + val = *v.S + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state so that clients can check 
for stale state files. +func (c *RemoteClient) putMD5(sum []byte) error { + if c.ddbTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + "Digest": {S: aws.String(hex.EncodeToString(sum))}, + }, + TableName: aws.String(c.ddbTable), + } + _, err := c.dynClient.PutItem(putParams) + if err != nil { + log.Printf("[WARN] failed to record state serial in dynamodb: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.ddbTable == "" { + return nil + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + }, + TableName: aws.String(c.ddbTable), + } + if _, err := c.dynClient.DeleteItem(params); err != nil { + return err + } + return nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + getParams := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + ProjectionExpression: aws.String("LockID, Info"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(getParams) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := resp.Item["Info"]; ok && v.S != nil { + infoData = *v.S + } + + lockInfo := &statemgr.LockInfo{} + err = json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.ddbTable == "" { + return nil + } + + lockErr := &statemgr.LockError{} + + // TODO: store the path and lock ID in separate fields, and have proper + // projection expression only delete the lock if both match, rather than + // checking the ID from the info 
field first. + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + TableName: aws.String(c.ddbTable), + } + _, err = c.dynClient.DeleteItem(params) + + if err != nil { + lockErr.Err = err + return lockErr + } + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.path) +} + +func (c *RemoteClient) getSSECustomerKeyMD5() string { + b := md5.Sum(c.customerEncryptionKey) + return base64.StdEncoding.EncodeToString(b[:]) +} + +const errBadChecksumFmt = `state data in S3 does not have the expected content. + +This may be caused by unusually long delays in S3 processing a previous state +update. Please wait for a minute or two and try again. If this problem +persists, and neither S3 nor DynamoDB are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +DynamoDB table to the following value: %x +` + +const errS3NoSuchBucket = `S3 bucket does not exist. + +The referenced S3 bucket must have been previously created. If the S3 bucket +was created within the last minute, please wait for a minute or two and try +again. 
+ +Error: %s +` diff --git a/backend/remote-state/s3/client_test.go b/backend/remote-state/s3/client_test.go new file mode 100644 index 000000000000..c18f99e3cfa6 --- /dev/null +++ b/backend/remote-state/s3/client_test.go @@ -0,0 +1,317 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "testState" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + createS3Bucket(t, b1.s3Client, bucketName) + defer deleteS3Bucket(t, b1.s3Client, bucketName) + createDynamoDBTable(t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(t, 
b1.dynClient, bucketName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +// verify that we can unlock a state with an existing lock +func TestForceUnlock(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-force-%x", time.Now().Unix()) + keyName := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + createS3Bucket(t, b1.s3Client, bucketName) + defer deleteS3Bucket(t, b1.s3Client, bucketName) + createDynamoDBTable(t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(t, b1.dynClient, bucketName) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal("failed to get default state to force unlock:", err) + } + + if err := s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock default state") + } + + // now try the same thing with a named state + // first test with default + s1, err = b1.StateMgr("test") + if err != nil { + t.Fatal(err) + } + + info = statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err = s1.Lock(info) + 
if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err = b2.StateMgr("test") + if err != nil { + t.Fatal("failed to get named state to force unlock:", err) + } + + if err = s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock named state") + } +} + +func TestRemoteClient_clientMD5(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "testState" + + b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "dynamodb_table": bucketName, + })).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + createDynamoDBTable(t, b.dynClient, bucketName) + defer deleteDynamoDBTable(t, b.dynClient, bucketName) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client := s.(*remote.State).Client.(*RemoteClient) + + sum := md5.Sum([]byte("test")) + + if err := client.putMD5(sum[:]); err != nil { + t.Fatal(err) + } + + getSum, err := client.getMD5() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(getSum, sum[:]) { + t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum) + } + + if err := client.deleteMD5(); err != nil { + t.Fatal(err) + } + + if getSum, err := client.getMD5(); err == nil { + t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum) + } +} + +// verify that a client won't return a state with an incorrect checksum. 
+func TestRemoteClient_stateChecksum(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "testState" + + b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "dynamodb_table": bucketName, + })).(*Backend) + + createS3Bucket(t, b1.s3Client, bucketName) + defer deleteS3Bucket(t, b1.s3Client, bucketName) + createDynamoDBTable(t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(t, b1.dynClient, bucketName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client1 := s1.(*remote.State).Client + + // create an old and new state version to persist + s := statemgr.TestFullInitialState() + sf := &statefile.File{State: s} + var oldState bytes.Buffer + if err := statefile.Write(sf, &oldState); err != nil { + t.Fatal(err) + } + sf.Serial++ + var newState bytes.Buffer + if err := statefile.Write(sf, &newState); err != nil { + t.Fatal(err) + } + + // Use b2 without a dynamodb_table to bypass the lock table to write the state directly. 
+ // client2 will write the "incorrect" state, simulating s3 eventually consistency delays + b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + })).(*Backend) + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client2 := s2.(*remote.State).Client + + // write the new state through client2 so that there is no checksum yet + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // verify that we can pull a state without a checksum + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } + + // write the new state back with its checksum + if err := client1.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // put an empty state in place to check for panics during get + if err := client2.Put([]byte{}); err != nil { + t.Fatal(err) + } + + // remove the timeouts so we can fail immediately + origTimeout := consistencyRetryTimeout + origInterval := consistencyRetryPollInterval + defer func() { + consistencyRetryTimeout = origTimeout + consistencyRetryPollInterval = origInterval + }() + consistencyRetryTimeout = 0 + consistencyRetryPollInterval = 0 + + // fetching an empty state through client1 should now error out due to a + // mismatched checksum. + if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // put the old state in place of the new, without updating the checksum + if err := client2.Put(oldState.Bytes()); err != nil { + t.Fatal(err) + } + + // fetching the wrong state through client1 should now error out due to a + // mismatched checksum. 
+ if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // update the state with the correct one after we Get again + testChecksumHook = func() { + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + testChecksumHook = nil + } + + consistencyRetryTimeout = origTimeout + + // this final Get will fail to fail the checksum verification, the above + // callback will update the state with the correct version, and Get should + // retry automatically. + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } +} diff --git a/backend/remote/backend.go b/backend/remote/backend.go new file mode 100644 index 000000000000..b0874de27ddf --- /dev/null +++ b/backend/remote/backend.go @@ -0,0 +1,1079 @@ +package remote + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + defaultHostname = "app.terraform.io" + defaultParallelism = 10 + stateServiceID = "state.v2" + tfeServiceID = "tfe.v2.1" + genericHostname = "localterraform.com" +) + +// Remote is an implementation of EnhancedBackend that performs all +// operations in a remote backend. 
+type Remote struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ContextOpts are the base context options to set when initializing a + // new Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // client is the remote backend API client. + client *tfe.Client + + // lastRetry is set to the last time a request was retried. + lastRetry time.Time + + // hostname of the remote backend server. + hostname string + + // organization is the organization that contains the target workspaces. + organization string + + // workspace is used to map the default workspace to a remote workspace. + workspace string + + // prefix is used to filter down a set of workspaces that use a single + // configuration. + prefix string + + // services is used for service discovery + services *disco.Disco + + // local, if non-nil, will be used for all enhanced behavior. This + // allows local behavior with the remote backend functioning as remote + // state storage backend. + local backend.Enhanced + + // forceLocal, if true, will force the use of the local backend. + forceLocal bool + + // opLock locks operations + opLock sync.Mutex + + // ignoreVersionConflict, if true, will disable the requirement that the + // local Terraform version matches the remote workspace's configured + // version. This will also cause VerifyWorkspaceTerraformVersion to return + // a warning diagnostic instead of an error. + ignoreVersionConflict bool +} + +var _ backend.Backend = (*Remote)(nil) +var _ backend.Enhanced = (*Remote)(nil) +var _ backend.Local = (*Remote)(nil) + +// New creates a new initialized remote backend. +func New(services *disco.Disco) *Remote { + return &Remote{ + services: services, + } +} + +// ConfigSchema implements backend.Enhanced. 
+func (b *Remote) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "hostname": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["hostname"], + }, + "organization": { + Type: cty.String, + Required: true, + Description: schemaDescriptions["organization"], + }, + "token": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["token"], + }, + }, + + BlockTypes: map[string]*configschema.NestedBlock{ + "workspaces": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["name"], + }, + "prefix": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["prefix"], + }, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + } +} + +// PrepareConfig implements backend.Backend. +func (b *Remote) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return obj, diags + } + + if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid organization value", + `The "organization" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + } + + var name, prefix string + if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { + if val := workspaces.GetAttr("name"); !val.IsNull() { + name = val.AsString() + } + if val := workspaces.GetAttr("prefix"); !val.IsNull() { + prefix = val.AsString() + } + } + + // Make sure that we have either a workspace name or a prefix. 
+ if name == "" && prefix == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + `Either workspace "name" or "prefix" is required.`, + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + )) + } + + // Make sure that only one of workspace name or a prefix is configured. + if name != "" && prefix != "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + `Only one of workspace "name" or "prefix" is allowed.`, + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + )) + } + + return obj, diags +} + +// configureGenericHostname aliases the remote backend hostname configuration +// as a generic "localterraform.com" hostname. This was originally added as a +// Terraform Enterprise feature and is useful for re-using whatever the +// Cloud/Enterprise backend host is in nested module sources in order +// to prevent code churn when re-using config between multiple +// Terraform Enterprise environments. +func (b *Remote) configureGenericHostname() { + // This won't be an error for the given constant value + genericHost, _ := svchost.ForComparison(genericHostname) + + // This won't be an error because, by this time, the hostname has been parsed and + // service discovery requests made against it. + targetHost, _ := svchost.ForComparison(b.hostname) + + b.services.Alias(genericHost, targetHost) +} + +// Configure implements backend.Enhanced. +func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return diags + } + + // Get the hostname. + if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { + b.hostname = val.AsString() + } else { + b.hostname = defaultHostname + } + + // Get the organization. + if val := obj.GetAttr("organization"); !val.IsNull() { + b.organization = val.AsString() + } + + // Get the workspaces configuration block and retrieve the + // default workspace name and prefix. 
+ if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { + if val := workspaces.GetAttr("name"); !val.IsNull() { + b.workspace = val.AsString() + } + if val := workspaces.GetAttr("prefix"); !val.IsNull() { + b.prefix = val.AsString() + } + } + + // Determine if we are forced to use the local backend. + b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" + + serviceID := tfeServiceID + if b.forceLocal { + serviceID = stateServiceID + } + + // Discover the service URL for this host to confirm that it provides + // a remote backend API and to get the version constraints. + service, constraints, err := b.discover(serviceID) + + // First check any contraints we might have received. + if constraints != nil { + diags = diags.Append(b.checkConstraints(constraints)) + if diags.HasErrors() { + return diags + } + } + + // When we don't have any constraints errors, also check for discovery + // errors before we continue. + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + "", // no description is needed here, the error is clear + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + + // Get the token from the config. + var token string + if val := obj.GetAttr("token"); !val.IsNull() { + token = val.AsString() + } + + // Retrieve the token for this host as configured in the credentials + // section of the CLI Config File if no token was configured for this + // host in the config. + if token == "" { + token, err = b.token() + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + "", // no description is needed here, the error is clear + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + } + + // Return an error if we still don't have a token at this point. 
+ if token == "" { + loginCommand := "terraform login" + if b.hostname != defaultHostname { + loginCommand = loginCommand + " " + b.hostname + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required token could not be found", + fmt.Sprintf( + "Run the following command to generate a token for %s:\n %s", + b.hostname, + loginCommand, + ), + )) + return diags + } + + b.configureGenericHostname() + + cfg := &tfe.Config{ + Address: service.String(), + BasePath: service.Path, + Token: token, + Headers: make(http.Header), + RetryLogHook: b.retryLogHook, + } + + // Set the version header to the current version. + cfg.Headers.Set(tfversion.Header, tfversion.Version) + + // Create the remote backend API client. + b.client, err = tfe.NewClient(cfg) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create the Terraform Enterprise client", + fmt.Sprintf( + `The "remote" backend encountered an unexpected error while creating the `+ + `Terraform Enterprise client: %s.`, err, + ), + )) + return diags + } + + // Check if the organization exists by reading its entitlements. + entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + err = fmt.Errorf("organization %q at host %s not found.\n\n"+ + "Please ensure that the organization and hostname are correct "+ + "and that your API token for %s is valid.", + b.organization, b.hostname, b.hostname) + } + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname), + fmt.Sprintf("The \"remote\" backend encountered an unexpected error while reading the "+ + "organization settings: %s", err), + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + return diags + } + + // Configure a local backend for when we need to run operations locally. 
+ b.local = backendLocal.NewWithBackend(b) + b.forceLocal = b.forceLocal || !entitlements.Operations + + // Enable retries for server errors as the backend is now fully configured. + b.client.RetryServerErrors(true) + + return diags +} + +// discover the remote backend API service URL and version constraints. +func (b *Remote) discover(serviceID string) (*url.URL, *disco.Constraints, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return nil, nil, err + } + + host, err := b.services.Discover(hostname) + if err != nil { + return nil, nil, err + } + + service, err := host.ServiceURL(serviceID) + // Return the error, unless its a disco.ErrVersionNotSupported error. + if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil { + return nil, nil, err + } + + // We purposefully ignore the error and return the previous error, as + // checking for version constraints is considered optional. + constraints, _ := host.VersionConstraints(serviceID, "terraform") + + return service, constraints, err +} + +// checkConstraints checks service version constrains against our own +// version and returns rich and informational diagnostics in case any +// incompatibilities are detected. +func (b *Remote) checkConstraints(c *disco.Constraints) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if c == nil || c.Minimum == "" || c.Maximum == "" { + return diags + } + + // Generate a parsable constraints string. + excluding := "" + if len(c.Excluding) > 0 { + excluding = fmt.Sprintf(", != %s", strings.Join(c.Excluding, ", != ")) + } + constStr := fmt.Sprintf(">= %s%s, <= %s", c.Minimum, excluding, c.Maximum) + + // Create the constraints to check against. + constraints, err := version.NewConstraint(constStr) + if err != nil { + return diags.Append(checkConstraintsWarning(err)) + } + + // Create the version to check. 
+ v, err := version.NewVersion(tfversion.Version) + if err != nil { + return diags.Append(checkConstraintsWarning(err)) + } + + // Return if we satisfy all constraints. + if constraints.Check(v) { + return diags + } + + // Find out what action (upgrade/downgrade) we should advise. + minimum, err := version.NewVersion(c.Minimum) + if err != nil { + return diags.Append(checkConstraintsWarning(err)) + } + + maximum, err := version.NewVersion(c.Maximum) + if err != nil { + return diags.Append(checkConstraintsWarning(err)) + } + + var excludes []*version.Version + for _, exclude := range c.Excluding { + v, err := version.NewVersion(exclude) + if err != nil { + return diags.Append(checkConstraintsWarning(err)) + } + excludes = append(excludes, v) + } + + // Sort all the excludes. + sort.Sort(version.Collection(excludes)) + + var action, toVersion string + switch { + case minimum.GreaterThan(v): + action = "upgrade" + toVersion = ">= " + minimum.String() + case maximum.LessThan(v): + action = "downgrade" + toVersion = "<= " + maximum.String() + case len(excludes) > 0: + // Get the latest excluded version. 
+ action = "upgrade" + toVersion = "> " + excludes[len(excludes)-1].String() + } + + switch { + case len(excludes) == 1: + excluding = fmt.Sprintf(", excluding version %s", excludes[0].String()) + case len(excludes) > 1: + var vs []string + for _, v := range excludes { + vs = append(vs, v.String()) + } + excluding = fmt.Sprintf(", excluding versions %s", strings.Join(vs, ", ")) + default: + excluding = "" + } + + summary := fmt.Sprintf("Incompatible Terraform version v%s", v.String()) + details := fmt.Sprintf( + "The configured Terraform Enterprise backend is compatible with Terraform "+ + "versions >= %s, <= %s%s.", c.Minimum, c.Maximum, excluding, + ) + + if action != "" && toVersion != "" { + summary = fmt.Sprintf("Please %s Terraform to %s", action, toVersion) + details += fmt.Sprintf(" Please %s to a supported version and try again.", action) + } + + // Return the customized and informational error message. + return diags.Append(tfdiags.Sourceless(tfdiags.Error, summary, details)) +} + +// token returns the token for this host as configured in the credentials +// section of the CLI Config File. If no token was configured, an empty +// string will be returned instead. +func (b *Remote) token() (string, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// retryLogHook is invoked each time a request is retried allowing the +// backend to log any connection issues to prevent data loss. +func (b *Remote) retryLogHook(attemptNum int, resp *http.Response) { + if b.CLI != nil { + // Ignore the first retry to make sure any delayed output will + // be written to the console before we start logging retries. 
+ // + // The retry logic in the TFE client will retry both rate limited + // requests and server errors, but in the remote backend we only + // care about server errors so we ignore rate limit (429) errors. + if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { + // Reset the last retry time. + b.lastRetry = time.Now() + return + } + + if attemptNum == 1 { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) + } else { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace( + fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) + } + } +} + +// Workspaces implements backend.Enhanced. +func (b *Remote) Workspaces() ([]string, error) { + if b.prefix == "" { + return nil, backend.ErrWorkspacesNotSupported + } + return b.workspaces() +} + +// workspaces returns a filtered list of remote workspace names. +func (b *Remote) workspaces() ([]string, error) { + options := &tfe.WorkspaceListOptions{} + switch { + case b.workspace != "": + options.Search = b.workspace + case b.prefix != "": + options.Search = b.prefix + } + + // Create a slice to contain all the names. + var names []string + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + if b.workspace != "" && w.Name == b.workspace { + names = append(names, backend.DefaultStateName) + continue + } + if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { + names = append(names, strings.TrimPrefix(w.Name, b.prefix)) + } + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. 
+ sort.StringSlice(names).Sort() + + return names, nil +} + +// WorkspaceNamePattern provides an appropriate workspace renaming pattern for backend migration +// purposes (handled outside of this package), based on previous usage of this backend with the +// 'prefix' workspace functionality. As of this writing, see meta_backend.migrate.go +func (b *Remote) WorkspaceNamePattern() string { + if b.prefix != "" { + return b.prefix + "*" + } + + return "" +} + +// DeleteWorkspace implements backend.Enhanced. +func (b *Remote) DeleteWorkspace(name string, _ bool) error { + if b.workspace == "" && name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: &tfe.Workspace{ + Name: name, + }, + } + + return client.Delete() +} + +// StateMgr implements backend.Enhanced. +func (b *Remote) StateMgr(name string) (statemgr.Full, error) { + if b.workspace == "" && name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. 
+ switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("Failed to retrieve workspace %s: %v", name, err) + } + + if err == tfe.ErrResourceNotFound { + options := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + } + + // We only set the Terraform Version for the new workspace if this is + // a release candidate or a final release. + if tfversion.Prerelease == "" || strings.HasPrefix(tfversion.Prerelease, "rc") { + options.TerraformVersion = tfe.String(tfversion.String()) + } + + workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options) + if err != nil { + return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) + } + } + + // This is a fallback error check. Most code paths should use other + // mechanisms to check the version, then set the ignoreVersionConflict + // field to true. This check is only in place to ensure that we don't + // accidentally upgrade state with a new code path, and the version check + // logic is coarser and simpler. + if !b.ignoreVersionConflict { + wsv := workspace.TerraformVersion + // Explicitly ignore the pseudo-version "latest" here, as it will cause + // plan and apply to always fail. + if wsv != tfversion.String() && wsv != "latest" { + return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", workspace.TerraformVersion, tfversion.String()) + } + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: workspace, + + // This is optionally set during Terraform Enterprise runs. 
+ runID: os.Getenv("TFE_RUN_ID"), + } + + return &remote.State{Client: client}, nil +} + +func isLocalExecutionMode(execMode string) bool { + return execMode == "local" +} + +func (b *Remote) fetchWorkspace(ctx context.Context, organization string, name string) (*tfe.Workspace, error) { + remoteWorkspaceName := b.getRemoteWorkspaceName(name) + // Retrieve the workspace for this operation. + w, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + switch err { + case context.Canceled: + return nil, err + case tfe.ErrResourceNotFound: + return nil, fmt.Errorf( + "workspace %s not found\n\n"+ + "The configured \"remote\" backend returns '404 Not Found' errors for resources\n"+ + "that do not exist, as well as for resources that a user doesn't have access\n"+ + "to. If the resource does exist, please check the rights for the used token", + name, + ) + default: + err := fmt.Errorf( + "the configured \"remote\" backend encountered an unexpected error:\n\n%s", + err, + ) + return nil, err + } + } + + return w, nil +} + +// Operation implements backend.Enhanced. +func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) + + if err != nil { + return nil, err + } + + // Terraform remote version conflicts are not a concern for operations. We + // are in one of three states: + // + // - Running remotely, in which case the local version is irrelevant; + // - Workspace configured for local operations, in which case the remote + // version is meaningless; + // - Forcing local operations with a remote backend, which should only + // happen in the Terraform Cloud worker, in which case the Terraform + // versions by definition match. + b.IgnoreVersionConflict() + + // Check if we need to use the local backend to run the operation. 
+ if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) { + // Record that we're forced to run operations locally to allow the + // command package UI to operate correctly + b.forceLocal = true + log.Printf("[DEBUG] Remote backend is delegating %s to the local backend", op.Type) + return b.local.Operation(ctx, op) + } + + // Set the remote workspace name. + op.Workspace = w.Name + + // Determine the function to call for our operation + var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) + switch op.Type { + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + case backend.OperationTypeRefresh: + return nil, fmt.Errorf( + "\n\nThe \"refresh\" operation is not supported when using the \"remote\" backend. " + + "Use \"terraform apply -refresh-only\" instead.") + default: + return nil, fmt.Errorf( + "\n\nThe \"remote\" backend does not support the %q operation.", op.Type) + } + + // Lock + b.opLock.Lock() + + // Build our running operation + // the runningCtx is only used to block until the operation returns. + runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + // Do it. 
+ go func() { + defer logging.PanicHandler() + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op, w) + if opErr != nil && opErr != context.Canceled { + var diags tfdiags.Diagnostics + diags = diags.Append(opErr) + op.ReportResult(runningOp, diags) + return + } + + if r == nil && opErr == context.Canceled { + runningOp.Result = backend.OperationFailure + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + + // Record if there are any changes. + runningOp.PlanEmpty = !r.HasChanges + + if opErr == context.Canceled { + if err := b.cancel(cancelCtx, op, r); err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + } + + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + runningOp.Result = backend.OperationFailure + } + } + }() + + // Return the running operation. + return runningOp, nil +} + +func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.Actions.IsCancelable { + // Only ask if the remote operation should be canceled + // if the auto approve flag is not set. + if !op.AutoApprove { + v, err := op.UIIn.Input(cancelCtx, &terraform.InputOpts{ + Id: "cancel", + Query: "\nDo you want to cancel the remote operation?", + Description: "Only 'yes' will be accepted to cancel.", + }) + if err != nil { + return generalError("Failed asking to cancel", err) + } + if v != "yes" { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) + } + return nil + } + } else { + if b.CLI != nil { + // Insert a blank line to separate the ouputs. 
+ b.CLI.Output("") + } + } + + // Try to cancel the remote operation. + err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) + if err != nil { + return generalError("Failed to cancel run", err) + } + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) + } + } + + return nil +} + +// IgnoreVersionConflict allows commands to disable the fall-back check that +// the local Terraform version matches the remote workspace's configured +// Terraform version. This should be called by commands where this check is +// unnecessary, such as those performing remote operations, or read-only +// operations. It will also be called if the user uses a command-line flag to +// override this check. +func (b *Remote) IgnoreVersionConflict() { + b.ignoreVersionConflict = true +} + +// VerifyWorkspaceTerraformVersion compares the local Terraform version against +// the workspace's configured Terraform version. If they are equal, this means +// that there are no compatibility concerns, so it returns no diagnostics. +// +// If the versions differ, +func (b *Remote) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) + if err != nil { + // If the workspace doesn't exist, there can be no compatibility + // problem, so we can return. This is most likely to happen when + // migrating state from a local backend to a new workspace. + if err == tfe.ErrResourceNotFound { + return nil + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Workspace read failed: %s", err), + )) + return diags + } + + // If the workspace has the pseudo-version "latest", all bets are off. We + // cannot reasonably determine what the intended Terraform version is, so + // we'll skip version verification. 
+ if workspace.TerraformVersion == "latest" { + return nil + } + + // If the workspace has remote operations disabled, the remote Terraform + // version is effectively meaningless, so we'll skip version verification. + if isLocalExecutionMode(workspace.ExecutionMode) { + return nil + } + + remoteVersion, err := version.NewSemver(workspace.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Invalid Terraform version: %s", err), + )) + return diags + } + + v014 := version.Must(version.NewSemver("0.14.0")) + if tfversion.SemVer.LessThan(v014) || remoteVersion.LessThan(v014) { + // Versions of Terraform prior to 0.14.0 will refuse to load state files + // written by a newer version of Terraform, even if it is only a patch + // level difference. As a result we require an exact match. + if tfversion.SemVer.Equal(remoteVersion) { + return diags + } + } + if tfversion.SemVer.GreaterThanOrEqual(v014) && remoteVersion.GreaterThanOrEqual(v014) { + // Versions of Terraform after 0.14.0 should be compatible with each + // other. At the time this code was written, the only constraints we + // are aware of are: + // + // - 0.14.0 is guaranteed to be compatible with versions up to but not + // including 1.3.0 + v130 := version.Must(version.NewSemver("1.3.0")) + if tfversion.SemVer.LessThan(v130) && remoteVersion.LessThan(v130) { + return diags + } + // - Any new Terraform state version will require at least minor patch + // increment, so x.y.* will always be compatible with each other + tfvs := tfversion.SemVer.Segments64() + rwvs := remoteVersion.Segments64() + if len(tfvs) == 3 && len(rwvs) == 3 && tfvs[0] == rwvs[0] && tfvs[1] == rwvs[1] { + return diags + } + } + + // Even if ignoring version conflicts, it may still be useful to call this + // method and warn the user about a mismatch between the local and remote + // Terraform versions. 
+ severity := tfdiags.Error + if b.ignoreVersionConflict { + severity = tfdiags.Warning + } + + suggestion := " If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace." + if b.ignoreVersionConflict { + suggestion = "" + } + diags = diags.Append(tfdiags.Sourceless( + severity, + "Terraform version mismatch", + fmt.Sprintf( + "The local Terraform version (%s) does not match the configured version for remote workspace %s/%s (%s).%s", + tfversion.String(), + b.organization, + workspace.Name, + workspace.TerraformVersion, + suggestion, + ), + )) + + return diags +} + +func (b *Remote) IsLocalOperations() bool { + return b.forceLocal +} + +func generalError(msg string, err error) error { + var diags tfdiags.Diagnostics + + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + + switch err { + case context.Canceled: + return err + case tfe.ErrResourceNotFound: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend returns '404 Not Found' errors for resources `+ + `that do not exist, as well as for resources that a user doesn't have access `+ + `to. If the resource does exist, please check the rights for the used token.`, + )) + return diags.Err() + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend encountered an unexpected error. Sometimes `+ + `this is caused by network connection problems, in which case you could retry `+ + `the command. 
If the issue persists please open a support ticket to get help `+ + `resolving the problem.`, + )) + return diags.Err() + } +} + +func checkConstraintsWarning(err error) tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Failed to check version constraints: %v", err), + "Checking version constraints is considered optional, but this is an "+ + "unexpected error which should be reported.", + ) +} + +// The newline in this error is to make it look good in the CLI! +const initialRetryError = ` +[reset][yellow]There was an error connecting to the remote backend. Please do not exit +Terraform to prevent data loss! Trying to restore the connection... +[reset] +` + +const repeatedRetryError = ` +[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset] +` + +const operationCanceled = ` +[reset][red]The remote operation was successfully cancelled.[reset] +` + +const operationNotCanceled = ` +[reset][red]The remote operation was not cancelled.[reset] +` + +var schemaDescriptions = map[string]string{ + "hostname": "The remote backend hostname to connect to (defaults to app.terraform.io).", + "organization": "The name of the organization containing the targeted workspace(s).", + "token": "The token used to authenticate with the remote backend. If credentials for the\n" + + "host are configured in the CLI Config File, then those will be used instead.", + "name": "A workspace name used to map the default workspace to a named remote workspace.\n" + + "When configured only the default workspace can be used. This option conflicts\n" + + "with \"prefix\"", + "prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" + + "will automatically be prefixed with this prefix. If omitted only the default\n" + + "workspace can be used. 
This option conflicts with \"name\"", +} diff --git a/backend/remote/backend_apply.go b/backend/remote/backend_apply.go new file mode 100644 index 000000000000..f1782b095bbe --- /dev/null +++ b/backend/remote/backend_apply.go @@ -0,0 +1,301 @@ +package remote + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Apply operation") + + var diags tfdiags.Diagnostics + + // We should remove the `CanUpdate` part of this test, but for now + // (to remain compatible with tfe.v2.1) we'll leave it in here. + if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. 
In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if b.hasExplicitVariableValues(op) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Apply requires configuration to be present. 
Applying without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run 'terraform destroy' which `+ + `does not require any configuration files.`, + )) + } + + // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, + // so if there's an error when parsing the RemoteAPIVersion, it's handled as + // equivalent to an API version < 2.3. + currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + + if !op.PlanRefresh { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is not supported", + fmt.Sprintf( + `The host %s does not support the -refresh=false option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if op.PlanMode == plans.RefreshOnlyMode { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Refresh-only mode is not supported", + fmt.Sprintf( + `The host %s does not support -refresh-only mode for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.ForceReplace) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning resource replacements is not supported", + fmt.Sprintf( + `The host %s does not support the -replace option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.Targets) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.3") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is not supported", + 
fmt.Sprintf( + `The host %s does not support the -target option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + // Run the plan phase. + r, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. + if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. + if !w.AutoApply && !r.Actions.IsConfirmable { + return r, nil + } + + // Since we already checked the permissions before creating the run + // this should never happen. But it doesn't hurt to keep this in as + // a safeguard for any unexpected situations. + if !w.AutoApply && !r.Permissions.CanApply { + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + switch op.PlanMode { + case plans.DestroyMode: + return r, generalError("Failed to discard destroy", err) + default: + return r, generalError("Failed to discard apply", err) + } + } + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to approve the pending changes", + fmt.Sprintf("There are pending changes, but the provided credentials have "+ + "insufficient rights to approve them. The run will be discarded to prevent "+ + "it from blocking the queue waiting for external approval. 
To queue a run "+ + "that can be approved by someone else, please use the 'Queue Plan' button in "+ + "the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace), + )) + return r, diags.Err() + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove + + if !w.AutoApply { + if mustConfirm { + opts := &terraform.InputOpts{Id: "approve"} + + if op.PlanMode == plans.DestroyMode { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + err = b.confirm(stopCtx, op, opts, r, "yes") + if err != nil && err != errRunApproved { + return r, err + } + } + + if err != errRunApproved { + if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + } + + // If we don't need to ask for confirmation, insert a blank + // line to separate the outputs. 
+ if w.AutoApply || !mustConfirm { + if b.CLI != nil { + b.CLI.Output("") + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + skip := 0 + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + // Skip the first 3 lines to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + return r, nil +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if it's still pending. If the apply started it +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... 
+` diff --git a/backend/remote/backend_apply_test.go b/backend/remote/backend_apply_test.go new file mode 100644 index 000000000000..aaf39c2be6b9 --- /dev/null +++ b/backend/remote/backend_apply_test.go @@ -0,0 +1,1665 @@ +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" +) + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationApplyWithTimeout(t, configDir, 0) +} + +func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. 
+ depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeApply, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestRemote_applyBasic(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestRemote_applyCanceled(t 
*testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) + } +} + +func TestRemote_applyWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace without permissions. + w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueApply = false + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to apply changes") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVCS(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace with a VCS. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + VCSRepo: &tfe.VCSRepoOptions{}, + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { + t.Fatalf("expected a VCS error, got: %v", errOutput) + } +} + +func TestRemote_applyWithParallelism(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_applyWithPlan(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = backend.DefaultStateName + + run, err := 
b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_applyWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh set + // to false. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithoutRefreshIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning without refresh is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh-only set + // to true. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Refresh-only mode is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithTarget(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // target address we requested 
above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithTargetIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + // Set the tfe client's RemoteAPIVersion to an empty string, to mimic + // API versions prior to 2.3. + b.client.SetFakeRemoteAPIVersion("") + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Resource targeting is not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_applyWithReplace(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected 
plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithReplaceIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning resource replacements is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVariables(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") + defer configCleanup() + + op.Variables = testVariables(terraform.ValueFromNamedFile, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) 
+ } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_applyNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) + } +} + +func TestRemote_applyNoChanges(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := 
b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summery: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestRemote_applyNoApprove(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Apply discarded") { + t.Fatalf("expected an apply discarded error, got: %v", errOutput) + } +} + +func TestRemote_applyAutoApprove(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if 
len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyApprovedExternally(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. 
+ time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) + if err != nil { + t.Fatalf("unexpected error approving run: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "approved using the UI or API") { + t.Fatalf("expected external approval in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyDiscardedExternally(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error 
starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) + if err != nil { + t.Fatalf("unexpected error discarding run: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "discarded using the UI or API") { + t.Fatalf("expected external discard output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyWithAutoApply(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace that auto applies. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + AutoApply: tfe.Bool(true), + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. 
+ if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestRemote_applyLockTimeout(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. 
+ c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. + signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summery in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyDestroy(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } 
+} + +func TestRemote_applyPolicyPass(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == 
backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.AutoApprove = false + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote 
backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicySoftFailAutoApproveSuccess(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{}) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to success due to auto-approve") + } + + if run.PlanEmpty { + t.Fatalf("expected plan to not be empty, plan opertion completed without error") + } + + if len(input.answers) != 0 { + t.Fatalf("expected no answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected no policy check errors, instead got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check to be false, insead got: %s", output) + } + if !strings.Contains(output, "Apply complete!") { + t.Fatalf("expected apply to be complete, instead got: %s", output) + } + + if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected resources, instead got: %s", output) + } +} + 
+func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + AutoApply: tfe.Bool(true), + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyWithRemoteError(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error") + defer configCleanup() + defer done(t) + + 
op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected apply error in output: %s", output) + } +} + +func TestRemote_applyVersionCheck(t *testing.T) { + testCases := map[string]struct { + localVersion string + remoteVersion string + forceLocal bool + executionMode string + wantErr string + }{ + "versions can be different for remote apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "remote", + }, + "versions can be different for local apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "local", + }, + "force local with remote operations and different versions is acceptable": { + localVersion: "0.14.0", + remoteVersion: "0.14.0-acme-provider-bundle", + forceLocal: true, + executionMode: "remote", + }, + "no error if versions are identical": { + localVersion: "0.14.0", + remoteVersion: "0.14.0", + forceLocal: true, + executionMode: "remote", + }, + "no error if force local but workspace has remote operations disabled": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + forceLocal: true, + executionMode: "local", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // SETUP: Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // SETUP: Set local version for the test case + 
tfversion.Prerelease = "" + tfversion.Version = tc.localVersion + tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) + + // SETUP: Set force local for the test case + b.forceLocal = tc.forceLocal + + ctx := context.Background() + + // SETUP: set the operations and Terraform Version fields on the + // remote workspace + _, err := b.client.Workspaces.Update( + ctx, + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: tfe.String(tc.executionMode), + TerraformVersion: tfe.String(tc.remoteVersion), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + // RUN: prepare the apply operation and run it + op, configCleanup, _ := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // RUN: wait for completion + <-run.Done() + output := done(t) + + if tc.wantErr != "" { + // ASSERT: if the test case wants an error, check for failure + // and the error message + if run.Result != backend.OperationFailure { + t.Fatalf("expected run to fail, but result was %#v", run.Result) + } + errOutput := output.Stderr() + if !strings.Contains(errOutput, tc.wantErr) { + t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) + } + } else { + // ASSERT: otherwise, check for success and appropriate output + // based on whether the run should be local or remote + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + output := b.CLI.(*cli.MockUi).OutputWriter.String() + hasRemote := strings.Contains(output, "Running apply in 
the remote backend") + hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") + hasResources := run.State.HasManagedResourceInstanceObjects() + if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { + if !hasRemote { + t.Errorf("missing remote backend header in output: %s", output) + } + if !hasSummary { + t.Errorf("expected apply summary in output: %s", output) + } + } else { + if hasRemote { + t.Errorf("unexpected remote backend header in output: %s", output) + } + if !hasResources { + t.Errorf("expected resources in state") + } + } + } + }) + } +} diff --git a/backend/remote/backend_common.go b/backend/remote/backend_common.go new file mode 100644 index 000000000000..116ca95483a2 --- /dev/null +++ b/backend/remote/backend_common.go @@ -0,0 +1,577 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" +) + +var ( + errApplyDiscarded = errors.New("Apply discarded.") + errDestroyDiscarded = errors.New("Destroy discarded.") + errRunApproved = errors.New("approved using the UI or API") + errRunDiscarded = errors.New("discarded using the UI or API") + errRunOverridden = errors.New("overridden using the UI or API") +) + +var ( + backoffMin = 1000.0 + backoffMax = 3000.0 + + runPollInterval = 3 * time.Second +) + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. 
+func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return r, stopCtx.Err() + case <-cancelCtx.Done(): + return r, cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + // Timer up, show status + } + + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is no longer pending. + if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { + if i == 0 && opType == "plan" && b.CLI != nil { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) + } + if i > 0 && b.CLI != nil { + // Insert a blank line to separate the ouputs. + b.CLI.Output("") + } + return r, nil + } + + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + position := 0 + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + + // Retrieve the workspace used to run this operation in. + w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // If the workspace is locked the run will not be queued and we can + // update the status without making any expensive calls. 
+ if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run. + if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := &tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. + if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.ReadRunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. 
+ for _, item := range rq.Items { + if r.ID == item.ID { + position = item.PositionInQueue + break search + } + } + + // Exit the loop when we've seen all pages. + if rq.CurrentPage >= rq.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rq.NextPage + } + + if position > 0 { + c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization) + if err != nil { + return r, generalError("Failed to retrieve capacity", err) + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d queued run(s) to finish before starting...%s", + position-c.Running, + elapsed, + ))) + continue + } + + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for the %s to start...%s", opType, elapsed))) + } + } +} + +// hasExplicitVariableValues is a best-effort check to determine whether the +// user has provided -var or -var-file arguments to a remote operation. +// +// The results may be inaccurate if the configuration is invalid or if +// individual variable values are invalid. That's okay because we only use this +// result to hint the user to set variables a different way. It's always the +// remote system's responsibility to do final validation of the input. +func (b *Remote) hasExplicitVariableValues(op *backend.Operation) bool { + // Load the configuration using the caller-provided configuration loader. + config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) + if configDiags.HasErrors() { + // If we can't load the configuration then we'll assume no explicit + // variable values just to let the remote operation start and let + // the remote system return the same set of configuration errors. + return false + } + + // We're intentionally ignoring the diagnostics here because validation + // of the variable values is the responsibilty of the remote system. 
Our + // goal here is just to make a best effort count of how many variable + // values are coming from -var or -var-file CLI arguments so that we can + // hint the user that those are not supported for remote operations. + variables, _ := backend.ParseVariableValues(op.Variables, config.Module.Variables) + + // Check for explicitly-defined (-var and -var-file) variables, which the + // remote backend does not support. All other source types are okay, + // because they are implicit from the execution context anyway and so + // their final values will come from the _remote_ execution context. + for _, v := range variables { + switch v.SourceType { + case terraform.ValueFromCLIArg, terraform.ValueFromNamedFile: + return true + } + } + + return false +} + +func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.CostEstimate == nil { + return nil + } + + msgPrefix := "Cost estimation" + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return stopCtx.Err() + case <-cancelCtx.Done(): + return cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + } + + // Retrieve the cost estimate to get its current status. + ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) + if err != nil { + return generalError("Failed to retrieve cost estimate", err) + } + + // If the run is canceled or errored, but the cost-estimate still has + // no result, there is nothing further to render. + if ce.Status != tfe.CostEstimateFinished { + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return nil + } + } + + // checking if i == 0 so as to avoid printing this starting horizontal-rule + // every retry, and that it only prints it on the first (i=0) attempt. 
+ if b.CLI != nil && i == 0 { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + + switch ce.Status { + case tfe.CostEstimateFinished: + delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) + if err != nil { + return generalError("Unexpected error", err) + } + + sign := "+" + if delta < 0 { + sign = "-" + } + + deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) + + if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + return nil + case tfe.CostEstimatePending, tfe.CostEstimateQueued: + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) + } + continue + case tfe.CostEstimateSkippedDueToTargeting: + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output("Not available for this plan, because it was created with the -target option.") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateErrored: + b.CLI.Output(msgPrefix + " errored.\n") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateCanceled: + return fmt.Errorf(msgPrefix + " canceled.") + default: + return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) + } + } +} + +func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + // Read the policy check logs. This is a blocking call that will only + // return once the policy check is complete. + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + // Retrieve the policy check to get its current status. + pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + // If the run is canceled or errored, but the policy check still has + // no result, there is nothing further to render. 
+ if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + switch pc.Status { + case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: + continue + } + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization policy check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace policy check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + } + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + switch pc.Status { + case tfe.PolicyPasses: + if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) + + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) + } + + if op.AutoApprove { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + opts := &terraform.InputOpts{ + Id: "override", + Query: "\nDo you want to override the soft failed policy check?", + Description: "Only 'override' 
will be accepted to override.", + } + err = b.confirm(stopCtx, op, opts, r, "override") + if err != nil && err != errRunOverridden { + return fmt.Errorf( + fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl), + ) + } + + if err != errRunOverridden { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) + } + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) + } + } + + return nil +} + +func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { + doneCtx, cancel := context.WithCancel(stopCtx) + result := make(chan error, 2) + + go func() { + defer logging.PanicHandler() + + // Make sure we cancel doneCtx before we return + // so the input command is also canceled. + defer cancel() + + for { + select { + case <-doneCtx.Done(): + return + case <-stopCtx.Done(): + return + case <-time.After(runPollInterval): + // Retrieve the run again to get its current status. 
+ r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + result <- generalError("Failed to retrieve run", err) + return + } + + switch keyword { + case "override": + if r.Status != tfe.RunPolicyOverride { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunOverridden + } + } + case "yes": + if !r.Actions.IsConfirmable { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunApproved + } + } + } + + if err != nil { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) + } + + if err == errRunDiscarded { + err = errApplyDiscarded + if op.PlanMode == plans.DestroyMode { + err = errDestroyDiscarded + } + } + + result <- err + return + } + } + } + }() + + result <- func() error { + v, err := op.UIIn.Input(doneCtx, opts) + if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { + return fmt.Errorf("Error asking %s: %v", opts.Id, err) + } + + // We return the error of our parent channel as we don't + // care about the error of the doneCtx which is only used + // within this function. So if the doneCtx was canceled + // because stopCtx was canceled, this will properly return + // a context.Canceled error and otherwise it returns nil. + if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { + return stopCtx.Err() + } + + // Make sure we cancel the context here so the loop that + // checks for external changes to the run is ended before + // we start to make changes ourselves. + cancel() + + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. 
+ if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.PlanMode == plans.DestroyMode { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was discarded successfully, we still + // return an error as the apply command was canceled. + if op.PlanMode == plans.DestroyMode { + return errDestroyDiscarded + } + return errApplyDiscarded + } + + return nil + }() + + return <-result +} diff --git a/backend/remote/backend_context.go b/backend/remote/backend_context.go new file mode 100644 index 000000000000..181f32735f56 --- /dev/null +++ b/backend/remote/backend_context.go @@ -0,0 +1,295 @@ +package remote + +import ( + "context" + "fmt" + "log" + "strings" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Context implements backend.Local. +func (b *Remote) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := &backend.LocalRun{ + PlanOpts: &terraform.PlanOpts{ + Mode: op.PlanMode, + Targets: op.Targets, + }, + } + + op.StateLocker = op.StateLocker.WithContext(context.Background()) + + // Get the remote workspace name. + remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) + + // Get the latest state. 
+ log.Printf("[TRACE] backend/remote: requesting state manager for workspace %q", remoteWorkspaceName) + stateMgr, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] backend/remote: requesting state lock for workspace %q", remoteWorkspaceName) + if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { + return nil, nil, diags + } + + defer func() { + // If we're returning with errors, and thus not producing a valid + // context, we'll want to avoid leaving the remote workspace locked. + if diags.HasErrors() { + diags = diags.Append(op.StateLocker.Unlock()) + } + }() + + log.Printf("[TRACE] backend/remote: reading remote state for workspace %q", remoteWorkspaceName) + if err := stateMgr.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + // Initialize our context options + var opts terraform.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.UIInput = op.UIIn + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] backend/remote: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) + ret.InputState = stateMgr.State() + + log.Printf("[TRACE] backend/remote: loading configuration for the current working directory") + config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + ret.Config = config + + if op.AllowUnsetVariables { + // If we're not going to use the variables in an operation we'll be + // more lax about them, stubbing out any unset ones as unknown. 
+ // This gives us enough information to produce a consistent context, + // but not enough information to run a real operation (plan, apply, etc) + ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) + } else { + // The underlying API expects us to use the opaque workspace id to request + // variables, so we'll need to look that up using our organization name + // and workspace name. + remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) + return nil, nil, diags + } + + w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) + return nil, nil, diags + } + + if isLocalExecutionMode(w.ExecutionMode) { + log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) + } else { + log.Printf("[TRACE] backend/remote: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) + tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) + if err != nil && err != tfe.ErrResourceNotFound { + diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) + return nil, nil, diags + } + if tfeVariables != nil { + if op.Variables == nil { + op.Variables = make(map[string]backend.UnparsedVariableValue) + } + for _, v := range tfeVariables.Items { + if v.Category == tfe.CategoryTerraform { + if _, ok := op.Variables[v.Key]; !ok { + op.Variables[v.Key] = &remoteStoredVariableValue{ + definition: v, + } + } + } + } + } + } + + if op.Variables != nil { + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } 
+ ret.PlanOpts.SetVariables = variables + } + } + + tfCtx, ctxDiags := terraform.NewContext(&opts) + diags = diags.Append(ctxDiags) + ret.Core = tfCtx + + log.Printf("[TRACE] backend/remote: finished building terraform.Context") + + return ret, stateMgr, diags +} + +func (b *Remote) getRemoteWorkspaceName(localWorkspaceName string) string { + switch { + case localWorkspaceName == backend.DefaultStateName: + // The default workspace name is a special case, for when the backend + // is configured to with to an exact remote workspace rather than with + // a remote workspace _prefix_. + return b.workspace + case b.prefix != "" && !strings.HasPrefix(localWorkspaceName, b.prefix): + return b.prefix + localWorkspaceName + default: + return localWorkspaceName + } +} + +func (b *Remote) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { + remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) + + log.Printf("[TRACE] backend/remote: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) + remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + return nil, err + } + + return remoteWorkspace, nil +} + +func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { + remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) + if err != nil { + return "", err + } + + return remoteWorkspace.ID, nil +} + +func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) terraform.InputValues { + ret := make(terraform.InputValues, len(decls)) + + for name, cfg := range decls { + raw, exists := vv[name] + if !exists { + ret[name] = &terraform.InputValue{ + Value: cty.UnknownVal(cfg.Type), + SourceType: terraform.ValueFromConfig, + } + continue + } + + val, diags := raw.ParseVariableValue(cfg.ParsingMode) + if diags.HasErrors() { + ret[name] = &terraform.InputValue{ + 
Value: cty.UnknownVal(cfg.Type), + SourceType: terraform.ValueFromConfig, + } + continue + } + ret[name] = val + } + + return ret +} + +// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation +// that translates from the go-tfe representation of stored variables into +// the Terraform Core backend representation of variables. +type remoteStoredVariableValue struct { + definition *tfe.Variable +} + +var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) + +func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var val cty.Value + + switch { + case v.definition.Sensitive: + // If it's marked as sensitive then it's not available for use in + // local operations. We'll use an unknown value as a placeholder for + // it so that operations that don't need it might still work, but + // we'll also produce a warning about it to add context for any + // errors that might result here. + val = cty.DynamicVal + if !v.definition.HCL { + // If it's not marked as HCL then we at least know that the + // value must be a string, so we'll set that in case it allows + // us to do some more precise type checking. + val = cty.UnknownVal(cty.String) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), + )) + + case v.definition.HCL: + // If the variable value is marked as being in HCL syntax, we need to + // parse it the same way as it would be interpreted in a .tfvars + // file because that is how it would get passed to Terraform CLI for + // a remote operation and we want to mimic that result as closely as + // possible. 
+ var exprDiags hcl.Diagnostics + expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1}) + if expr != nil { + var moreDiags hcl.Diagnostics + val, moreDiags = expr.Value(nil) + exprDiags = append(exprDiags, moreDiags...) + } else { + // We'll have already put some errors in exprDiags above, so we'll + // just stub out the value here. + val = cty.DynamicVal + } + + // We don't have sufficient context to return decent error messages + // for syntax errors in the remote values, so we'll just return a + // generic message instead for now. + // (More complete error messages will still result from true remote + // operations, because they'll run on the remote system where we've + // materialized the values into a tfvars file we can report from.) + if exprDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid expression for var.%s", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key), + )) + } + + default: + // A variable value _not_ marked as HCL is always be a string, given + // literally. + val = cty.StringVal(v.definition.Value) + } + + return &terraform.InputValue{ + Value: val, + + // We mark these as "from input" with the rationale that entering + // variable values into the Terraform Cloud or Enterprise UI is, + // roughly speaking, a similar idea to entering variable values at + // the interactive CLI prompts. It's not a perfect correspondance, + // but it's closer than the other options. 
+ SourceType: terraform.ValueFromInput, + }, diags +} diff --git a/backend/remote/backend_context_test.go b/backend/remote/backend_context_test.go new file mode 100644 index 000000000000..d694bf5511e6 --- /dev/null +++ b/backend/remote/backend_context_test.go @@ -0,0 +1,469 @@ +package remote + +import ( + "context" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "reflect" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/zclconf/go-cty/cty" +) + +func TestRemoteStoredVariableValue(t *testing.T) { + tests := map[string]struct { + Def *tfe.Variable + Want cty.Value + WantError string + }{ + "string literal": { + &tfe.Variable{ + Key: "test", + Value: "foo", + HCL: false, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "string HCL": { + &tfe.Variable{ + Key: "test", + Value: `"foo"`, + HCL: true, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "list HCL": { + &tfe.Variable{ + Key: "test", + Value: `[]`, + HCL: true, + Sensitive: false, + }, + cty.EmptyTupleVal, + ``, + }, + "null HCL": { + &tfe.Variable{ + Key: "test", + Value: `null`, + HCL: true, + Sensitive: false, + }, + cty.NullVal(cty.DynamicPseudoType), + ``, + }, + "literal sensitive": { + &tfe.Variable{ + Key: "test", + HCL: false, + Sensitive: true, + }, + cty.UnknownVal(cty.String), + ``, + }, + "HCL sensitive": { + &tfe.Variable{ + Key: "test", + HCL: true, + Sensitive: true, + }, + cty.DynamicVal, + ``, + }, + "HCL computation": { + // This (stored expressions containing computation) is not a case + // we intentionally supported, but it became possible 
for remote + // operations in Terraform 0.12 (due to Terraform Cloud/Enterprise + // just writing the HCL verbatim into generated `.tfvars` files). + // We support it here for consistency, and we continue to support + // it in both places for backward-compatibility. In practice, + // there's little reason to do computation in a stored variable + // value because references are not supported. + &tfe.Variable{ + Key: "test", + Value: `[for v in ["a"] : v]`, + HCL: true, + Sensitive: false, + }, + cty.TupleVal([]cty.Value{cty.StringVal("a")}), + ``, + }, + "HCL syntax error": { + &tfe.Variable{ + Key: "test", + Value: `[`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + "HCL with references": { + &tfe.Variable{ + Key: "test", + Value: `foo.bar`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + v := &remoteStoredVariableValue{ + definition: test.Def, + } + // This ParseVariableValue implementation ignores the parsing mode, + // so we'll just always parse literal here. (The parsing mode is + // selected by the remote server, not by our local configuration.) 
+ gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + got := gotIV.Value + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} + +func TestRemoteContextWithVars(t *testing.T) { + catTerraform := tfe.CategoryTerraform + catEnv := tfe.CategoryEnv + + tests := map[string]struct { + Opts *tfe.VariableCreateOptions + WantError string + }{ + "Terraform variable": { + &tfe.VariableCreateOptions{ + Category: &catTerraform, + }, + `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, + }, + "environment variable": { + &tfe.VariableCreateOptions{ + Category: &catEnv, + }, + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/empty" + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: backend.DefaultStateName, + } + + v := test.Opts + if v.Key == nil { + key := "key" + v.Key = &key + } + b.client.Variables.Create(context.TODO(), workspaceID, *v) + + _, _, diags := b.LocalRun(op) + + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + // When Context() returns an error, it should unlock the state, + // so re-locking it is expected to succeed. 
+ stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state: %s", err.Error()) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + } + }) + } +} + +func TestRemoteVariablesDoNotOverride(t *testing.T) { + catTerraform := tfe.CategoryTerraform + + varName1 := "key1" + varName2 := "key2" + varName3 := "key3" + + varValue1 := "value1" + varValue2 := "value2" + varValue3 := "value3" + + tests := map[string]struct { + localVariables map[string]backend.UnparsedVariableValue + remoteVariables []*tfe.VariableCreateOptions + expectedVariables terraform.InputValues + }{ + "no local variables": { + map[string]backend.UnparsedVariableValue{}, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, + { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: cty.StringVal(varValue3), + 
SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + }, + }, + "single conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue(varValue3), + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: terraform.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + "no conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue(varValue3), + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + }, + terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: 
tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: terraform.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/variables" + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: backend.DefaultStateName, + Variables: test.localVariables, + } + + for _, v := range test.remoteVariables { + b.client.Variables.Create(context.TODO(), workspaceID, *v) + } + + lr, _, diags := b.LocalRun(op) + + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } 
+ + actual := lr.PlanOpts.SetVariables + expected := test.expectedVariables + + for expectedKey := range expected { + actualValue := actual[expectedKey] + expectedValue := expected[expectedKey] + + if !reflect.DeepEqual(*actualValue, *expectedValue) { + t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) + } + } + }) + } +} + +type testUnparsedVariableValue string + +func (v testUnparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + return &terraform.InputValue{ + Value: cty.StringVal(string(v)), + SourceType: terraform.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, nil +} diff --git a/backend/remote/backend_plan.go b/backend/remote/backend_plan.go new file mode 100644 index 000000000000..8f4a51d961d3 --- /dev/null +++ b/backend/remote/backend_plan.go @@ -0,0 +1,442 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" +) + +var planConfigurationVersionsPollInterval = 500 * time.Millisecond + +func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Plan operation") + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. 
In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if op.PlanOutPath != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saving a generated plan is currently not supported", + `The "remote" backend does not support saving the generated execution `+ + `plan locally at this time.`, + )) + } + + if b.hasExplicitVariableValues(op) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. 
Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a Terraform configuration file in the path being executed and try again.`, + )) + } + + // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, + // so if there's an error when parsing the RemoteAPIVersion, it's handled as + // equivalent to an API version < 2.3. + currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + + if len(op.Targets) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.3") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is not supported", + fmt.Sprintf( + `The host %s does not support the -target option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if !op.PlanRefresh { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is not supported", + fmt.Sprintf( + `The host %s does not support the -refresh=false option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.ForceReplace) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning resource replacements is not supported", + fmt.Sprintf( + `The host %s does not support the -replace option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if op.PlanMode == plans.RefreshOnlyMode { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + 
diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Refresh-only mode is not supported", + fmt.Sprintf( + `The host %s does not support -refresh-only mode for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + return b.plan(stopCtx, cancelCtx, op, w) +} + +func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) + } + + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // De-normalize the configuration directory path. + configDir, err = filepath.Abs(op.ConfigDir) + if err != nil { + return nil, generalError( + "Failed to get absolute path of the configuration directory: %v", err) + } + + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. + configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(configDir), + filepath.Clean(w.WorkingDirectory), + )) + + // If the workspace has a subdirectory as its working directory then + // our configDir will be some parent directory of the current working + // directory. Users are likely to find that surprising, so we'll + // produce an explicit message about it to be transparent about what + // we are doing and why. 
+ if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { + if b.CLI != nil { + b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` +The remote workspace is configured to work with configuration at +%s relative to the target repository. + +Terraform will upload the contents of the following directory, +excluding files or directories as defined by a .terraformignore file +at %s/.terraformignore (if it is present), +in order to capture the filesystem context the remote workspace expects: + %s +`), w.WorkingDirectory, configDir, configDir) + "\n") + } + } + + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = ioutil.TempDir("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. 
+ err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(planConfigurationVersionsPollInterval): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + ConfigurationVersion: cv, + Refresh: tfe.Bool(op.PlanRefresh), + Workspace: w, + } + + switch op.PlanMode { + case plans.NormalMode: + // okay, but we don't need to do anything special for this + case plans.RefreshOnlyMode: + runOptions.RefreshOnly = tfe.Bool(true) + case plans.DestroyMode: + runOptions.IsDestroy = tfe.Bool(true) + default: + // Shouldn't get here because we should update this for each new + // plan mode we add, mapping it to the corresponding RunCreateOptions + // field. 
+ return nil, generalError( + "Invalid plan mode", + fmt.Errorf("remote backend doesn't support %s", op.PlanMode), + ) + } + + if len(op.Targets) != 0 { + runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) + for _, addr := range op.Targets { + runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) + } + } + + if len(op.ForceReplace) != 0 { + runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) + for _, addr := range op.ForceReplace { + runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) + } + } + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + // When the lock timeout is set, if the run is still pending and + // cancellable after that period, we attempt to cancel it. + if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { + go func() { + defer logging.PanicHandler() + + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(lockTimeout): + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto aprove flag to indicate that we do not + // want to ask if the remote operation should be canceled. 
+ op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // If the run is canceled or errored, we still continue to the + // cost-estimation and policy check phases to ensure we render any + // results available. In the case of a hard-failed policy check, the + // status of the run will be "errored", but there is still policy + // information which should be shown. + + // Show any cost estimation output. + if r.CostEstimate != nil { + err = b.costEstimate(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + // Check any configured sentinel policies. + if len(r.PolicyChecks) > 0 { + err = b.checkPolicy(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + return r, nil +} + +const planDefaultHeader = ` +[reset][yellow]Running plan in the remote backend. Output will stream here. 
Pressing Ctrl-C +will stop streaming the logs, but will not stop the plan running remotely.[reset] + +Preparing the remote plan... +` + +const runHeader = ` +[reset][yellow]To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` + +// The newline in this error is to make it look good in the CLI! +const lockTimeoutErr = ` +[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. +[reset] +` diff --git a/backend/remote/backend_plan_test.go b/backend/remote/backend_plan_test.go new file mode 100644 index 000000000000..ef94ae27126b --- /dev/null +++ b/backend/remote/backend_plan_test.go @@ -0,0 +1,1247 @@ +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationPlanWithTimeout(t, configDir, 0) +} + +func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := 
views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypePlan, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestRemote_planBasic(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } +} + +func TestRemote_planCanceled(t *testing.T) { 
+ b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error()) + } +} + +func TestRemote_planLongLine(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace without permissions. 
+ w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueRun = false + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_planWithParallelism(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_planWithPlan(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + 
t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithPath(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanOutPath = "./testdata/plan" + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "generated plan is currently not supported") { + t.Fatalf("expected a generated plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + // We should find a run inside the mock client that has refresh set + // to false. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithoutRefreshIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning without refresh is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + // We should find a run inside the mock client that has refresh-only set + // to true. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Refresh-only mode is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithTarget(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // When the backend code creates a new run, we'll tweak it so that it + // has a cost estimation object with the "skipped_due_to_targeting" status, + // emulating how a real server is expected to behave in that case. + b.client.Runs.(*cloud.MockRuns).ModifyNewRun = func(client *cloud.MockClient, options tfe.RunCreateOptions, run *tfe.Run) { + const fakeID = "fake" + // This is the cost estimate object embedded in the run itself which + // the backend will use to learn the ID to request from the cost + // estimates endpoint. It's pending to simulate what a freshly-created + // run is likely to look like. 
+ run.CostEstimate = &tfe.CostEstimate{ + ID: fakeID, + Status: "pending", + } + // The backend will then use the main cost estimation API to retrieve + // the same ID indicated in the object above, where we'll then return + // the status "skipped_due_to_targeting" to trigger the special skip + // message in the backend output. + client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{ + ID: fakeID, + Status: "skipped_due_to_targeting", + } + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // testBackendDefault above attached a "mock UI" to our backend, so we + // can retrieve its non-error output via the OutputWriter in-memory buffer. + gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String() + if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) { + t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput) + } + + // We should find a run inside the mock client that has the same + // target address we requested above. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithTargetIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + // Set the tfe client's RemoteAPIVersion to an empty string, to mimic + // API versions prior to 2.3. + b.client.SetFakeRemoteAPIVersion("") + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Resource targeting is not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_planWithReplace(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to 
succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithReplaceIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning resource replacements is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithVariables(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables") + defer configCleanup() + + op.Variables = testVariables(terraform.ValueFromCLIArg, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output 
:= done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_planNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } +} + +func TestRemote_planNoChanges(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summary: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestRemote_planForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. + if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithoutOperationsEntitlement(t *testing.T) { + b, bCleanup := testBackendNoOperations(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + 
view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planLockTimeout(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. 
+ _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. + signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timeout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summary in output: %s", output) + } +} + +func TestRemote_planDestroy(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", 
b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func TestRemote_planDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func TestRemote_planWithWorkingDirectory(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("terraform"), + } + + // Configure the workspace to use a custom working directory. 
+ _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/terraform") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "The remote workspace is configured to work with configuration") { + t.Fatalf("expected working directory warning: %s", output) + } + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("terraform"), + } + + // Configure the workspace to use a custom working directory. + _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting current working directory: %v", err) + } + + // We need to change into the configuration directory to make sure + // the logic to upload the correct slug is working as expected. 
+ if err := os.Chdir("./testdata/plan-with-working-directory/terraform"); err != nil { + t.Fatalf("error changing directory: %v", err) + } + defer os.Chdir(wd) // Make sure we change back again when we're done. + + // For this test we need to give our current directory instead of the + // full path to the configuration as we already changed directories. + op, configCleanup, done := testOperationPlan(t, ".") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planCostEstimation(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if 
!strings.Contains(output, "Resources: 1 of 1 estimated") { + t.Fatalf("expected cost estimate result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicyPass(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if 
!strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithRemoteError(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") + defer 
configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected plan error in output: %s", output) + } +} + +func TestRemote_planOtherError(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "network-error" // custom error response in backend_mock.go + + _, err := b.Operation(context.Background(), op) + if err == nil { + t.Errorf("expected error, got success") + } + + if !strings.Contains(err.Error(), + "the configured \"remote\" backend encountered an unexpected error:\n\nI'm a little teacup") { + t.Fatalf("expected error message, got: %s", err.Error()) + } +} diff --git a/backend/remote/backend_state.go b/backend/remote/backend_state.go new file mode 100644 index 000000000000..223678518d51 --- /dev/null +++ b/backend/remote/backend_state.go @@ -0,0 +1,195 @@ +package remote + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + + tfe "github.com/hashicorp/go-tfe" + + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +type remoteClient struct { + client *tfe.Client + lockInfo 
*statemgr.LockInfo + organization string + runID string + stateUploadErr bool + workspace *tfe.Workspace + forcePush bool +} + +// Get the remote state. +func (r *remoteClient) Get() (*remote.Payload, error) { + ctx := context.Background() + + sv, err := r.client.StateVersions.ReadCurrent(ctx, r.workspace.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. + return nil, nil + } + return nil, fmt.Errorf("Error retrieving state: %v", err) + } + + state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("Error downloading state: %v", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +// Put the remote state. +func (r *remoteClient) Put(state []byte) error { + ctx := context.Background() + + // Read the raw state into a Terraform state. + stateFile, err := statefile.Read(bytes.NewReader(state)) + if err != nil { + return fmt.Errorf("Error reading state: %s", err) + } + + ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) + if err != nil { + return fmt.Errorf("Error reading output values: %s", err) + } + o, err := json.Marshal(ov) + if err != nil { + return fmt.Errorf("Error converting output values to json: %s", err) + } + + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(stateFile.Lineage), + Serial: tfe.Int64(int64(stateFile.Serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + Force: tfe.Bool(r.forcePush), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(o)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. 
+ if r.runID != "" { + options.Run = &tfe.Run{ID: r.runID} + } + + // Create the new state. + _, err = r.client.StateVersions.Create(ctx, r.workspace.ID, options) + if err != nil { + r.stateUploadErr = true + return fmt.Errorf("Error uploading state: %v", err) + } + + return nil +} + +// Delete the remote state. +func (r *remoteClient) Delete() error { + err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace.Name) + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("Error deleting workspace %s: %v", r.workspace.Name, err) + } + + return nil +} + +// EnableForcePush to allow the remote client to overwrite state +// by implementing remote.ClientForcePusher +func (r *remoteClient) EnableForcePush() { + r.forcePush = true +} + +// Lock the remote state. +func (r *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { + ctx := context.Background() + + lockErr := &statemgr.LockError{Info: r.lockInfo} + + // Lock the workspace. + _, err := r.client.Workspaces.Lock(ctx, r.workspace.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by Terraform"), + }) + if err != nil { + if err == tfe.ErrWorkspaceLocked { + lockErr.Info = info + err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, r.organization, r.workspace.Name) + } + lockErr.Err = err + return "", lockErr + } + + r.lockInfo = info + + return r.lockInfo.ID, nil +} + +// Unlock the remote state. +func (r *remoteClient) Unlock(id string) error { + ctx := context.Background() + + // We first check if there was an error while uploading the latest + // state. If so, we will not unlock the workspace to prevent any + // changes from being applied until the correct state is uploaded. + if r.stateUploadErr { + return nil + } + + lockErr := &statemgr.LockError{Info: r.lockInfo} + + // With lock info this should be treated as a normal unlock. + if r.lockInfo != nil { + // Verify the expected lock ID. 
+ if r.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Unlock the workspace. + _, err := r.client.Workspaces.Unlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil + } + + // Verify the optional force-unlock lock ID. + if r.organization+"/"+r.workspace.Name != id { + lockErr.Err = fmt.Errorf( + "lock ID %q does not match existing lock ID \"%s/%s\"", + id, + r.organization, + r.workspace.Name, + ) + return lockErr + } + + // Force unlock the workspace. + _, err := r.client.Workspaces.ForceUnlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} diff --git a/internal/backend/remote/backend_state_test.go b/backend/remote/backend_state_test.go similarity index 83% rename from internal/backend/remote/backend_state_test.go rename to backend/remote/backend_state_test.go index 0503936b8e8f..bb338a28e7b6 100644 --- a/internal/backend/remote/backend_state_test.go +++ b/backend/remote/backend_state_test.go @@ -5,11 +5,11 @@ import ( "os" "testing" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statefile" ) func TestRemoteClient_impl(t *testing.T) { diff --git a/backend/remote/backend_test.go b/backend/remote/backend_test.go new file mode 100644 index 000000000000..5c77c3a5ca7d --- /dev/null +++ b/backend/remote/backend_test.go @@ -0,0 +1,724 @@ +package remote + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + tfe "github.com/hashicorp/go-tfe" + version 
"github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +func TestRemote(t *testing.T) { + var _ backend.Enhanced = New(nil) + var _ backend.CLI = New(nil) +} + +func TestRemote_backendDefault(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + backend.TestBackendStates(t, b) + backend.TestBackendStateLocks(t, b, b) + backend.TestBackendStateForceUnlock(t, b, b) +} + +func TestRemote_backendNoDefault(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + backend.TestBackendStates(t, b) +} + +func TestRemote_config(t *testing.T) { + cases := map[string]struct { + config cty.Value + confErr string + valErr string + }{ + "with_a_nonexisting_organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("nonexisting"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "organization \"nonexisting\" at host app.terraform.io not found", + }, + "with_an_unknown_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nonexisting.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "Failed to request discovery document", + }, + // localhost advertises TFE services, but has no token in the credentials + "without_a_token": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("localhost"), + "organization": cty.StringVal("hashicorp"), + 
"token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "terraform login localhost", + }, + "with_a_name": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + }, + "with_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }), + }, + "without_either_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.NullVal(cty.String), + }), + }), + valErr: `Either workspace "name" or "prefix" is required`, + }, + "with_both_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.StringVal("my-app-"), + }), + }), + valErr: `Only one of workspace "name" or "prefix" is allowed`, + }, + "null config": { + config: cty.NullVal(cty.EmptyObject), + }, + } + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s)) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if (valDiags.Err() != nil || tc.valErr != "") && + (valDiags.Err() == nil || 
!strings.Contains(valDiags.Err().Error(), tc.valErr)) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.confErr != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { + t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) + } + } +} + +func TestRemote_versionConstraints(t *testing.T) { + cases := map[string]struct { + config cty.Value + prerelease string + version string + result string + }{ + "compatible version": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "0.11.1", + }, + "version too old": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "0.0.1", + result: "upgrade Terraform to >= 0.1.0", + }, + "version too new": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "10.0.1", + result: "downgrade Terraform to <= 10.0.0", + }, + } + + // Save and restore the actual version. 
+ p := tfversion.Prerelease + v := tfversion.Version + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + }() + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s)) + + // Set the version for this test. + tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.HasErrors() { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.result != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.result)) { + t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) + } + } +} + +func TestRemote_localBackend(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + local, ok := b.local.(*backendLocal.Local) + if !ok { + t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) + } + + remote, ok := local.Backend.(*Remote) + if !ok { + t.Fatalf("expected local.Backend to be *remote.Remote, got: %T", remote) + } +} + +func TestRemote_addAndRemoveWorkspacesDefault(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + if _, err := b.Workspaces(); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if _, err := b.StateMgr("prod"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if err := b.DeleteWorkspace("prod", true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } 
+} + +func TestRemote_addAndRemoveWorkspacesNoDefault(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + states, err := b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces := []string(nil) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected states %#+v, got %#+v", expectedWorkspaces, states) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + expectedA := "test_A" + if _, err := b.StateMgr(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = append(expectedWorkspaces, expectedA) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + expectedB := "test_B" + if _, err := b.StateMgr(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = append(expectedWorkspaces, expectedB) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + if err := b.DeleteWorkspace(expectedA, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string{expectedB} + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(expectedB, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string(nil) + if 
!reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } +} + +func TestRemote_checkConstraints(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + cases := map[string]struct { + constraints *disco.Constraints + prerelease string + version string + result string + }{ + "compatible version": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.11.1", + result: "", + }, + "version too old": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.10.1", + result: "upgrade Terraform to >= 0.11.0", + }, + "version too new": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.12.0", + result: "downgrade Terraform to <= 0.11.11", + }, + "version excluded - ordered": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.7", "0.11.8"}, + Maximum: "0.11.11", + }, + version: "0.11.7", + result: "upgrade Terraform to > 0.11.8", + }, + "version excluded - unordered": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.8", "0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "upgrade Terraform to > 0.11.8", + }, + "list versions": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.10.1", + result: "versions >= 0.11.0, <= 0.11.11.", + }, + "list exclusion": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "excluding version 0.11.6.", + }, + "list exclusions": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.8", "0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "excluding versions 0.11.6, 0.11.8.", + }, + } + + // Save and restore the actual version. 
+ p := tfversion.Prerelease + v := tfversion.Version + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + }() + + for name, tc := range cases { + // Set the version for this test. + tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version + + // Check the constraints. + diags := b.checkConstraints(tc.constraints) + if (diags.Err() != nil || tc.result != "") && + (diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.result)) { + t.Fatalf("%s: unexpected constraints result: %v", name, diags.Err()) + } + } +} + +func TestRemote_StateMgr_versionCheck(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Some fixed versions for testing with. This logic is a simple string + // comparison, so we don't need many test cases. + v0135 := version.Must(version.NewSemver("0.13.5")) + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the mock remote workspace Terraform version to match the local + // Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0140.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Now change the remote workspace to a different Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: 
tfe.String(v0135.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should fail + want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"` + if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want { + t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) + } +} + +func TestRemote_StateMgr_versionCheckLatest(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the remote workspace to the pseudo-version "latest" + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("latest"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed despite not being a string match + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion(t *testing.T) { + testCases := []struct { + local string + remote string + executionMode string + wantErr bool + }{ + {"0.13.5", "0.13.5", "remote", false}, + {"0.14.0", "0.13.5", "remote", true}, + {"0.14.0", "0.13.5", "local", false}, + {"0.14.0", "0.14.1", "remote", false}, + {"0.14.0", "1.0.99", "remote", false}, + {"0.14.0", "1.1.0", "remote", false}, + {"0.14.0", "1.3.0", "remote", true}, + {"1.2.0", "1.2.99", "remote", false}, + {"1.2.0", "1.3.0", "remote", true}, + {"0.15.0", "latest", "remote", false}, + } + for _, tc := range 
testCases { + t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + local := version.Must(version.NewSemver(tc.local)) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: &tc.executionMode, + TerraformVersion: tfe.String(tc.remote), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if tc.wantErr { + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Terraform version mismatch") { + t.Fatalf("unexpected error: %s", got) + } + } else { + if len(diags) != 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + } + }) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Attempting to check the version against a workspace which doesn't exist + // should result in no errors + diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") + if len(diags) != 0 { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + // Use a special workspace ID to trigger a 500 error, which should result + // in a failed check + diags = b.VerifyWorkspaceTerraformVersion("network-error") + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := 
diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { + t.Fatalf("unexpected error: %s", got) + } + + // Update the mock remote workspace Terraform version to an invalid version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("1.0.cheetarah"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid Terraform version") { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // If the ignore flag is set, the behaviour changes + b.IgnoreVersionConflict() + + // Different local & remote versions to cause an error + local := version.Must(version.NewSemver("0.14.0")) + remote := version.Must(version.NewSemver("0.13.5")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(remote.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") 
+ } + + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong severity: got %#v, want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "Terraform version mismatch"; got != want { + t.Errorf("wrong summary: got %s, want %s", got, want) + } + wantDetail := "The local Terraform version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)." + if got := diags[0].Description().Detail; got != wantDetail { + t.Errorf("wrong summary: got %s, want %s", got, wantDetail) + } +} diff --git a/backend/remote/cli.go b/backend/remote/cli.go new file mode 100644 index 000000000000..9a4f24d081c9 --- /dev/null +++ b/backend/remote/cli.go @@ -0,0 +1,20 @@ +package remote + +import ( + "github.com/hashicorp/terraform/backend" +) + +// CLIInit implements backend.CLI +func (b *Remote) CLIInit(opts *backend.CLIOpts) error { + if cli, ok := b.local.(backend.CLI); ok { + if err := cli.CLIInit(opts); err != nil { + return err + } + } + + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ContextOpts = opts.ContextOpts + + return nil +} diff --git a/internal/backend/remote/colorize.go b/backend/remote/colorize.go similarity index 100% rename from internal/backend/remote/colorize.go rename to backend/remote/colorize.go diff --git a/backend/remote/remote_test.go b/backend/remote/remote_test.go new file mode 100644 index 000000000000..b5fbfcf5e70c --- /dev/null +++ b/backend/remote/remote_test.go @@ -0,0 +1,25 @@ +package remote + +import ( + "flag" + "os" + "testing" + "time" + + _ "github.com/hashicorp/terraform/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + + // Make sure TF_FORCE_LOCAL_BACKEND is unset + os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + // Reduce delays to make tests run faster + backoffMin = 1.0 + backoffMax = 1.0 + planConfigurationVersionsPollInterval = 1 * time.Millisecond + runPollInterval = 1 * time.Millisecond + + os.Exit(m.Run()) +} diff --git 
a/internal/backend/remote/testdata/apply-destroy/apply.log b/backend/remote/testdata/apply-destroy/apply.log similarity index 100% rename from internal/backend/remote/testdata/apply-destroy/apply.log rename to backend/remote/testdata/apply-destroy/apply.log diff --git a/internal/backend/remote/testdata/apply-destroy/main.tf b/backend/remote/testdata/apply-destroy/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-destroy/main.tf rename to backend/remote/testdata/apply-destroy/main.tf diff --git a/internal/backend/remote/testdata/apply-destroy/plan.log b/backend/remote/testdata/apply-destroy/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-destroy/plan.log rename to backend/remote/testdata/apply-destroy/plan.log diff --git a/internal/backend/remote/testdata/apply-no-changes/main.tf b/backend/remote/testdata/apply-no-changes/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-no-changes/main.tf rename to backend/remote/testdata/apply-no-changes/main.tf diff --git a/internal/backend/remote/testdata/apply-no-changes/plan.log b/backend/remote/testdata/apply-no-changes/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-no-changes/plan.log rename to backend/remote/testdata/apply-no-changes/plan.log diff --git a/internal/backend/remote/testdata/apply-no-changes/policy.log b/backend/remote/testdata/apply-no-changes/policy.log similarity index 100% rename from internal/backend/remote/testdata/apply-no-changes/policy.log rename to backend/remote/testdata/apply-no-changes/policy.log diff --git a/internal/backend/remote/testdata/apply-policy-hard-failed/main.tf b/backend/remote/testdata/apply-policy-hard-failed/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-policy-hard-failed/main.tf rename to backend/remote/testdata/apply-policy-hard-failed/main.tf diff --git 
a/internal/backend/remote/testdata/apply-policy-hard-failed/plan.log b/backend/remote/testdata/apply-policy-hard-failed/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-hard-failed/plan.log rename to backend/remote/testdata/apply-policy-hard-failed/plan.log diff --git a/internal/backend/remote/testdata/apply-policy-hard-failed/policy.log b/backend/remote/testdata/apply-policy-hard-failed/policy.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-hard-failed/policy.log rename to backend/remote/testdata/apply-policy-hard-failed/policy.log diff --git a/internal/backend/remote/testdata/apply-policy-passed/apply.log b/backend/remote/testdata/apply-policy-passed/apply.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-passed/apply.log rename to backend/remote/testdata/apply-policy-passed/apply.log diff --git a/internal/backend/remote/testdata/apply-policy-passed/main.tf b/backend/remote/testdata/apply-policy-passed/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-policy-passed/main.tf rename to backend/remote/testdata/apply-policy-passed/main.tf diff --git a/internal/backend/remote/testdata/apply-policy-passed/plan.log b/backend/remote/testdata/apply-policy-passed/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-passed/plan.log rename to backend/remote/testdata/apply-policy-passed/plan.log diff --git a/internal/backend/remote/testdata/apply-policy-passed/policy.log b/backend/remote/testdata/apply-policy-passed/policy.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-passed/policy.log rename to backend/remote/testdata/apply-policy-passed/policy.log diff --git a/internal/backend/remote/testdata/apply-policy-soft-failed/apply.log b/backend/remote/testdata/apply-policy-soft-failed/apply.log similarity index 100% rename from 
internal/backend/remote/testdata/apply-policy-soft-failed/apply.log rename to backend/remote/testdata/apply-policy-soft-failed/apply.log diff --git a/internal/backend/remote/testdata/apply-policy-soft-failed/main.tf b/backend/remote/testdata/apply-policy-soft-failed/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-policy-soft-failed/main.tf rename to backend/remote/testdata/apply-policy-soft-failed/main.tf diff --git a/internal/backend/remote/testdata/apply-policy-soft-failed/plan.log b/backend/remote/testdata/apply-policy-soft-failed/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-soft-failed/plan.log rename to backend/remote/testdata/apply-policy-soft-failed/plan.log diff --git a/internal/backend/remote/testdata/apply-policy-soft-failed/policy.log b/backend/remote/testdata/apply-policy-soft-failed/policy.log similarity index 100% rename from internal/backend/remote/testdata/apply-policy-soft-failed/policy.log rename to backend/remote/testdata/apply-policy-soft-failed/policy.log diff --git a/internal/backend/remote/testdata/apply-variables/apply.log b/backend/remote/testdata/apply-variables/apply.log similarity index 100% rename from internal/backend/remote/testdata/apply-variables/apply.log rename to backend/remote/testdata/apply-variables/apply.log diff --git a/internal/backend/remote/testdata/apply-variables/main.tf b/backend/remote/testdata/apply-variables/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-variables/main.tf rename to backend/remote/testdata/apply-variables/main.tf diff --git a/internal/backend/remote/testdata/apply-variables/plan.log b/backend/remote/testdata/apply-variables/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-variables/plan.log rename to backend/remote/testdata/apply-variables/plan.log diff --git a/internal/backend/remote/testdata/apply-with-error/main.tf 
b/backend/remote/testdata/apply-with-error/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply-with-error/main.tf rename to backend/remote/testdata/apply-with-error/main.tf diff --git a/internal/backend/remote/testdata/apply-with-error/plan.log b/backend/remote/testdata/apply-with-error/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply-with-error/plan.log rename to backend/remote/testdata/apply-with-error/plan.log diff --git a/internal/backend/remote/testdata/apply/apply.log b/backend/remote/testdata/apply/apply.log similarity index 100% rename from internal/backend/remote/testdata/apply/apply.log rename to backend/remote/testdata/apply/apply.log diff --git a/internal/backend/remote/testdata/apply/main.tf b/backend/remote/testdata/apply/main.tf similarity index 100% rename from internal/backend/remote/testdata/apply/main.tf rename to backend/remote/testdata/apply/main.tf diff --git a/internal/backend/remote/testdata/apply/plan.log b/backend/remote/testdata/apply/plan.log similarity index 100% rename from internal/backend/remote/testdata/apply/plan.log rename to backend/remote/testdata/apply/plan.log diff --git a/internal/backend/remote/testdata/empty/.gitignore b/backend/remote/testdata/empty/.gitignore similarity index 100% rename from internal/backend/remote/testdata/empty/.gitignore rename to backend/remote/testdata/empty/.gitignore diff --git a/internal/backend/remote/testdata/plan-cost-estimation/ce.log b/backend/remote/testdata/plan-cost-estimation/ce.log similarity index 100% rename from internal/backend/remote/testdata/plan-cost-estimation/ce.log rename to backend/remote/testdata/plan-cost-estimation/ce.log diff --git a/internal/backend/remote/testdata/plan-cost-estimation/cost-estimate.log b/backend/remote/testdata/plan-cost-estimation/cost-estimate.log similarity index 100% rename from internal/backend/remote/testdata/plan-cost-estimation/cost-estimate.log rename to 
backend/remote/testdata/plan-cost-estimation/cost-estimate.log diff --git a/internal/backend/remote/testdata/plan-cost-estimation/main.tf b/backend/remote/testdata/plan-cost-estimation/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-cost-estimation/main.tf rename to backend/remote/testdata/plan-cost-estimation/main.tf diff --git a/internal/backend/remote/testdata/plan-cost-estimation/plan.log b/backend/remote/testdata/plan-cost-estimation/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-cost-estimation/plan.log rename to backend/remote/testdata/plan-cost-estimation/plan.log diff --git a/internal/backend/remote/testdata/plan-long-line/main.tf b/backend/remote/testdata/plan-long-line/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-long-line/main.tf rename to backend/remote/testdata/plan-long-line/main.tf diff --git a/internal/backend/remote/testdata/plan-long-line/plan.log b/backend/remote/testdata/plan-long-line/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-long-line/plan.log rename to backend/remote/testdata/plan-long-line/plan.log diff --git a/internal/backend/remote/testdata/plan-no-changes/main.tf b/backend/remote/testdata/plan-no-changes/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-no-changes/main.tf rename to backend/remote/testdata/plan-no-changes/main.tf diff --git a/internal/backend/remote/testdata/plan-no-changes/plan.log b/backend/remote/testdata/plan-no-changes/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-no-changes/plan.log rename to backend/remote/testdata/plan-no-changes/plan.log diff --git a/internal/backend/remote/testdata/plan-no-changes/policy.log b/backend/remote/testdata/plan-no-changes/policy.log similarity index 100% rename from internal/backend/remote/testdata/plan-no-changes/policy.log rename to 
backend/remote/testdata/plan-no-changes/policy.log diff --git a/internal/backend/remote/testdata/plan-policy-hard-failed/main.tf b/backend/remote/testdata/plan-policy-hard-failed/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-policy-hard-failed/main.tf rename to backend/remote/testdata/plan-policy-hard-failed/main.tf diff --git a/internal/backend/remote/testdata/plan-policy-hard-failed/plan.log b/backend/remote/testdata/plan-policy-hard-failed/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-hard-failed/plan.log rename to backend/remote/testdata/plan-policy-hard-failed/plan.log diff --git a/internal/backend/remote/testdata/plan-policy-hard-failed/policy.log b/backend/remote/testdata/plan-policy-hard-failed/policy.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-hard-failed/policy.log rename to backend/remote/testdata/plan-policy-hard-failed/policy.log diff --git a/internal/backend/remote/testdata/plan-policy-passed/main.tf b/backend/remote/testdata/plan-policy-passed/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-policy-passed/main.tf rename to backend/remote/testdata/plan-policy-passed/main.tf diff --git a/internal/backend/remote/testdata/plan-policy-passed/plan.log b/backend/remote/testdata/plan-policy-passed/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-passed/plan.log rename to backend/remote/testdata/plan-policy-passed/plan.log diff --git a/internal/backend/remote/testdata/plan-policy-passed/policy.log b/backend/remote/testdata/plan-policy-passed/policy.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-passed/policy.log rename to backend/remote/testdata/plan-policy-passed/policy.log diff --git a/internal/backend/remote/testdata/plan-policy-soft-failed/main.tf b/backend/remote/testdata/plan-policy-soft-failed/main.tf similarity index 100% rename from 
internal/backend/remote/testdata/plan-policy-soft-failed/main.tf rename to backend/remote/testdata/plan-policy-soft-failed/main.tf diff --git a/internal/backend/remote/testdata/plan-policy-soft-failed/plan.log b/backend/remote/testdata/plan-policy-soft-failed/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-soft-failed/plan.log rename to backend/remote/testdata/plan-policy-soft-failed/plan.log diff --git a/internal/backend/remote/testdata/plan-policy-soft-failed/policy.log b/backend/remote/testdata/plan-policy-soft-failed/policy.log similarity index 100% rename from internal/backend/remote/testdata/plan-policy-soft-failed/policy.log rename to backend/remote/testdata/plan-policy-soft-failed/policy.log diff --git a/internal/backend/remote/testdata/plan-variables/main.tf b/backend/remote/testdata/plan-variables/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-variables/main.tf rename to backend/remote/testdata/plan-variables/main.tf diff --git a/internal/backend/remote/testdata/plan-variables/plan.log b/backend/remote/testdata/plan-variables/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-variables/plan.log rename to backend/remote/testdata/plan-variables/plan.log diff --git a/internal/backend/remote/testdata/plan-with-error/main.tf b/backend/remote/testdata/plan-with-error/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan-with-error/main.tf rename to backend/remote/testdata/plan-with-error/main.tf diff --git a/internal/backend/remote/testdata/plan-with-error/plan.log b/backend/remote/testdata/plan-with-error/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-with-error/plan.log rename to backend/remote/testdata/plan-with-error/plan.log diff --git a/internal/backend/remote/testdata/plan-with-working-directory/terraform/main.tf b/backend/remote/testdata/plan-with-working-directory/terraform/main.tf similarity 
index 100% rename from internal/backend/remote/testdata/plan-with-working-directory/terraform/main.tf rename to backend/remote/testdata/plan-with-working-directory/terraform/main.tf diff --git a/internal/backend/remote/testdata/plan-with-working-directory/terraform/plan.log b/backend/remote/testdata/plan-with-working-directory/terraform/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan-with-working-directory/terraform/plan.log rename to backend/remote/testdata/plan-with-working-directory/terraform/plan.log diff --git a/internal/backend/remote/testdata/plan/main.tf b/backend/remote/testdata/plan/main.tf similarity index 100% rename from internal/backend/remote/testdata/plan/main.tf rename to backend/remote/testdata/plan/main.tf diff --git a/internal/backend/remote/testdata/plan/plan.log b/backend/remote/testdata/plan/plan.log similarity index 100% rename from internal/backend/remote/testdata/plan/plan.log rename to backend/remote/testdata/plan/plan.log diff --git a/internal/backend/remote/testdata/variables/main.tf b/backend/remote/testdata/variables/main.tf similarity index 100% rename from internal/backend/remote/testdata/variables/main.tf rename to backend/remote/testdata/variables/main.tf diff --git a/backend/remote/testing.go b/backend/remote/testing.go new file mode 100644 index 000000000000..074c3cd8cacb --- /dev/null +++ b/backend/remote/testing.go @@ -0,0 +1,321 @@ +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "path" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/httpclient" + 
"github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + testCred = "test-auth-token" +) + +var ( + tfeHost = svchost.Hostname(defaultHostname) + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + tfeHost: {"token": testCred}, + }) +) + +// mockInput is a mock implementation of terraform.UIInput. +type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + if v == "wait-for-external-update" { + select { + case <-ctx.Done(): + case <-time.After(time.Minute): + } + } + delete(m.answers, opts.Id) + return v, nil +} + +func testInput(t *testing.T, answers map[string]string) *mockInput { + return &mockInput{answers: answers} +} + +func testBackendDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }) + return testBackend(t, obj) +} + +func 
testBackendNoOperations(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("no-operations"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testRemoteClient(t *testing.T) remote.Client { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + raw, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("error: %v", err) + } + + return raw.(*remote.State).Client +} + +func testBackend(t *testing.T, obj cty.Value) (*Remote, func()) { + s := testServer(t) + b := New(testDisco(s)) + + // Configure the backend so the client is created. + newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatal(valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatal(confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := cloud.NewMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.StateVersions = mc.StateVersions + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + + ctx := context.Background() + + // Create the organization. + _, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. 
+ if b.workspace != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.workspace), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b, s.Close +} + +func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { + b := backendLocal.NewWithBackend(remote) + + // Add a test provider to the local backend. + p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "null_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + +// testServer returns a *httptest.Server used for local testing. +func testServer(t *testing.T) *httptest.Server { + mux := http.NewServeMux() + + // Respond to service discovery calls. + mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{ + "state.v2": "/api/v2/", + "tfe.v2.1": "/api/v2/", + "versions.v1": "/v1/versions/" +}`) + }) + + // Respond to service version constraints calls. + mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, fmt.Sprintf(`{ + "service": "%s", + "product": "terraform", + "minimum": "0.1.0", + "maximum": "10.0.0" +}`, path.Base(r.URL.Path))) + }) + + // Respond to pings to get the API version header. + mux.HandleFunc("/api/v2/ping", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }) + + // Respond to the initial query to read the hashicorp org entitlements. 
+ mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-GExadygjSbKP8hsY", + "type": "entitlement-sets", + "attributes": { + "operations": true, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // Respond to the initial query to read the no-operations org entitlements. + mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-ufxa3y8jSbKP8hsT", + "type": "entitlement-sets", + "attributes": { + "operations": false, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. + mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }) + + return httptest.NewServer(mux) +} + +// testDisco returns a *disco.Disco mapping app.terraform.io and +// localhost to a local test server. 
+func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "state.v2": fmt.Sprintf("%s/api/v2/", s.URL), + "tfe.v2.1": fmt.Sprintf("%s/api/v2/", s.URL), + "versions.v1": fmt.Sprintf("%s/v1/versions/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + d.SetUserAgent(httpclient.TerraformUserAgent(version.String())) + + d.ForceHostServices(svchost.Hostname(defaultHostname), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + return d +} + +type unparsedVariableValue struct { + value string + source terraform.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + return &terraform.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. +func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/backend/testing.go b/backend/testing.go new file mode 100644 index 000000000000..fa7034b3edda --- /dev/null +++ b/backend/testing.go @@ -0,0 +1,425 @@ +package backend + +import ( + "reflect" + "sort" + "testing" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" +) + +// TestBackendConfig validates and configures the backend with the +// given configuration. 
+func TestBackendConfig(t *testing.T, b Backend, c hcl.Body) Backend { + t.Helper() + + t.Logf("TestBackendConfig on %T with %#v", b, c) + + var diags tfdiags.Diagnostics + + // To make things easier for test authors, we'll allow a nil body here + // (even though that's not normally valid) and just treat it as an empty + // body. + if c == nil { + c = hcl.EmptyBody() + } + + schema := b.ConfigSchema() + spec := schema.DecoderSpec() + obj, decDiags := hcldec.Decode(c, spec, nil) + diags = diags.Append(decDiags) + + newObj, valDiags := b.PrepareConfig(obj) + diags = diags.Append(valDiags.InConfigBody(c, "")) + + // it's valid for a Backend to have warnings (e.g. a Deprecation) as such we should only raise on errors + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + confDiags = confDiags.InConfigBody(c, "") + t.Fatal(confDiags.ErrWithWarnings()) + } + + return b +} + +// TestWrapConfig takes a raw data structure and converts it into a +// synthetic hcl.Body to use for testing. +// +// The given structure should only include values that can be accepted by +// hcl2shim.HCL2ValueFromConfigValue. If incompatible values are given, +// this function will panic. +func TestWrapConfig(raw map[string]interface{}) hcl.Body { + obj := hcl2shim.HCL2ValueFromConfigValue(raw) + return configs.SynthBody("", obj.AsValueMap()) +} + +// TestBackend will test the functionality of a Backend. The backend is +// assumed to already be configured. This will test state functionality. +// If the backend reports it doesn't support multi-state by returning the +// error ErrWorkspacesNotSupported, then it will not test that. 
+func TestBackendStates(t *testing.T, b Backend) { + t.Helper() + + noDefault := false + if _, err := b.StateMgr(DefaultStateName); err != nil { + if err == ErrDefaultWorkspaceNotSupported { + noDefault = true + } else { + t.Fatalf("error: %v", err) + } + } + + workspaces, err := b.Workspaces() + if err != nil { + if err == ErrWorkspacesNotSupported { + t.Logf("TestBackend: workspaces not supported in %T, skipping", b) + return + } + t.Fatalf("error: %v", err) + } + + // Test it starts with only the default + if !noDefault && (len(workspaces) != 1 || workspaces[0] != DefaultStateName) { + t.Fatalf("should only have the default workspace to start: %#v", workspaces) + } + + // Create a couple states + foo, err := b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + + bar, err := b.StateMgr("bar") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := bar.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + + // Verify they are distinct states that can be read back from storage + { + // We'll use two distinct states here and verify that changing one + // does not also change the other. + fooState := states.NewState() + barState := states.NewState() + + // write a known state to foo + if err := foo.WriteState(fooState); err != nil { + t.Fatal("error writing foo state:", err) + } + if err := foo.PersistState(nil); err != nil { + t.Fatal("error persisting foo state:", err) + } + + // We'll make "bar" different by adding a fake resource state to it. 
+ barState.SyncWrapper().SetResourceInstanceCurrent( + addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }, + }.Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte("{}"), + Status: states.ObjectReady, + SchemaVersion: 0, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // write a distinct known state to bar + if err := bar.WriteState(barState); err != nil { + t.Fatalf("bad: %s", err) + } + if err := bar.PersistState(nil); err != nil { + t.Fatalf("bad: %s", err) + } + + // verify that foo is unchanged with the existing state manager + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource to bar, foo now has resources too") + } + + // fetch foo again from the backend + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource to bar and re-reading foo, foo now has resources too") + } + + // fetch the bar again from the backend + bar, err = b.StateMgr("bar") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatal("error refreshing bar:", err) + } + barState = bar.State() + if !barState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource instance object to bar and re-reading it, the object has vanished") + } + } + + // Verify we can now list them + { + // we determined that named stated are supported earlier + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + 
expected := []string{"bar", "default", "foo"} + if noDefault { + expected = []string{"bar", "foo"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } + + // Delete some workspaces + if err := b.DeleteWorkspace("foo", true); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the default state can't be deleted + if err := b.DeleteWorkspace(DefaultStateName, true); err == nil { + t.Fatal("expected error") + } + + // Create and delete the foo workspace again. + // Make sure that there are no leftover artifacts from a deleted state + // preventing re-creation. + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + // and delete it again + if err := b.DeleteWorkspace("foo", true); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify deletion + { + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"bar", "default"} + if noDefault { + expected = []string{"bar"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } +} + +// TestBackendStateLocks will test the locking functionality of the remote +// state backend. +func TestBackendStateLocks(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, false) +} + +// TestBackendStateForceUnlock verifies that the lock error is the expected +// type, and the lock can be unlocked using the ID reported in the error. +// Remote state backends that support -force-unlock should call this in at +// least one of the acceptance tests. 
+func TestBackendStateForceUnlock(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, true) +} + +// TestBackendStateLocksInWS will test the locking functionality of the remote +// state backend. +func TestBackendStateLocksInWS(t *testing.T, b1, b2 Backend, ws string) { + t.Helper() + testLocksInWorkspace(t, b1, b2, false, ws) +} + +// TestBackendStateForceUnlockInWS verifies that the lock error is the expected +// type, and the lock can be unlocked using the ID reported in the error. +// Remote state backends that support -force-unlock should call this in at +// least one of the acceptance tests. +func TestBackendStateForceUnlockInWS(t *testing.T, b1, b2 Backend, ws string) { + t.Helper() + testLocksInWorkspace(t, b1, b2, true, ws) +} + +func testLocks(t *testing.T, b1, b2 Backend, testForceUnlock bool) { + testLocksInWorkspace(t, b1, b2, testForceUnlock, DefaultStateName) +} + +func testLocksInWorkspace(t *testing.T, b1, b2 Backend, testForceUnlock bool, workspace string) { + t.Helper() + + // Get the default state for each + b1StateMgr, err := b1.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b1StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Fast exit if this doesn't support locking at all + if _, ok := b1StateMgr.(statemgr.Locker); !ok { + t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1) + return + } + + t.Logf("TestBackend: testing state locking for %T", b1) + + b2StateMgr, err := b2.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b2StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Reassign so its obvious whats happening + lockerA := b1StateMgr.(statemgr.Locker) + lockerB := b2StateMgr.(statemgr.Locker) + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + 
+ lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // Make sure we can still get the statemgr.Full from another instance even + // when locked. This should only happen when a state is loaded via the + // backend, and as a remote state. + _, err = b2.StateMgr(DefaultStateName) + if err != nil { + t.Errorf("failed to read locked state from another backend instance: %s", err) + } + + // If the lock ID is blank, assume locking is disabled + if lockIDA == "" { + t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) + return + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Errorf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // test the equivalent of -force-unlock, by using the id from the error + // output. 
+ if !testForceUnlock { + return + } + + // get a new ID + infoA.ID, err = uuid.GenerateUUID() + if err != nil { + panic(err) + } + + lockIDA, err = lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get re lock A:", err) + } + unlock := func() { + err := lockerA.Unlock(lockIDA) + if err != nil { + t.Fatal(err) + } + } + + _, err = lockerB.Lock(infoB) + if err == nil { + unlock() + t.Fatal("client B obtained lock while held by client A") + } + + infoErr, ok := err.(*statemgr.LockError) + if !ok { + unlock() + t.Fatalf("expected type *statemgr.LockError, got : %#v", err) + } + + // try to unlock with the second unlocker, using the ID from the error + if err := lockerB.Unlock(infoErr.Info.ID); err != nil { + unlock() + t.Fatalf("could not unlock with the reported ID %q: %s", infoErr.Info.ID, err) + } +} diff --git a/internal/backend/unparsed_value.go b/backend/unparsed_value.go similarity index 98% rename from internal/backend/unparsed_value.go rename to backend/unparsed_value.go index e7eadea9a1f8..bb39c5bdf5e6 100644 --- a/internal/backend/unparsed_value.go +++ b/backend/unparsed_value.go @@ -4,9 +4,9 @@ import ( "fmt" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/backend/unparsed_value_test.go b/backend/unparsed_value_test.go similarity index 98% rename from internal/backend/unparsed_value_test.go rename to backend/unparsed_value_test.go index 8807d243d782..9180dabe18a2 100644 --- a/internal/backend/unparsed_value_test.go +++ b/backend/unparsed_value_test.go @@ -8,9 +8,9 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs" - 
"github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) func TestUnparsedValue(t *testing.T) { diff --git a/internal/builtin/providers/README b/builtin/providers/README similarity index 100% rename from internal/builtin/providers/README rename to builtin/providers/README diff --git a/internal/builtin/providers/terraform/data_source_state.go b/builtin/providers/terraform/data_source_state.go similarity index 95% rename from internal/builtin/providers/terraform/data_source_state.go rename to builtin/providers/terraform/data_source_state.go index f69a835343cc..695c9b10218a 100644 --- a/internal/builtin/providers/terraform/data_source_state.go +++ b/builtin/providers/terraform/data_source_state.go @@ -4,14 +4,14 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/remote" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/remote" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" - backendInit "github.com/hashicorp/terraform/internal/backend/init" + backendInit "github.com/hashicorp/terraform/backend/init" ) func dataSourceRemoteStateGetSchema() providers.Schema { diff --git a/internal/builtin/providers/terraform/data_source_state_test.go b/builtin/providers/terraform/data_source_state_test.go similarity index 97% rename from internal/builtin/providers/terraform/data_source_state_test.go rename to builtin/providers/terraform/data_source_state_test.go index cf0e3c2a6b0c..d438285a10bd 100644 --- 
a/internal/builtin/providers/terraform/data_source_state_test.go +++ b/builtin/providers/terraform/data_source_state_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/apparentlymart/go-dump/dump" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/builtin/providers/terraform/provider.go b/builtin/providers/terraform/provider.go new file mode 100644 index 000000000000..3be80acd462b --- /dev/null +++ b/builtin/providers/terraform/provider.go @@ -0,0 +1,138 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/providers" +) + +// Provider is an implementation of providers.Interface +type Provider struct{} + +// NewProvider returns a new terraform provider +func NewProvider() providers.Interface { + return &Provider{} +} + +// GetSchema returns the complete schema for the provider. +func (p *Provider) GetProviderSchema() providers.GetProviderSchemaResponse { + return providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "terraform_remote_state": dataSourceRemoteStateGetSchema(), + }, + ResourceTypes: map[string]providers.Schema{ + "terraform_data": dataStoreResourceSchema(), + }, + } +} + +// ValidateProviderConfig is used to validate the configuration values. 
+func (p *Provider) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.ValidateProviderConfigResponse + res.PreparedConfig = req.Config + return res +} + +// ValidateDataResourceConfig is used to validate the data source configuration values. +func (p *Provider) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + // FIXME: move the backend configuration validate call that's currently + // inside the read method into here so that we can catch provider configuration + // errors in terraform validate as well as during terraform plan. + var res providers.ValidateDataResourceConfigResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + diags := dataSourceRemoteStateValidate(req.Config) + res.Diagnostics = diags + + return res +} + +// Configure configures and initializes the provider. +func (p *Provider) ConfigureProvider(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.ConfigureProviderResponse + return res +} + +// ReadDataSource returns the data source's current state. 
+func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // call function + var res providers.ReadDataSourceResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + newState, diags := dataSourceRemoteStateRead(req.Config) + + res.State = newState + res.Diagnostics = diags + + return res +} + +// Stop is called when the provider should halt any in-flight actions. +func (p *Provider) Stop() error { + log.Println("[DEBUG] terraform provider cannot Stop") + return nil +} + +// All the Resource-specific functions are below. +// The terraform provider supplies a single data source, `terraform_remote_state` +// and no resources. + +// UpgradeResourceState is called when the state loader encounters an +// instance state whose schema version is less than the one reported by the +// currently-used version of the corresponding provider, and the upgraded +// result is used for any further processing. +func (p *Provider) UpgradeResourceState(req providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + return upgradeDataStoreResourceState(req) +} + +// ReadResource refreshes a resource and returns its current state. +func (p *Provider) ReadResource(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return readDataStoreResourceState(req) +} + +// PlanResourceChange takes the current state and proposed state of a +// resource, and returns the planned final state. +func (p *Provider) PlanResourceChange(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return planDataStoreResourceChange(req) +} + +// ApplyResourceChange takes the planned state for a resource, which may +// yet contain unknown computed values, and applies the changes returning +// the final state. 
+func (p *Provider) ApplyResourceChange(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return applyDataStoreResourceChange(req) +} + +// ImportResourceState requests that the given resource be imported. +func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + if req.TypeName == "terraform_data" { + return importDataStore(req) + } + + panic("unimplemented - terraform_remote_state has no resources") +} + +// ValidateResourceConfig is used to to validate the resource configuration values. +func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return validateDataStoreResourceConfig(req) +} + +// Close is a noop for this provider, since it's run in-process. +func (p *Provider) Close() error { + return nil +} diff --git a/builtin/providers/terraform/provider_test.go b/builtin/providers/terraform/provider_test.go new file mode 100644 index 000000000000..fecf720d2f2f --- /dev/null +++ b/builtin/providers/terraform/provider_test.go @@ -0,0 +1,10 @@ +package terraform + +import ( + backendInit "github.com/hashicorp/terraform/backend/init" +) + +func init() { + // Initialize the backends + backendInit.Init(nil) +} diff --git a/builtin/providers/terraform/resource_data.go b/builtin/providers/terraform/resource_data.go new file mode 100644 index 000000000000..fe6c6ce97fb5 --- /dev/null +++ b/builtin/providers/terraform/resource_data.go @@ -0,0 +1,169 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func dataStoreResourceSchema() providers.Schema { + return providers.Schema{ + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "input": {Type: cty.DynamicPseudoType, Optional: true}, + "output": {Type: cty.DynamicPseudoType, Computed: true}, + "triggers_replace": {Type: cty.DynamicPseudoType, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + } +} + +func validateDataStoreResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + if req.Config.IsNull() { + return resp + } + + // Core does not currently validate computed values are not set in the + // configuration. + for _, attr := range []string{"id", "output"} { + if !req.Config.GetAttr(attr).IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf(`%q attribute is read-only`, attr)) + } + } + return resp +} + +func upgradeDataStoreResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + ty := dataStoreResourceSchema().Block.ImpliedType() + val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.UpgradedState = val + return resp +} + +func readDataStoreResourceState(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = req.PriorState + return resp +} + +func planDataStoreResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + if req.ProposedNewState.IsNull() { + // destroy op + resp.PlannedState = req.ProposedNewState + return resp + } + + planned := req.ProposedNewState.AsValueMap() + + input := req.ProposedNewState.GetAttr("input") + trigger := req.ProposedNewState.GetAttr("triggers_replace") + + switch { + case req.PriorState.IsNull(): + // Create + // Set the id value to unknown. + planned["id"] = cty.UnknownVal(cty.String) + + // Output type must always match the input, even when it's null. 
+ if input.IsNull() { + planned["output"] = input + } else { + planned["output"] = cty.UnknownVal(input.Type()) + } + + resp.PlannedState = cty.ObjectVal(planned) + return resp + + case !req.PriorState.GetAttr("triggers_replace").RawEquals(trigger): + // trigger changed, so we need to replace the entire instance + resp.RequiresReplace = append(resp.RequiresReplace, cty.GetAttrPath("triggers_replace")) + planned["id"] = cty.UnknownVal(cty.String) + + // We need to check the input for the replacement instance to compute a + // new output. + if input.IsNull() { + planned["output"] = input + } else { + planned["output"] = cty.UnknownVal(input.Type()) + } + + case !req.PriorState.GetAttr("input").RawEquals(input): + // only input changed, so we only need to re-compute output + planned["output"] = cty.UnknownVal(input.Type()) + } + + resp.PlannedState = cty.ObjectVal(planned) + return resp +} + +var testUUIDHook func() string + +func applyDataStoreResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + resp.NewState = req.PlannedState + return resp + } + + newState := req.PlannedState.AsValueMap() + + if !req.PlannedState.GetAttr("output").IsKnown() { + newState["output"] = req.PlannedState.GetAttr("input") + } + + if !req.PlannedState.GetAttr("id").IsKnown() { + idString, err := uuid.GenerateUUID() + // Terraform would probably never get this far without a good random + // source, but catch the error anyway. 
+ if err != nil { + diag := tfdiags.AttributeValue( + tfdiags.Error, + "Error generating id", + err.Error(), + cty.GetAttrPath("id"), + ) + + resp.Diagnostics = resp.Diagnostics.Append(diag) + } + + if testUUIDHook != nil { + idString = testUUIDHook() + } + + newState["id"] = cty.StringVal(idString) + } + + resp.NewState = cty.ObjectVal(newState) + + return resp +} + +// TODO: This isn't very useful even for examples, because terraform_data has +// no way to refresh the full resource value from only the import ID. This +// minimal implementation allows the import to succeed, and can be extended +// once the configuration is available during import. +func importDataStore(req providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + schema := dataStoreResourceSchema() + v := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal(req.ID), + }) + state, err := schema.Block.CoerceValue(v) + resp.Diagnostics = resp.Diagnostics.Append(err) + + resp.ImportedResources = []providers.ImportedResource{ + { + TypeName: req.TypeName, + State: state, + }, + } + return resp +} diff --git a/builtin/providers/terraform/resource_data_test.go b/builtin/providers/terraform/resource_data_test.go new file mode 100644 index 000000000000..d1b8930be7b9 --- /dev/null +++ b/builtin/providers/terraform/resource_data_test.go @@ -0,0 +1,382 @@ +package terraform + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestManagedDataValidate(t *testing.T) { + cfg := map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + } + + // empty + req := providers.ValidateResourceConfigRequest{ + TypeName: "terraform_data", + Config: cty.ObjectVal(cfg), + } + + resp := validateDataStoreResourceConfig(req) + 
if resp.Diagnostics.HasErrors() { + t.Error("empty config error:", resp.Diagnostics.ErrWithWarnings()) + } + + // invalid computed values + cfg["output"] = cty.StringVal("oops") + req.Config = cty.ObjectVal(cfg) + + resp = validateDataStoreResourceConfig(req) + if !resp.Diagnostics.HasErrors() { + t.Error("expected error") + } + + msg := resp.Diagnostics.Err().Error() + if !strings.Contains(msg, "attribute is read-only") { + t.Error("unexpected error", msg) + } +} + +func TestManagedDataUpgradeState(t *testing.T) { + schema := dataStoreResourceSchema() + ty := schema.Block.ImpliedType() + + state := cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + "id": cty.StringVal("not-quite-unique"), + }) + + jsState, err := ctyjson.Marshal(state, ty) + if err != nil { + t.Fatal(err) + } + + // empty + req := providers.UpgradeResourceStateRequest{ + TypeName: "terraform_data", + RawStateJSON: jsState, + } + + resp := upgradeDataStoreResourceState(req) + if resp.Diagnostics.HasErrors() { + t.Error("upgrade state error:", resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.UpgradedState.RawEquals(state) { + t.Errorf("prior state was:\n%#v\nupgraded state is:\n%#v\n", state, resp.UpgradedState) + } +} + +func TestManagedDataRead(t *testing.T) { + req := providers.ReadResourceRequest{ + TypeName: "terraform_data", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + } + + resp := readDataStoreResourceState(req) + if resp.Diagnostics.HasErrors() { + t.Fatal("unexpected error", resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.NewState.RawEquals(req.PriorState) { + t.Errorf("prior state was:\n%#v\nnew state 
is:\n%#v\n", req.PriorState, resp.NewState) + } +} + +func TestManagedDataPlan(t *testing.T) { + schema := dataStoreResourceSchema().Block + ty := schema.ImpliedType() + + for name, tc := range map[string]struct { + prior cty.Value + proposed cty.Value + planned cty.Value + }{ + "create": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + }, + + "create-typed-null-input": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.String), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.String), + "output": cty.NullVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + }, + + "create-output": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + }, + + "update-input": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": 
cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.UnknownVal(cty.List(cty.String)), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.UnknownVal(cty.List(cty.String)), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.UnknownVal(cty.String), + }), + }, + + "update-input-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": 
cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.UnknownVal(cty.String), + }), + }, + } { + t.Run("plan-"+name, func(t *testing.T) { + req := providers.PlanResourceChangeRequest{ + TypeName: "terraform_data", + PriorState: tc.prior, + ProposedNewState: tc.proposed, + } + + resp := planDataStoreResourceChange(req) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.PlannedState.RawEquals(tc.planned) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.planned, resp.PlannedState) + } + }) + } +} + +func TestManagedDataApply(t *testing.T) { + testUUIDHook = func() string { + return "not-quite-unique" + } + defer func() { + testUUIDHook = nil + }() + + schema := dataStoreResourceSchema().Block + ty := schema.ImpliedType() + + for name, tc := range map[string]struct { + prior cty.Value + planned cty.Value + state cty.Value + }{ + "create": { + prior: cty.NullVal(ty), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "create-output": { + prior: cty.NullVal(ty), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + 
}), + }, + + "update-input": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-input-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + 
"key": cty.StringVal("new value"), + }), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + }, + } { + t.Run("apply-"+name, func(t *testing.T) { + req := providers.ApplyResourceChangeRequest{ + TypeName: "terraform_data", + PriorState: tc.prior, + PlannedState: tc.planned, + } + + resp := applyDataStoreResourceChange(req) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.NewState.RawEquals(tc.state) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.state, resp.NewState) + } + }) + } +} diff --git a/internal/builtin/providers/terraform/testdata/basic.tfstate b/builtin/providers/terraform/testdata/basic.tfstate similarity index 100% rename from internal/builtin/providers/terraform/testdata/basic.tfstate rename to builtin/providers/terraform/testdata/basic.tfstate diff --git a/internal/builtin/providers/terraform/testdata/complex_outputs.tfstate b/builtin/providers/terraform/testdata/complex_outputs.tfstate similarity index 100% rename from internal/builtin/providers/terraform/testdata/complex_outputs.tfstate rename to builtin/providers/terraform/testdata/complex_outputs.tfstate diff --git a/internal/builtin/providers/terraform/testdata/empty.tfstate b/builtin/providers/terraform/testdata/empty.tfstate similarity index 100% rename from internal/builtin/providers/terraform/testdata/empty.tfstate rename to builtin/providers/terraform/testdata/empty.tfstate diff --git a/internal/builtin/providers/terraform/testdata/null_outputs.tfstate b/builtin/providers/terraform/testdata/null_outputs.tfstate similarity index 100% rename from internal/builtin/providers/terraform/testdata/null_outputs.tfstate rename to 
builtin/providers/terraform/testdata/null_outputs.tfstate diff --git a/builtin/provisioners/file/resource_provisioner.go b/builtin/provisioners/file/resource_provisioner.go new file mode 100644 index 000000000000..68d4580e0d8d --- /dev/null +++ b/builtin/provisioners/file/resource_provisioner.go @@ -0,0 +1,207 @@ +package file + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "source": { + Type: cty.String, + Optional: true, + }, + + "content": { + Type: cty.String, + Optional: true, + }, + + "destination": { + Type: cty.String, + Required: true, + }, + }, + } + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + + source := cfg.GetAttr("source") + content := cfg.GetAttr("content") + + switch { + case !source.IsNull() && !content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Cannot set both 'source' and 'content'")) + return resp + case source.IsNull() && 
content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Must provide one of 'source' or 'content'")) + return resp + } + + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + if req.Connection.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + "Missing connection configuration for provisioner.", + )) + return resp + } + + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } + + // Get the source + src, deleteSource, err := getSrc(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } + if deleteSource { + defer os.Remove(src) + } + + // Begin the file copy + dst := req.Config.GetAttr("destination").AsString() + if err := copyFiles(p.ctx, comm, src, dst); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } + + return resp +} + +// getSrc returns the file to use as source +func getSrc(v cty.Value) (string, bool, error) { + content := v.GetAttr("content") + src := v.GetAttr("source") + + switch { + case !content.IsNull(): + file, err := ioutil.TempFile("", "tf-file-content") + if err != nil { + return "", true, err + } + + if _, err = file.WriteString(content.AsString()); err != nil { + return "", true, err + } + + return file.Name(), true, nil + + case !src.IsNull(): + expansion, err := homedir.Expand(src.AsString()) + return expansion, false, err + + default: + panic("source and content cannot both be null") + } +} + +// copyFiles is used to copy the files from 
a source to a destination +func copyFiles(ctx context.Context, comm communicator.Communicator, src, dst string) error { + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(nil) + }) + if err != nil { + return err + } + + // disconnect when the context is canceled, which will close this after + // Apply as well. + go func() { + <-ctx.Done() + comm.Disconnect() + }() + + info, err := os.Stat(src) + if err != nil { + return err + } + + // If we're uploading a directory, short circuit and do that + if info.IsDir() { + if err := comm.UploadDir(dst, src); err != nil { + return fmt.Errorf("Upload failed: %v", err) + } + return nil + } + + // We're uploading a file... + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + err = comm.Upload(dst, f) + if err != nil { + return fmt.Errorf("Upload failed: %v", err) + } + + return err +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} diff --git a/builtin/provisioners/file/resource_provisioner_test.go b/builtin/provisioners/file/resource_provisioner_test.go new file mode 100644 index 000000000000..2e70814af745 --- /dev/null +++ b/builtin/provisioners/file/resource_provisioner_test.go @@ -0,0 +1,118 @@ +package file + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Validate_good_source(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("/tmp/foo"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func 
TestResourceProvider_Validate_good_content(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.StringVal("value to copy"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_good_unknown_variable_value(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.UnknownVal(cty.String), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_bad_not_destination(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +func TestResourceProvider_Validate_bad_no_source(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +func TestResourceProvider_Validate_bad_to_many_src(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), + "content": cty.StringVal("vlue to copy"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +// Validate that Stop can Close can be called even when not 
provisioning. +func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_connectionRequired(t *testing.T) { + p := New() + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) + if !resp.Diagnostics.HasErrors() { + t.Fatal("expected error") + } + + got := resp.Diagnostics.Err().Error() + if !strings.Contains(got, "Missing connection") { + t.Fatalf("expected 'Missing connection' error: got %q", got) + } +} diff --git a/builtin/provisioners/local-exec/resource_provisioner.go b/builtin/provisioners/local-exec/resource_provisioner.go new file mode 100644 index 000000000000..387e9e9d9af6 --- /dev/null +++ b/builtin/provisioners/local-exec/resource_provisioner.go @@ -0,0 +1,221 @@ +package localexec + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "runtime" + + "github.com/armon/circbuf" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/go-linereader" + "github.com/zclconf/go-cty/cty" +) + +const ( + // maxBufSize limits how much output we collect from a local + // invocation. This is to prevent TF memory usage from growing + // to an enormous amount due to a faulty process. + maxBufSize = 8 * 1024 +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. 
+ ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "command": { + Type: cty.String, + Required: true, + }, + "interpreter": { + Type: cty.List(cty.String), + Optional: true, + }, + "working_dir": { + Type: cty.String, + Optional: true, + }, + "environment": { + Type: cty.Map(cty.String), + Optional: true, + }, + "quiet": { + Type: cty.Bool, + Optional: true, + }, + }, + } + + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + if _, err := p.GetSchema().Provisioner.CoerceValue(req.Config); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid local-exec provisioner configuration", + err.Error(), + )) + } + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + command := req.Config.GetAttr("command").AsString() + if command == "" { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid local-exec provisioner command", + "The command must be a non-empty string.", + )) + return resp + } + + envVal := req.Config.GetAttr("environment") + var env []string + + if !envVal.IsNull() { + for k, v := range envVal.AsValueMap() { + if !v.IsNull() { + entry := fmt.Sprintf("%s=%s", k, v.AsString()) + env = append(env, entry) + } + } + } + + // Execute the command using a shell + intrVal := req.Config.GetAttr("interpreter") + + var cmdargs []string + if !intrVal.IsNull() && intrVal.LengthInt() > 0 { + for _, v := range intrVal.AsValueSlice() { + if !v.IsNull() { + cmdargs = append(cmdargs, v.AsString()) + } + } + } else { + if runtime.GOOS == "windows" { + cmdargs = 
[]string{"cmd", "/C"} + } else { + cmdargs = []string{"/bin/sh", "-c"} + } + } + + cmdargs = append(cmdargs, command) + + workingdir := "" + if wdVal := req.Config.GetAttr("working_dir"); !wdVal.IsNull() { + workingdir = wdVal.AsString() + } + + // Set up the reader that will read the output from the command. + // We use an os.Pipe so that the *os.File can be passed directly to the + // process, and not rely on goroutines copying the data which may block. + // See golang.org/issue/18874 + pr, pw, err := os.Pipe() + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "local-exec provisioner error", + fmt.Sprintf("Failed to initialize pipe for output: %s", err), + )) + return resp + } + + var cmdEnv []string + cmdEnv = os.Environ() + cmdEnv = append(cmdEnv, env...) + + // Set up the command + cmd := exec.CommandContext(p.ctx, cmdargs[0], cmdargs[1:]...) + cmd.Stderr = pw + cmd.Stdout = pw + // Dir specifies the working directory of the command. + // If Dir is the empty string (this is default), runs the command + // in the calling process's current directory. + cmd.Dir = workingdir + // Env specifies the environment of the command. 
+ // By default will use the calling process's environment + cmd.Env = cmdEnv + + output, _ := circbuf.NewBuffer(maxBufSize) + + // Write everything we read from the pipe to the output buffer too + tee := io.TeeReader(pr, output) + + // copy the teed output to the UI output + copyDoneCh := make(chan struct{}) + go copyUIOutput(req.UIOutput, tee, copyDoneCh) + + // Output what we're about to run + if quietVal := req.Config.GetAttr("quiet"); !quietVal.IsNull() && quietVal.True() { + req.UIOutput.Output("local-exec: Executing: Suppressed by quiet=true") + } else { + req.UIOutput.Output(fmt.Sprintf("Executing: %q", cmdargs)) + } + + // Start the command + err = cmd.Start() + if err == nil { + err = cmd.Wait() + } + + // Close the write-end of the pipe so that the goroutine mirroring output + // ends properly. + pw.Close() + + // Cancelling the command may block the pipe reader if the file descriptor + // was passed to a child process which hasn't closed it. In this case the + // copyOutput goroutine will just hang out until exit. + select { + case <-copyDoneCh: + case <-p.ctx.Done(): + } + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "local-exec provisioner error", + fmt.Sprintf("Error running command '%s': %v. 
Output: %s", command, err, output.Bytes()), + )) + return resp + } + + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} + +func copyUIOutput(o provisioners.UIOutput, r io.Reader, doneCh chan<- struct{}) { + defer close(doneCh) + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/builtin/provisioners/local-exec/resource_provisioner_test.go b/builtin/provisioners/local-exec/resource_provisioner_test.go new file mode 100644 index 000000000000..f10a3ac1e7b6 --- /dev/null +++ b/builtin/provisioners/local-exec/resource_provisioner_test.go @@ -0,0 +1,252 @@ +package localexec + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Apply(t *testing.T) { + defer os.Remove("test_out") + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo foo > test_out"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatalf("err: %v", resp.Diagnostics.Err()) + } + + // Check the file + raw, err := ioutil.ReadFile("test_out") + if err != nil { + t.Fatalf("err: %v", err) + } + + actual := strings.TrimSpace(string(raw)) + expected := "foo" + if actual != expected { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceProvider_stop(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + // bash/zsh/ksh will exec a single command in the same process. This + // makes certain there's a subprocess in the shell. 
+ "command": cty.StringVal("sleep 30; sleep 30"), + })) + if err != nil { + t.Fatal(err) + } + + doneCh := make(chan struct{}) + startTime := time.Now() + go func() { + defer close(doneCh) + // The functionality of p.Apply is tested in TestResourceProvider_Apply. + // Because p.Apply is called in a goroutine, trying to t.Fatal() on its + // result would be ignored or would cause a panic if the parent goroutine + // has already completed. + _ = p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + }() + + mustExceed := (50 * time.Millisecond) + select { + case <-doneCh: + t.Fatalf("expected to finish sometime after %s finished in %s", mustExceed, time.Since(startTime)) + case <-time.After(mustExceed): + t.Logf("correctly took longer than %s", mustExceed) + } + + // Stop it + stopTime := time.Now() + p.Stop() + + maxTempl := "expected to finish under %s, finished in %s" + finishWithin := (2 * time.Second) + select { + case <-doneCh: + t.Logf(maxTempl, finishWithin, time.Since(stopTime)) + case <-time.After(finishWithin): + t.Fatalf(maxTempl, finishWithin, time.Since(stopTime)) + } +} + +func TestResourceProvider_ApplyCustomInterpreter(t *testing.T) { + output := cli.NewMockUi() + p := New() + + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "interpreter": cty.ListVal([]cty.Value{cty.StringVal("echo"), cty.StringVal("is")}), + "command": cty.StringVal("not really an interpreter"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := `Executing: ["echo" "is" "not really an interpreter"] +is not really an interpreter` + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func 
TestResourceProvider_ApplyCustomWorkingDirectory(t *testing.T) { + testdir := "working_dir_test" + os.Mkdir(testdir, 0755) + defer os.Remove(testdir) + + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "working_dir": cty.StringVal(testdir), + "command": cty.StringVal("echo `pwd`"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + dir, err := os.Getwd() + if err != nil { + t.Fatalf("err: %v", err) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := "Executing: [\"/bin/sh\" \"-c\" \"echo `pwd`\"]\n" + dir + "/" + testdir + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func TestResourceProvider_ApplyCustomEnv(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo $FOO $BAR $BAZ"), + "environment": cty.MapVal(map[string]cty.Value{ + "FOO": cty.StringVal("BAR"), + "BAR": cty.StringVal("1"), + "BAZ": cty.StringVal("true"), + }), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := `Executing: ["/bin/sh" "-c" "echo $FOO $BAR $BAZ"] +BAR 1 true` + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +// Validate that Stop can Close can be called even when not provisioning. 
+func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_nullsInOptionals(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + for i, cfg := range []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "environment": cty.MapVal(map[string]cty.Value{ + "FOO": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "environment": cty.NullVal(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "interpreter": cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "interpreter": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "working_dir": cty.NullVal(cty.String), + }), + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + cfg, err := schema.CoerceValue(cfg) + if err != nil { + t.Fatal(err) + } + + // verifying there are no panics + p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: cfg, + UIOutput: output, + }) + }) + } +} diff --git a/builtin/provisioners/remote-exec/resource_provisioner.go b/builtin/provisioners/remote-exec/resource_provisioner.go new file mode 100644 index 000000000000..abb729cd0c37 --- /dev/null +++ b/builtin/provisioners/remote-exec/resource_provisioner.go @@ -0,0 +1,294 @@ +package remoteexec + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/go-linereader" + 
"github.com/zclconf/go-cty/cty" +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "inline": { + Type: cty.List(cty.String), + Optional: true, + }, + "script": { + Type: cty.String, + Optional: true, + }, + "scripts": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + } + + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid remote-exec provisioner configuration", + err.Error(), + )) + return resp + } + + inline := cfg.GetAttr("inline") + script := cfg.GetAttr("script") + scripts := cfg.GetAttr("scripts") + + set := 0 + if !inline.IsNull() { + set++ + } + if !script.IsNull() { + set++ + } + if !scripts.IsNull() { + set++ + } + if set != 1 { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid remote-exec provisioner configuration", + `Only one of "inline", "script", or "scripts" must be set`, + )) + } + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + if req.Connection.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner 
error", + "Missing connection configuration for provisioner.", + )) + return resp + } + + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + + // Collect the scripts + scripts, err := collectScripts(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + for _, s := range scripts { + defer s.Close() + } + + // Copy and execute each script + if err := runScripts(p.ctx, req.UIOutput, comm, scripts); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} + +// generateScripts takes the configuration and creates a script from each inline config +func generateScripts(inline cty.Value) ([]string, error) { + var lines []string + for _, l := range inline.AsValueSlice() { + if l.IsNull() { + return nil, errors.New("invalid null string in 'scripts'") + } + + s := l.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'scripts'") + } + lines = append(lines, s) + } + lines = append(lines, "") + + return []string{strings.Join(lines, "\n")}, nil +} + +// collectScripts is used to collect all the scripts we need +// to execute in preparation for copying them. 
+func collectScripts(v cty.Value) ([]io.ReadCloser, error) { + // Check if inline + if inline := v.GetAttr("inline"); !inline.IsNull() { + scripts, err := generateScripts(inline) + if err != nil { + return nil, err + } + + var r []io.ReadCloser + for _, script := range scripts { + r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) + } + + return r, nil + } + + // Collect scripts + var scripts []string + if script := v.GetAttr("script"); !script.IsNull() { + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") + } + scripts = append(scripts, s) + } + + if scriptList := v.GetAttr("scripts"); !scriptList.IsNull() { + for _, script := range scriptList.AsValueSlice() { + if script.IsNull() { + return nil, errors.New("invalid null string in 'script'") + } + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") + } + scripts = append(scripts, s) + } + } + + // Open all the scripts + var fhs []io.ReadCloser + for _, s := range scripts { + fh, err := os.Open(s) + if err != nil { + for _, fh := range fhs { + fh.Close() + } + return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) + } + fhs = append(fhs, fh) + } + + // Done, return the file handles + return fhs, nil +} + +// runScripts is used to copy and execute a set of scripts +func runScripts(ctx context.Context, o provisioners.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(o) + }) + if err != nil { + return err + } + + // Wait for the context to end and then disconnect + go func() { + <-ctx.Done() + comm.Disconnect() + }() + + for _, script := range scripts { + var cmd *remote.Cmd + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + defer outW.Close() + defer errW.Close() 
+ + go copyUIOutput(o, outR) + go copyUIOutput(o, errR) + + remotePath := comm.ScriptPath() + + if err := comm.UploadScript(remotePath, script); err != nil { + return fmt.Errorf("Failed to upload script: %v", err) + } + + cmd = &remote.Cmd{ + Command: remotePath, + Stdout: outW, + Stderr: errW, + } + if err := comm.Start(cmd); err != nil { + return fmt.Errorf("Error starting script: %v", err) + } + + if err := cmd.Wait(); err != nil { + return err + } + + // Upload a blank follow up file in the same path to prevent residual + // script contents from remaining on remote machine + empty := bytes.NewReader([]byte("")) + if err := comm.Upload(remotePath, empty); err != nil { + // This feature is best-effort. + log.Printf("[WARN] Failed to upload empty follow up script: %v", err) + } + } + + return nil +} + +func copyUIOutput(o provisioners.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/builtin/provisioners/remote-exec/resource_provisioner_test.go b/builtin/provisioners/remote-exec/resource_provisioner_test.go new file mode 100644 index 000000000000..328ffd787158 --- /dev/null +++ b/builtin/provisioners/remote-exec/resource_provisioner_test.go @@ -0,0 +1,320 @@ +package remoteexec + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "testing" + "time" + + "strings" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Validate_good(t *testing.T) { + c := cty.ObjectVal(map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo foo")}), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_bad(t *testing.T) { 
+ c := cty.ObjectVal(map[string]cty.Value{ + "invalid": cty.StringVal("nope"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if !resp.Diagnostics.HasErrors() { + t.Fatalf("Should have errors") + } +} + +var expectedScriptOut = `cd /tmp +wget http://foobar +exit 0 +` + +func TestResourceProvider_generateScript(t *testing.T) { + inline := cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }) + + out, err := generateScripts(inline) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(out) != 1 { + t.Fatal("expected 1 out") + } + + if out[0] != expectedScriptOut { + t.Fatalf("bad: %v", out) + } +} + +func TestResourceProvider_generateScriptEmptyInline(t *testing.T) { + inline := cty.ListVal([]cty.Value{cty.StringVal("")}) + + _, err := generateScripts(inline) + if err == nil { + t.Fatal("expected error, got none") + } + + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("expected empty string error, got: %s", err) + } +} + +func TestResourceProvider_CollectScripts_inline(t *testing.T) { + conf := map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }), + } + + scripts, err := collectScripts(cty.ObjectVal(conf)) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 1 { + t.Fatalf("bad: %v", scripts) + } + + var out bytes.Buffer + _, err = io.Copy(&out, scripts[0]) + if err != nil { + t.Fatalf("err: %v", err) + } + + if out.String() != expectedScriptOut { + t.Fatalf("bad: %v", out.String()) + } +} + +func TestResourceProvider_CollectScripts_script(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + 
t.Fatal(err) + } + + scripts, err := collectScripts(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 1 { + t.Fatalf("bad: %v", scripts) + } + + var out bytes.Buffer + _, err = io.Copy(&out, scripts[0]) + if err != nil { + t.Fatalf("err: %v", err) + } + + if out.String() != expectedScriptOut { + t.Fatalf("bad: %v", out.String()) + } +} + +func TestResourceProvider_CollectScripts_scripts(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + log.Fatal(err) + } + + scripts, err := collectScripts(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 3 { + t.Fatalf("bad: %v", scripts) + } + + for idx := range scripts { + var out bytes.Buffer + _, err = io.Copy(&out, scripts[idx]) + if err != nil { + t.Fatalf("err: %v", err) + } + + if out.String() != expectedScriptOut { + t.Fatalf("bad: %v", out.String()) + } + } +} + +func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{cty.StringVal("")}), + })) + if err != nil { + t.Fatal(err) + } + + _, err = collectScripts(conf) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("Expected empty string error, got: %s", err) + } +} + +func TestProvisionerTimeout(t *testing.T) { + o := cli.NewMockUi() + c := new(communicator.MockCommunicator) + + disconnected := make(chan struct{}) + c.DisconnectFunc = func() error { + close(disconnected) + return nil + } + + completed := make(chan struct{}) + c.CommandFunc = func(cmd *remote.Cmd) error { + defer close(completed) + cmd.Init() 
+ time.Sleep(2 * time.Second) + cmd.SetExitStatus(0, nil) + return nil + } + c.ConnTimeout = time.Second + c.UploadScripts = map[string]string{"hello": "echo hello"} + c.RemoteScriptPath = "hello" + + conf := map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo hello")}), + } + + scripts, err := collectScripts(cty.ObjectVal(conf)) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + done := make(chan struct{}) + + var runErr error + go func() { + defer close(done) + runErr = runScripts(ctx, o, c, scripts) + }() + + select { + case <-disconnected: + t.Fatal("communicator disconnected before command completed") + case <-completed: + } + + <-done + if runErr != nil { + t.Fatal(err) + } +} + +// Validate that Stop can Close can be called even when not provisioning. +func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_connectionRequired(t *testing.T) { + p := New() + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) + if !resp.Diagnostics.HasErrors() { + t.Fatal("expected error") + } + + got := resp.Diagnostics.Err().Error() + if !strings.Contains(got, "Missing connection") { + t.Fatalf("expected 'Missing connection' error: got %q", got) + } +} + +func TestResourceProvisioner_nullsInOptionals(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + for i, cfg := range []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "script": cty.StringVal("echo"), + "inline": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "script": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), 
+ }), + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + cfg, err := schema.CoerceValue(cfg) + if err != nil { + t.Fatal(err) + } + + // verifying there are no panics + p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: cfg, + UIOutput: output, + }) + }) + } +} diff --git a/internal/builtin/provisioners/remote-exec/testdata/script1.sh b/builtin/provisioners/remote-exec/testdata/script1.sh similarity index 100% rename from internal/builtin/provisioners/remote-exec/testdata/script1.sh rename to builtin/provisioners/remote-exec/testdata/script1.sh diff --git a/checkpoint.go b/checkpoint.go index 31cc29bf7527..5885bb345e7e 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -6,8 +6,8 @@ import ( "path/filepath" "github.com/hashicorp/go-checkpoint" - "github.com/hashicorp/terraform/internal/command" - "github.com/hashicorp/terraform/internal/command/cliconfig" + "github.com/hashicorp/terraform/command" + "github.com/hashicorp/terraform/command/cliconfig" ) func init() { diff --git a/internal/checks/doc.go b/checks/doc.go similarity index 100% rename from internal/checks/doc.go rename to checks/doc.go diff --git a/checks/state.go b/checks/state.go new file mode 100644 index 000000000000..4d4cb3dca4b6 --- /dev/null +++ b/checks/state.go @@ -0,0 +1,290 @@ +package checks + +import ( + "fmt" + "sort" + "sync" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" +) + +// State is a container for state tracking of all of the the checks declared in +// a particular Terraform configuration and their current statuses. +// +// A State object is mutable during plan and apply operations but should +// otherwise be treated as a read-only snapshot of the status of checks +// at a particular moment. +// +// The checks State tracks a few different concepts: +// - configuration objects: items in the configuration which statically +// declare some checks associated with zero or more checkable objects. 
+// - checkable objects: dynamically-determined objects that are each +// associated with one configuration object. +// - checks: a single check that is declared as part of a configuration +// object and then resolved once for each of its associated checkable +// objects. +// - check statuses: the current state of a particular check associated +// with a particular checkable object. +// +// This container type is concurrency-safe for both reads and writes through +// its various methods. +type State struct { + mu sync.Mutex + + statuses addrs.Map[addrs.ConfigCheckable, *configCheckableState] + failureMsgs addrs.Map[addrs.Check, string] +} + +// configCheckableState is an internal part of type State that represents +// the evaluation status for a particular addrs.ConfigCheckable address. +// +// Its initial state, at the beginning of a run, is that it doesn't even know +// how many checkable objects will be dynamically-declared yet. Terraform Core +// will notify the State object of the associated Checkables once +// it has decided the appropriate expansion of that configuration object, +// and then will gradually report the results of each check once the graph +// walk reaches it. +// +// This must be accessed only while holding the mutex inside the associated +// State object. +type configCheckableState struct { + // checkTypes captures the expected number of checks of each type + // associated with object declared by this configuration construct. Since + // checks are statically declared (even though the checkable objects + // aren't) we can compute this only from the configuration. + checkTypes map[addrs.CheckType]int + + // objects represents the set of dynamic checkable objects associated + // with this configuration construct. This is initially nil to represent + // that we don't know the objects yet, and is replaced by a non-nil map + // once Terraform Core reports the expansion of this configuration + // construct. 
+ // + // The leaf Status values will initially be StatusUnknown + // and then gradually updated by Terraform Core as it visits the + // individual checkable objects and reports their status. + objects addrs.Map[addrs.Checkable, map[addrs.CheckType][]Status] +} + +// NOTE: For the "Report"-prefixed methods that we use to gradually update +// the structure with results during a plan or apply operation, see the +// state_report.go file also in this package. + +// NewState returns a new State object representing the check statuses of +// objects declared in the given configuration. +// +// The configuration determines which configuration objects and associated +// checks we'll be expecting to see, so that we can seed their statuses as +// all unknown until we see affirmative reports sent by the Report-prefixed +// methods on Checks. +func NewState(config *configs.Config) *State { + return &State{ + statuses: initialStatuses(config), + } +} + +// ConfigHasChecks returns true if and only if the given address refers to +// a configuration object that this State object is expecting to recieve +// statuses for. +// +// Other methods of Checks will typically panic if given a config address +// that would not have returned true from ConfigHasChecked. +func (c *State) ConfigHasChecks(addr addrs.ConfigCheckable) bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.statuses.Has(addr) +} + +// AllConfigAddrs returns all of the addresses of all configuration objects +// that could potentially produce checkable objects at runtime. +// +// This is a good starting point for reporting on the outcome of all of the +// configured checks at the configuration level of granularity, e.g. for +// automated testing reports where we want to report the status of all +// configured checks even if the graph walk aborted before we reached any +// of their objects. 
+func (c *State) AllConfigAddrs() addrs.Set[addrs.ConfigCheckable] { + c.mu.Lock() + defer c.mu.Unlock() + return c.statuses.Keys() +} + +// ObjectAddrs returns the addresses of individual checkable objects belonging +// to the configuration object with the given address. +// +// This will panic if the given address isn't a known configuration object +// that has checks. +func (c *State) ObjectAddrs(configAddr addrs.ConfigCheckable) addrs.Set[addrs.Checkable] { + c.mu.Lock() + defer c.mu.Unlock() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("unknown configuration object %s", configAddr)) + } + + ret := addrs.MakeSet[addrs.Checkable]() + for _, elem := range st.objects.Elems { + ret.Add(elem.Key) + } + return ret + +} + +// AggregateCheckStatus returns a summarization of all of the check results +// for a particular configuration object into a single status. +// +// The given address must refer to an object within the configuration that +// this Checks was instantiated from, or this method will panic. +func (c *State) AggregateCheckStatus(addr addrs.ConfigCheckable) Status { + c.mu.Lock() + defer c.mu.Unlock() + + st, ok := c.statuses.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown configuration object %s", addr)) + } + + if st.objects.Elems == nil { + // If we don't even know how many objects we have for this + // configuration construct then that summarizes as unknown. + // (Note: this is different than Elems being a non-nil empty map, + // which means that we know there are zero objects and therefore + // the aggregate result will be pass to pass below.) + return StatusUnknown + } + + // Otherwise, our result depends on how many of our known objects are + // in each status. 
+ errorCount := 0 + failCount := 0 + unknownCount := 0 + + for _, objects := range st.objects.Elems { + for _, checks := range objects.Value { + for _, status := range checks { + switch status { + case StatusPass: + // ok + case StatusFail: + failCount++ + case StatusError: + errorCount++ + default: + unknownCount++ + } + } + } + } + + return summarizeCheckStatuses(errorCount, failCount, unknownCount) +} + +// ObjectCheckStatus returns a summarization of all of the check results +// for a particular checkable object into a single status. +// +// The given address must refer to a checkable object that Terraform Core +// previously reported while doing a graph walk, or this method will panic. +func (c *State) ObjectCheckStatus(addr addrs.Checkable) Status { + c.mu.Lock() + defer c.mu.Unlock() + + configAddr := addr.ConfigCheckable() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + if st.objects.Elems == nil { + panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) + } + checks, ok := st.objects.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + + errorCount := 0 + failCount := 0 + unknownCount := 0 + for _, statuses := range checks { + for _, status := range statuses { + switch status { + case StatusPass: + // ok + case StatusFail: + failCount++ + case StatusError: + errorCount++ + default: + unknownCount++ + } + } + } + return summarizeCheckStatuses(errorCount, failCount, unknownCount) +} + +// ObjectFailureMessages returns the zero or more failure messages reported +// for the object with the given address. 
+// +// Failure messages are recorded only for checks whose status is StatusFail, +// but since this aggregates together the results of all of the checks +// on the given object it's possible for there to be a mixture of failures +// and errors at the same time, which would aggregate as StatusError in +// ObjectCheckStatus's result because errors are defined as "stronger" +// than failures. +func (c *State) ObjectFailureMessages(addr addrs.Checkable) []string { + var ret []string + + configAddr := addr.ConfigCheckable() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + if st.objects.Elems == nil { + panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) + } + checksByType, ok := st.objects.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + + for checkType, checks := range checksByType { + for i, status := range checks { + if status == StatusFail { + checkAddr := addrs.NewCheck(addr, checkType, i) + msg := c.failureMsgs.Get(checkAddr) + if msg != "" { + ret = append(ret, msg) + } + } + } + } + + // We always return the messages in a lexical sort order just so that + // it'll be consistent between runs if we still have the same problems. + sort.Strings(ret) + + return ret +} + +func summarizeCheckStatuses(errorCount, failCount, unknownCount int) Status { + switch { + case errorCount > 0: + // If we saw any errors then we'll treat the whole thing as errored. + return StatusError + case failCount > 0: + // If anything failed then this whole configuration construct failed. + return StatusFail + case unknownCount > 0: + // If nothing failed but we still have unknowns then our outcome isn't + // known yet. + return StatusUnknown + default: + // If we have no failures and no unknowns then either we have all + // passes or no checkable objects at all, both of which summarize as + // a pass. 
+ return StatusPass + } +} diff --git a/internal/checks/state_init.go b/checks/state_init.go similarity index 95% rename from internal/checks/state_init.go rename to checks/state_init.go index 6714243b5c3b..5b2cadc4faad 100644 --- a/internal/checks/state_init.go +++ b/checks/state_init.go @@ -1,8 +1,8 @@ package checks import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) func initialStatuses(cfg *configs.Config) addrs.Map[addrs.ConfigCheckable, *configCheckableState] { diff --git a/internal/checks/state_report.go b/checks/state_report.go similarity index 98% rename from internal/checks/state_report.go rename to checks/state_report.go index ccf35ac13826..01b9c13a4398 100644 --- a/internal/checks/state_report.go +++ b/checks/state_report.go @@ -3,7 +3,7 @@ package checks import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // These are the "Report"-prefixed methods of Checks used by Terraform Core diff --git a/checks/state_test.go b/checks/state_test.go new file mode 100644 index 000000000000..f528f94e2eda --- /dev/null +++ b/checks/state_test.go @@ -0,0 +1,208 @@ +package checks + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/initwd" +) + +func TestChecksHappyPath(t *testing.T) { + const fixtureDir = "testdata/happypath" + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := initwd.NewModuleInstaller(loader.ModulesDir(), nil) + _, instDiags := inst.InstallModules(context.Background(), fixtureDir, true, initwd.ModuleInstallHooksImpl{}) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: 
%s", err) + } + + ///////////////////////////////////////////////////////////////////////// + + cfg, hclDiags := loader.LoadConfig(fixtureDir) + if hclDiags.HasErrors() { + t.Fatalf("invalid configuration: %s", hclDiags.Error()) + } + + resourceA := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "a", + }.InModule(addrs.RootModule) + resourceNoChecks := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "no_checks", + }.InModule(addrs.RootModule) + resourceNonExist := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "nonexist", + }.InModule(addrs.RootModule) + rootOutput := addrs.OutputValue{ + Name: "a", + }.InModule(addrs.RootModule) + moduleChild := addrs.RootModule.Child("child") + resourceB := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "b", + }.InModule(moduleChild) + resourceC := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "c", + }.InModule(moduleChild) + childOutput := addrs.OutputValue{ + Name: "b", + }.InModule(moduleChild) + + // First some consistency checks to make sure our configuration is the + // shape we are relying on it to be. 
+ if addr := resourceA; cfg.Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceB; cfg.Children["child"].Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceNoChecks; cfg.Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceNonExist; cfg.Module.ResourceByAddr(addr.Resource) != nil { + t.Fatalf("configuration includes %s, which is not supposed to exist", addr) + } + + ///////////////////////////////////////////////////////////////////////// + + checks := NewState(cfg) + + missing := 0 + if addr := resourceA; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceB; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceC; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := rootOutput; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := childOutput; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceNoChecks; checks.ConfigHasChecks(addr) { + t.Errorf("checks detected for %s, even though it has none", addr) + } + if addr := resourceNonExist; checks.ConfigHasChecks(addr) { + t.Errorf("checks detected for %s, even though it doesn't exist", addr) + } + if missing > 0 { + t.Fatalf("missing some configuration objects we'd need for subsequent testing") + } + + ///////////////////////////////////////////////////////////////////////// + + // Everything should start with status unknown. 
+ + { + wantConfigAddrs := addrs.MakeSet[addrs.ConfigCheckable]( + resourceA, + resourceB, + resourceC, + rootOutput, + childOutput, + ) + gotConfigAddrs := checks.AllConfigAddrs() + if diff := cmp.Diff(wantConfigAddrs, gotConfigAddrs); diff != "" { + t.Errorf("wrong detected config addresses\n%s", diff) + } + + for _, configAddr := range gotConfigAddrs { + if got, want := checks.AggregateCheckStatus(configAddr), StatusUnknown; got != want { + t.Errorf("incorrect initial aggregate check status for %s: %s, but want %s", configAddr, got, want) + } + } + } + + ///////////////////////////////////////////////////////////////////////// + + // The following are steps that would normally be done by Terraform Core + // as part of visiting checkable objects during the graph walk. We're + // simulating a likely sequence of calls here for testing purposes, but + // Terraform Core won't necessarily visit all of these in exactly the + // same order every time and so this is just one possible valid ordering + // of calls. 
+ + resourceInstA := resourceA.Resource.Absolute(addrs.RootModuleInstance).Instance(addrs.NoKey) + rootOutputInst := rootOutput.OutputValue.Absolute(addrs.RootModuleInstance) + moduleChildInst := addrs.RootModuleInstance.Child("child", addrs.NoKey) + resourceInstB := resourceB.Resource.Absolute(moduleChildInst).Instance(addrs.NoKey) + resourceInstC0 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(0)) + resourceInstC1 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(1)) + childOutputInst := childOutput.OutputValue.Absolute(moduleChildInst) + + checks.ReportCheckableObjects(resourceA, addrs.MakeSet[addrs.Checkable](resourceInstA)) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 0, StatusPass) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 1, StatusPass) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePostcondition, 0, StatusPass) + + checks.ReportCheckableObjects(resourceB, addrs.MakeSet[addrs.Checkable](resourceInstB)) + checks.ReportCheckResult(resourceInstB, addrs.ResourcePrecondition, 0, StatusPass) + + checks.ReportCheckableObjects(resourceC, addrs.MakeSet[addrs.Checkable](resourceInstC0, resourceInstC1)) + checks.ReportCheckResult(resourceInstC0, addrs.ResourcePostcondition, 0, StatusPass) + checks.ReportCheckResult(resourceInstC1, addrs.ResourcePostcondition, 0, StatusPass) + + checks.ReportCheckableObjects(childOutput, addrs.MakeSet[addrs.Checkable](childOutputInst)) + checks.ReportCheckResult(childOutputInst, addrs.OutputPrecondition, 0, StatusPass) + + checks.ReportCheckableObjects(rootOutput, addrs.MakeSet[addrs.Checkable](rootOutputInst)) + checks.ReportCheckResult(rootOutputInst, addrs.OutputPrecondition, 0, StatusPass) + + ///////////////////////////////////////////////////////////////////////// + + // This "section" is simulating what we might do to report the results + // of the checks after a run completes. 
+ + { + configCount := 0 + for _, configAddr := range checks.AllConfigAddrs() { + configCount++ + if got, want := checks.AggregateCheckStatus(configAddr), StatusPass; got != want { + t.Errorf("incorrect final aggregate check status for %s: %s, but want %s", configAddr, got, want) + } + } + if got, want := configCount, 5; got != want { + t.Errorf("incorrect number of known config addresses %d; want %d", got, want) + } + } + + { + objAddrs := addrs.MakeSet[addrs.Checkable]( + resourceInstA, + rootOutputInst, + resourceInstB, + resourceInstC0, + resourceInstC1, + childOutputInst, + ) + for _, addr := range objAddrs { + if got, want := checks.ObjectCheckStatus(addr), StatusPass; got != want { + t.Errorf("incorrect final check status for object %s: %s, but want %s", addr, got, want) + } + } + } +} diff --git a/internal/checks/status.go b/checks/status.go similarity index 100% rename from internal/checks/status.go rename to checks/status.go diff --git a/internal/checks/status_string.go b/checks/status_string.go similarity index 100% rename from internal/checks/status_string.go rename to checks/status_string.go diff --git a/internal/checks/testdata/happypath/checks-happypath.tf b/checks/testdata/happypath/checks-happypath.tf similarity index 100% rename from internal/checks/testdata/happypath/checks-happypath.tf rename to checks/testdata/happypath/checks-happypath.tf diff --git a/internal/checks/testdata/happypath/child/checks-happypath-child.tf b/checks/testdata/happypath/child/checks-happypath-child.tf similarity index 100% rename from internal/checks/testdata/happypath/child/checks-happypath-child.tf rename to checks/testdata/happypath/child/checks-happypath-child.tf diff --git a/cloud/backend.go b/cloud/backend.go new file mode 100644 index 000000000000..0ad3490e0ee9 --- /dev/null +++ b/cloud/backend.go @@ -0,0 +1,1213 @@ +package cloud + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + tfe 
"github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + defaultHostname = "app.terraform.io" + defaultParallelism = 10 + tfeServiceID = "tfe.v2" + headerSourceKey = "X-Terraform-Integration" + headerSourceValue = "cloud" + genericHostname = "localterraform.com" +) + +// Cloud is an implementation of EnhancedBackend in service of the Terraform Cloud/Enterprise +// integration for Terraform CLI. This backend is not intended to be surfaced at the user level and +// is instead an implementation detail of cloud.Cloud. +type Cloud struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ContextOpts are the base context options to set when initializing a + // new Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // client is the Terraform Cloud/Enterprise API client. + client *tfe.Client + + // lastRetry is set to the last time a request was retried. 
+ lastRetry time.Time + + // hostname of Terraform Cloud or Terraform Enterprise + hostname string + + // token for Terraform Cloud or Terraform Enterprise + token string + + // organization is the organization that contains the target workspaces. + organization string + + // WorkspaceMapping contains strategies for mapping CLI workspaces in the working directory + // to remote Terraform Cloud workspaces. + WorkspaceMapping WorkspaceMapping + + // services is used for service discovery + services *disco.Disco + + // renderer is used for rendering JSON plan output and streamed logs. + renderer *jsonformat.Renderer + + // local allows local operations, where Terraform Cloud serves as a state storage backend. + local backend.Enhanced + + // forceLocal, if true, will force the use of the local backend. + forceLocal bool + + // opLock locks operations + opLock sync.Mutex + + // ignoreVersionConflict, if true, will disable the requirement that the + // local Terraform version matches the remote workspace's configured + // version. This will also cause VerifyWorkspaceTerraformVersion to return + // a warning diagnostic instead of an error. + ignoreVersionConflict bool + + runningInAutomation bool + + // input stores the value of the -input flag, since it will be used + // to determine whether or not to ask the user for approval of a run. + input bool +} + +var _ backend.Backend = (*Cloud)(nil) +var _ backend.Enhanced = (*Cloud)(nil) +var _ backend.Local = (*Cloud)(nil) + +// New creates a new initialized cloud backend. +func New(services *disco.Disco) *Cloud { + return &Cloud{ + services: services, + } +} + +// ConfigSchema implements backend.Enhanced. 
+func (b *Cloud) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "hostname": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionHostname, + }, + "organization": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionOrganization, + }, + "token": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionToken, + }, + }, + + BlockTypes: map[string]*configschema.NestedBlock{ + "workspaces": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionName, + }, + "tags": { + Type: cty.Set(cty.String), + Optional: true, + Description: schemaDescriptionTags, + }, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + } +} + +// PrepareConfig implements backend.Backend. +func (b *Cloud) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return obj, diags + } + + // check if organization is specified in the config. 
+	if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" {
+		// organization is not set in the config (or is empty), so
+		// we'll fall back on TF_CLOUD_ORGANIZATION
+		if val := os.Getenv("TF_CLOUD_ORGANIZATION"); val == "" {
+			diags = diags.Append(missingConfigAttributeAndEnvVar("organization", "TF_CLOUD_ORGANIZATION"))
+		}
+	}
+
+	WorkspaceMapping := WorkspaceMapping{}
+	if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() {
+		if val := workspaces.GetAttr("name"); !val.IsNull() {
+			WorkspaceMapping.Name = val.AsString()
+		}
+		if val := workspaces.GetAttr("tags"); !val.IsNull() {
+			err := gocty.FromCtyValue(val, &WorkspaceMapping.Tags)
+			if err != nil {
+				log.Panicf("An unexpected error occurred: %s", err)
+			}
+		}
+	} else {
+		WorkspaceMapping.Name = os.Getenv("TF_WORKSPACE")
+	}
+
+	switch WorkspaceMapping.Strategy() {
+	// Make sure we have a workspace mapping strategy present
+	case WorkspaceNoneStrategy:
+		diags = diags.Append(invalidWorkspaceConfigMissingValues)
+	// Make sure that a workspace name is configured.
+	case WorkspaceInvalidStrategy:
+		diags = diags.Append(invalidWorkspaceConfigMisconfiguration)
+	}
+
+	return obj, diags
+}
+
+// configureGenericHostname aliases the cloud backend hostname configuration
+// as a generic "localterraform.com" hostname. This was originally added as a
+// Terraform Enterprise feature and is useful for re-using whatever the
+// Cloud/Enterprise backend host is in nested module sources in order
+// to prevent code churn when re-using config between multiple
+// Terraform Enterprise environments.
+func (b *Cloud) configureGenericHostname() {
+	// This won't be an error for the given constant value
+	genericHost, _ := svchost.ForComparison(genericHostname)
+
+	// This won't be an error because, by this time, the hostname has been parsed and
+	// service discovery requests made against it.
+	targetHost, _ := svchost.ForComparison(b.hostname)
+
+	b.services.Alias(genericHost, targetHost)
+}
+
+// Configure implements backend.Enhanced.
+func (b *Cloud) Configure(obj cty.Value) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	if obj.IsNull() {
+		return diags
+	}
+
+	diagErr := b.setConfigurationFields(obj)
+	if diagErr.HasErrors() {
+		return diagErr
+	}
+
+	// Discover the service URL to confirm that it provides the Terraform Cloud/Enterprise API
+	service, err := b.discover()
+
+	// Check for errors before we continue.
+	if err != nil {
+		diags = diags.Append(tfdiags.AttributeValue(
+			tfdiags.Error,
+			strings.ToUpper(err.Error()[:1])+err.Error()[1:],
+			"", // no description is needed here, the error is clear
+			cty.Path{cty.GetAttrStep{Name: "hostname"}},
+		))
+		return diags
+	}
+
+	// First we'll retrieve the token from the configuration
+	var token string
+	if val := obj.GetAttr("token"); !val.IsNull() {
+		token = val.AsString()
+	}
+
+	// Get the token from the CLI Config File in the credentials section
+	// if no token was set in the configuration
+	if token == "" {
+		token, err = b.cliConfigToken()
+		if err != nil {
+			diags = diags.Append(tfdiags.AttributeValue(
+				tfdiags.Error,
+				strings.ToUpper(err.Error()[:1])+err.Error()[1:],
+				"", // no description is needed here, the error is clear
+				cty.Path{cty.GetAttrStep{Name: "hostname"}},
+			))
+			return diags
+		}
+	}
+
+	// Return an error if we still don't have a token at this point.
+ if token == "" { + loginCommand := "terraform login" + if b.hostname != defaultHostname { + loginCommand = loginCommand + " " + b.hostname + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required token could not be found", + fmt.Sprintf( + "Run the following command to generate a token for %s:\n %s", + b.hostname, + loginCommand, + ), + )) + return diags + } + + b.token = token + b.configureGenericHostname() + + if b.client == nil { + cfg := &tfe.Config{ + Address: service.String(), + BasePath: service.Path, + Token: token, + Headers: make(http.Header), + RetryLogHook: b.retryLogHook, + } + + // Set the version header to the current version. + cfg.Headers.Set(tfversion.Header, tfversion.Version) + cfg.Headers.Set(headerSourceKey, headerSourceValue) + + // Create the TFC/E API client. + b.client, err = tfe.NewClient(cfg) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create the Terraform Cloud/Enterprise client", + fmt.Sprintf( + `Encountered an unexpected error while creating the `+ + `Terraform Cloud/Enterprise client: %s.`, err, + ), + )) + return diags + } + } + + // Check if the organization exists by reading its entitlements. 
+ entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + err = fmt.Errorf("organization %q at host %s not found.\n\n"+ + "Please ensure that the organization and hostname are correct "+ + "and that your API token for %s is valid.", + b.organization, b.hostname, b.hostname) + } + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname), + fmt.Sprintf("Encountered an unexpected error while reading the "+ + "organization settings: %s", err), + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + return diags + } + + if ws, ok := os.LookupEnv("TF_WORKSPACE"); ok { + if ws == b.WorkspaceMapping.Name || b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + diag := b.validWorkspaceEnvVar(context.Background(), b.organization, ws) + if diag != nil { + diags = diags.Append(diag) + return diags + } + } + } + + // Check for the minimum version of Terraform Enterprise required. + // + // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, + // so if there's an error when parsing the RemoteAPIVersion, it's handled as + // equivalent to an API version < 2.3. + currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + desiredAPIVersion, _ := version.NewVersion("2.5") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + log.Printf("[TRACE] API version check failed; want: >= %s, got: %s", desiredAPIVersion.Original(), currentAPIVersion) + if b.runningInAutomation { + // It should never be possible for this Terraform process to be mistakenly + // used internally within an unsupported Terraform Enterprise install - but + // just in case it happens, give an actionable error. 
+ diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Unsupported Terraform Enterprise version", + cloudIntegrationUsedInUnsupportedTFE, + ), + ) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported Terraform Enterprise version", + `The 'cloud' option is not supported with this version of Terraform Enterprise.`, + ), + ) + } + } + + // Configure a local backend for when we need to run operations locally. + b.local = backendLocal.NewWithBackend(b) + b.forceLocal = b.forceLocal || !entitlements.Operations + + // Enable retries for server errors as the backend is now fully configured. + b.client.RetryServerErrors(true) + + return diags +} + +func (b *Cloud) setConfigurationFields(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // Get the hostname. + b.hostname = os.Getenv("TF_CLOUD_HOSTNAME") + if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { + b.hostname = val.AsString() + } else if b.hostname == "" { + b.hostname = defaultHostname + } + + // We can have two options, setting the organization via the config + // or using TF_CLOUD_ORGANIZATION. Since PrepareConfig() validates that one of these + // values must exist, we'll initially set it to the env var and override it if + // specified in the configuration. + b.organization = os.Getenv("TF_CLOUD_ORGANIZATION") + + // Check if the organization is present and valid in the config. + if val := obj.GetAttr("organization"); !val.IsNull() && val.AsString() != "" { + b.organization = val.AsString() + } + + // Get the workspaces configuration block and retrieve the + // default workspace name. + if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { + + // PrepareConfig checks that you cannot set both of these. 
+ if val := workspaces.GetAttr("name"); !val.IsNull() { + b.WorkspaceMapping.Name = val.AsString() + } + if val := workspaces.GetAttr("tags"); !val.IsNull() { + var tags []string + err := gocty.FromCtyValue(val, &tags) + if err != nil { + log.Panicf("An unexpected error occurred: %s", err) + } + + b.WorkspaceMapping.Tags = tags + } + } else { + b.WorkspaceMapping.Name = os.Getenv("TF_WORKSPACE") + } + + // Determine if we are forced to use the local backend. + b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" + + return diags +} + +// discover the TFC/E API service URL and version constraints. +func (b *Cloud) discover() (*url.URL, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return nil, err + } + + host, err := b.services.Discover(hostname) + if err != nil { + var serviceDiscoErr *disco.ErrServiceDiscoveryNetworkRequest + + switch { + case errors.As(err, &serviceDiscoErr): + err = fmt.Errorf("a network issue prevented cloud configuration; %w", err) + return nil, err + default: + return nil, err + } + } + + service, err := host.ServiceURL(tfeServiceID) + // Return the error, unless its a disco.ErrVersionNotSupported error. + if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil { + return nil, err + } + + return service, err +} + +// cliConfigToken returns the token for this host as configured in the credentials +// section of the CLI Config File. If no token was configured, an empty +// string will be returned instead. 
+func (b *Cloud) cliConfigToken() (string, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// retryLogHook is invoked each time a request is retried allowing the +// backend to log any connection issues to prevent data loss. +func (b *Cloud) retryLogHook(attemptNum int, resp *http.Response) { + if b.CLI != nil { + // Ignore the first retry to make sure any delayed output will + // be written to the console before we start logging retries. + // + // The retry logic in the TFE client will retry both rate limited + // requests and server errors, but in the cloud backend we only + // care about server errors so we ignore rate limit (429) errors. + if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { + // Reset the last retry time. + b.lastRetry = time.Now() + return + } + + if attemptNum == 1 { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) + } else { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace( + fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) + } + } +} + +// Workspaces implements backend.Enhanced, returning a filtered list of workspace names according to +// the workspace mapping strategy configured. +func (b *Cloud) Workspaces() ([]string, error) { + // Create a slice to contain all the names. + var names []string + + // If configured for a single workspace, return that exact name only. The StateMgr for this + // backend will automatically create the remote workspace if it does not yet exist. + if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { + names = append(names, b.WorkspaceMapping.Name) + return names, nil + } + + // Otherwise, multiple workspaces are being mapped. 
Query Terraform Cloud for all the remote + // workspaces by the provided mapping strategy. + options := &tfe.WorkspaceListOptions{} + if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + taglist := strings.Join(b.WorkspaceMapping.Tags, ",") + options.Tags = taglist + } + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + names = append(names, w.Name) + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil +} + +// DeleteWorkspace implements backend.Enhanced. +func (b *Cloud) DeleteWorkspace(name string, force bool) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + + if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { + return backend.ErrWorkspacesNotSupported + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err == tfe.ErrResourceNotFound { + return nil // If the workspace does not exist, succeed + } + + if err != nil { + return fmt.Errorf("failed to retrieve workspace %s: %v", name, err) + } + + // Configure the remote workspace name. + State := &State{tfeClient: b.client, organization: b.organization, workspace: workspace} + return State.Delete(force) +} + +// StateMgr implements backend.Enhanced. 
+func (b *Cloud) StateMgr(name string) (statemgr.Full, error) { + var remoteTFVersion string + + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + + if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy && name != b.WorkspaceMapping.Name { + return nil, backend.ErrWorkspacesNotSupported + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("Failed to retrieve workspace %s: %v", name, err) + } + if workspace != nil { + remoteTFVersion = workspace.TerraformVersion + } + + if err == tfe.ErrResourceNotFound { + // Create a workspace + options := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + Tags: b.WorkspaceMapping.tfeTags(), + } + + log.Printf("[TRACE] cloud: Creating Terraform Cloud workspace %s/%s", b.organization, name) + workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options) + if err != nil { + return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) + } + + remoteTFVersion = workspace.TerraformVersion + + // Attempt to set the new workspace to use this version of Terraform. This + // can fail if there's no enabled tool_version whose name matches our + // version string, but that's expected sometimes -- just warn and continue. + versionOptions := tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(tfversion.String()), + } + _, err := b.client.Workspaces.UpdateByID(context.Background(), workspace.ID, versionOptions) + if err == nil { + remoteTFVersion = tfversion.String() + } else { + // TODO: Ideally we could rely on the client to tell us what the actual + // problem was, but we currently can't get enough context from the error + // object to do a nicely formatted message, so we're just assuming the + // issue was that the version wasn't available since that's probably what + // happened. 
+ log.Printf("[TRACE] cloud: Attempted to select version %s for TFC workspace; unavailable, so %s will be used instead.", tfversion.String(), workspace.TerraformVersion) + if b.CLI != nil { + versionUnavailable := fmt.Sprintf(unavailableTerraformVersion, tfversion.String(), workspace.TerraformVersion) + b.CLI.Output(b.Colorize().Color(versionUnavailable)) + } + } + } + + if b.workspaceTagsRequireUpdate(workspace, b.WorkspaceMapping) { + options := tfe.WorkspaceAddTagsOptions{ + Tags: b.WorkspaceMapping.tfeTags(), + } + log.Printf("[TRACE] cloud: Adding tags for Terraform Cloud workspace %s/%s", b.organization, name) + err = b.client.Workspaces.AddTags(context.Background(), workspace.ID, options) + if err != nil { + return nil, fmt.Errorf("Error updating workspace %s: %v", name, err) + } + } + + // This is a fallback error check. Most code paths should use other + // mechanisms to check the version, then set the ignoreVersionConflict + // field to true. This check is only in place to ensure that we don't + // accidentally upgrade state with a new code path, and the version check + // logic is coarser and simpler. + if !b.ignoreVersionConflict { + // Explicitly ignore the pseudo-version "latest" here, as it will cause + // plan and apply to always fail. + if remoteTFVersion != tfversion.String() && remoteTFVersion != "latest" { + return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", remoteTFVersion, tfversion.String()) + } + } + + return &State{tfeClient: b.client, organization: b.organization, workspace: workspace}, nil +} + +// Operation implements backend.Enhanced. +func (b *Cloud) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + // Retrieve the workspace for this operation. + w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) + if err != nil { + return nil, err + } + + // Terraform remote version conflicts are not a concern for operations. 
We
+	// are in one of three states:
+	//
+	// - Running remotely, in which case the local version is irrelevant;
+	// - Workspace configured for local operations, in which case the remote
+	//   version is meaningless;
+	// - Forcing local operations, which should only happen in the Terraform Cloud worker, in
+	//   which case the Terraform versions by definition match.
+	b.IgnoreVersionConflict()
+
+	// Check if we need to use the local backend to run the operation.
+	if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) {
+		// Record that we're forced to run operations locally to allow the
+		// command package UI to operate correctly
+		b.forceLocal = true
+		return b.local.Operation(ctx, op)
+	}
+
+	// Set the remote workspace name.
+	op.Workspace = w.Name
+
+	// Determine the function to call for our operation
+	var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error)
+	switch op.Type {
+	case backend.OperationTypePlan:
+		f = b.opPlan
+	case backend.OperationTypeApply:
+		f = b.opApply
+	case backend.OperationTypeRefresh:
+		// The `terraform refresh` command has been deprecated in favor of `terraform apply -refresh-state`.
+		// Rather than respond with an error telling the user to run the other command we can just run
+		// that command instead. We will tell the user what we are doing, and then do it.
+		if b.CLI != nil {
+			b.CLI.Output(b.Colorize().Color(strings.TrimSpace(refreshToApplyRefresh) + "\n"))
+		}
+		op.PlanMode = plans.RefreshOnlyMode
+		op.PlanRefresh = true
+		op.AutoApprove = true
+		f = b.opApply
+	default:
+		return nil, fmt.Errorf(
+			"\n\nTerraform Cloud does not support the %q operation.", op.Type)
+	}
+
+	// Lock
+	b.opLock.Lock()
+
+	// Build our running operation
+	// the runningCtx is only used to block until the operation returns.
+ runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + // Do it. + go func() { + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op, w) + if opErr != nil && opErr != context.Canceled { + var diags tfdiags.Diagnostics + diags = diags.Append(opErr) + op.ReportResult(runningOp, diags) + return + } + + if r == nil && opErr == context.Canceled { + runningOp.Result = backend.OperationFailure + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + + // Record if there are any changes. + runningOp.PlanEmpty = !r.HasChanges + + if opErr == context.Canceled { + if err := b.cancel(cancelCtx, op, r); err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + } + + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + runningOp.Result = backend.OperationFailure + } + } + }() + + // Return the running operation. + return runningOp, nil +} + +func (b *Cloud) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.Actions.IsCancelable { + // Only ask if the remote operation should be canceled + // if the auto approve flag is not set. 
+	if !op.AutoApprove {
+		v, err := op.UIIn.Input(cancelCtx, &terraform.InputOpts{
+			Id:          "cancel",
+			Query:       "\nDo you want to cancel the remote operation?",
+			Description: "Only 'yes' will be accepted to cancel.",
+		})
+		if err != nil {
+			return generalError("Failed asking to cancel", err)
+		}
+		if v != "yes" {
+			if b.CLI != nil {
+				b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled)))
+			}
+			return nil
+		}
+	} else {
+		if b.CLI != nil {
+			// Insert a blank line to separate the outputs.
+			b.CLI.Output("")
+		}
+	}
+
+	// Try to cancel the remote operation.
+	err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{})
+	if err != nil {
+		return generalError("Failed to cancel run", err)
+	}
+	if b.CLI != nil {
+		b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled)))
+	}
+	}
+
+	return nil
+}
+
+// IgnoreVersionConflict allows commands to disable the fall-back check that
+// the local Terraform version matches the remote workspace's configured
+// Terraform version. This should be called by commands where this check is
+// unnecessary, such as those performing remote operations, or read-only
+// operations. It will also be called if the user uses a command-line flag to
+// override this check.
+func (b *Cloud) IgnoreVersionConflict() {
+	b.ignoreVersionConflict = true
+}
+
+// VerifyWorkspaceTerraformVersion compares the local Terraform version against
+// the workspace's configured Terraform version. If they are compatible, this
+// means that there are no state compatibility concerns, so it returns no
+// diagnostics.
+//
+// If the versions aren't compatible, it returns an error (or, if
+// b.ignoreVersionConflict is set, a warning).
+func (b *Cloud) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) + if err != nil { + // If the workspace doesn't exist, there can be no compatibility + // problem, so we can return. This is most likely to happen when + // migrating state from a local backend to a new workspace. + if err == tfe.ErrResourceNotFound { + return nil + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Workspace read failed: %s", err), + )) + return diags + } + + // If the workspace has the pseudo-version "latest", all bets are off. We + // cannot reasonably determine what the intended Terraform version is, so + // we'll skip version verification. + if workspace.TerraformVersion == "latest" { + return nil + } + + // If the workspace has execution-mode set to local, the remote Terraform + // version is effectively meaningless, so we'll skip version verification. + if isLocalExecutionMode(workspace.ExecutionMode) { + return nil + } + + remoteConstraint, err := version.NewConstraint(workspace.TerraformVersion) + if err != nil { + message := fmt.Sprintf( + "The remote workspace specified an invalid Terraform version or constraint (%s), "+ + "and it isn't possible to determine whether the local Terraform version (%s) is compatible.", + workspace.TerraformVersion, + tfversion.String(), + ) + diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) + return diags + } + + remoteVersion, _ := version.NewSemver(workspace.TerraformVersion) + + // We can use a looser version constraint if the workspace specifies a + // literal Terraform version, and it is not a prerelease. The latter + // restriction is because we cannot compare prerelease versions with any + // operator other than simple equality. 
+ if remoteVersion != nil && remoteVersion.Prerelease() == "" { + v014 := version.Must(version.NewSemver("0.14.0")) + v130 := version.Must(version.NewSemver("1.3.0")) + + // Versions from 0.14 through the early 1.x series should be compatible + // (though we don't know about 1.3 yet). + if remoteVersion.GreaterThanOrEqual(v014) && remoteVersion.LessThan(v130) { + early1xCompatible, err := version.NewConstraint(fmt.Sprintf(">= 0.14.0, < %s", v130.String())) + if err != nil { + panic(err) + } + remoteConstraint = early1xCompatible + } + + // Any future new state format will require at least a minor version + // increment, so x.y.* will always be compatible with each other. + if remoteVersion.GreaterThanOrEqual(v130) { + rwvs := remoteVersion.Segments64() + if len(rwvs) >= 3 { + // ~> x.y.0 + minorVersionCompatible, err := version.NewConstraint(fmt.Sprintf("~> %d.%d.0", rwvs[0], rwvs[1])) + if err != nil { + panic(err) + } + remoteConstraint = minorVersionCompatible + } + } + } + + // Re-parsing tfversion.String because tfversion.SemVer omits the prerelease + // prefix, and we want to allow constraints like `~> 1.2.0-beta1`. + fullTfversion := version.Must(version.NewSemver(tfversion.String())) + + if remoteConstraint.Check(fullTfversion) { + return diags + } + + message := fmt.Sprintf( + "The local Terraform version (%s) does not meet the version requirements for remote workspace %s/%s (%s).", + tfversion.String(), + b.organization, + workspace.Name, + remoteConstraint, + ) + diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) + return diags +} + +func (b *Cloud) IsLocalOperations() bool { + return b.forceLocal +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so useful +// as a helper to wrap any potentially colored strings. +// +// TODO SvH: Rename this back to Colorize as soon as we can pass -no-color. 
+// +//lint:ignore U1000 see above todo +func (b *Cloud) cliColorize() *colorstring.Colorize { + if b.CLIColor != nil { + return b.CLIColor + } + + return &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } +} + +func (b *Cloud) workspaceTagsRequireUpdate(workspace *tfe.Workspace, workspaceMapping WorkspaceMapping) bool { + if workspaceMapping.Strategy() != WorkspaceTagsStrategy { + return false + } + + existingTags := map[string]struct{}{} + for _, t := range workspace.TagNames { + existingTags[t] = struct{}{} + } + + for _, tag := range workspaceMapping.Tags { + if _, ok := existingTags[tag]; !ok { + return true + } + } + + return false +} + +type WorkspaceMapping struct { + Name string + Tags []string +} + +type workspaceStrategy string + +const ( + WorkspaceTagsStrategy workspaceStrategy = "tags" + WorkspaceNameStrategy workspaceStrategy = "name" + WorkspaceNoneStrategy workspaceStrategy = "none" + WorkspaceInvalidStrategy workspaceStrategy = "invalid" +) + +func (wm WorkspaceMapping) Strategy() workspaceStrategy { + switch { + case len(wm.Tags) > 0 && wm.Name == "": + return WorkspaceTagsStrategy + case len(wm.Tags) == 0 && wm.Name != "": + return WorkspaceNameStrategy + case len(wm.Tags) == 0 && wm.Name == "": + return WorkspaceNoneStrategy + default: + // Any other combination is invalid as each strategy is mutually exclusive + return WorkspaceInvalidStrategy + } +} + +func isLocalExecutionMode(execMode string) bool { + return execMode == "local" +} + +func (b *Cloud) fetchWorkspace(ctx context.Context, organization string, workspace string) (*tfe.Workspace, error) { + // Retrieve the workspace for this operation. 
+ w, err := b.client.Workspaces.Read(ctx, organization, workspace) + if err != nil { + switch err { + case context.Canceled: + return nil, err + case tfe.ErrResourceNotFound: + return nil, fmt.Errorf( + "workspace %s not found\n\n"+ + "For security, Terraform Cloud returns '404 Not Found' responses\n"+ + "for resources that a user doesn't have access to, in addition to resources that\n"+ + "do not exist. If the resource does exist, please check the permissions of the provided token.", + workspace, + ) + default: + err := fmt.Errorf( + "Terraform Cloud returned an unexpected error:\n\n%s", + err, + ) + return nil, err + } + } + + return w, nil + } + + // validWorkspaceEnvVar ensures we have selected a valid workspace using TF_WORKSPACE: + // First, it ensures the workspace specified by TF_WORKSPACE exists in the organization + // Second, if tags are specified in the configuration, it ensures TF_WORKSPACE belongs to the set + // of available workspaces with those given tags. + func (b *Cloud) validWorkspaceEnvVar(ctx context.Context, organization, workspace string) tfdiags.Diagnostic { + // first ensure the workspace exists + _, err := b.client.Workspaces.Read(ctx, organization, workspace) + if err != nil && err != tfe.ErrResourceNotFound { + return tfdiags.Sourceless( + tfdiags.Error, + "Terraform Cloud returned an unexpected error", + err.Error(), + ) + } + + if err == tfe.ErrResourceNotFound { + return tfdiags.Sourceless( + tfdiags.Error, + "Invalid workspace selection", + fmt.Sprintf(`Terraform failed to find workspace %q in organization %s.`, workspace, organization), + ) + } + + // if the configuration has specified tags, we need to ensure TF_WORKSPACE + // is a valid member + if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + opts := &tfe.WorkspaceListOptions{} + opts.Tags = strings.Join(b.WorkspaceMapping.Tags, ",") + + for { + wl, err := b.client.Workspaces.List(ctx, b.organization, opts) + if err != nil { + return tfdiags.Sourceless( + 
tfdiags.Error, + "Terraform Cloud returned an unexpected error", + err.Error(), + ) + } + + for _, ws := range wl.Items { + if ws.Name == workspace { + return nil + } + } + + if wl.CurrentPage >= wl.TotalPages { + break + } + + opts.PageNumber = wl.NextPage + } + + return tfdiags.Sourceless( + tfdiags.Error, + "Invalid workspace selection", + fmt.Sprintf( + "Terraform failed to find workspace %q with the tags specified in your configuration:\n[%s]", + workspace, + strings.ReplaceAll(opts.Tags, ",", ", "), + ), + ) + } + + return nil + } + + func (wm WorkspaceMapping) tfeTags() []*tfe.Tag { + var tags []*tfe.Tag + + if wm.Strategy() != WorkspaceTagsStrategy { + return tags + } + + for _, tag := range wm.Tags { + t := tfe.Tag{Name: tag} + tags = append(tags, &t) + } + + return tags + } + + func generalError(msg string, err error) error { + var diags tfdiags.Diagnostics + + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + + switch err { + case context.Canceled: + return err + case tfe.ErrResourceNotFound: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + "For security, Terraform Cloud returns '404 Not Found' responses\n"+ + "for resources that a user doesn't have access to, in addition to resources that\n"+ + "do not exist. If the resource does exist, please check the permissions of the provided token.", + )) + return diags.Err() + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `Terraform Cloud returned an unexpected error. Sometimes `+ + `this is caused by network connection problems, in which case you could retry `+ + `the command. If the issue persists please open a support ticket to get help `+ + `resolving the problem.`, + )) + return diags.Err() + } + } + + // The newline in this error is to make it look good in the CLI! + const initialRetryError = ` + [reset][yellow]There was an error connecting to Terraform Cloud. 
Please do not exit +Terraform to prevent data loss! Trying to restore the connection... +[reset] +` + +const repeatedRetryError = ` +[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset] +` + +const operationCanceled = ` +[reset][red]The remote operation was successfully cancelled.[reset] +` + +const operationNotCanceled = ` +[reset][red]The remote operation was not cancelled.[reset] +` + +const refreshToApplyRefresh = `[bold][yellow]Proceeding with 'terraform apply -refresh-only -auto-approve'.[reset]` + +const unavailableTerraformVersion = ` +[reset][yellow]The local Terraform version (%s) is not available in Terraform Cloud, or your +organization does not have access to it. The new workspace will use %s. You can +change this later in the workspace settings.[reset]` + +const cloudIntegrationUsedInUnsupportedTFE = ` +This version of Terraform Cloud/Enterprise does not support the state mechanism +attempting to be used by the platform. This should never happen. + +Please reach out to HashiCorp Support to resolve this issue.` + +var ( + workspaceConfigurationHelp = fmt.Sprintf( + `The 'workspaces' block configures how Terraform CLI maps its workspaces for this single +configuration to workspaces within a Terraform Cloud organization. Two strategies are available: + +[bold]tags[reset] - %s + +[bold]name[reset] - %s`, schemaDescriptionTags, schemaDescriptionName) + + schemaDescriptionHostname = `The Terraform Enterprise hostname to connect to. This optional argument defaults to app.terraform.io +for use with Terraform Cloud.` + + schemaDescriptionOrganization = `The name of the organization containing the targeted workspace(s).` + + schemaDescriptionToken = `The token used to authenticate with Terraform Cloud/Enterprise. 
Typically this argument should not +be set, and 'terraform login' used instead; your credentials will then be fetched from your CLI +configuration file or configured credential helper.` + + schemaDescriptionTags = `A set of tags used to select remote Terraform Cloud workspaces to be used for this single +configuration. New workspaces will automatically be tagged with these tag values. Generally, this +is the primary and recommended strategy to use. This option conflicts with "name".` + + schemaDescriptionName = `The name of a single Terraform Cloud workspace to be used with this configuration. +When configured, only the specified workspace can be used. This option conflicts with "tags".` +) diff --git a/cloud/backend_apply.go b/cloud/backend_apply.go new file mode 100644 index 000000000000..cc82980862c5 --- /dev/null +++ b/cloud/backend_apply.go @@ -0,0 +1,228 @@ +package cloud + +import ( + "bufio" + "context" + "encoding/json" + "io" + "log" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Cloud) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] cloud: starting Apply operation") + + var diags tfdiags.Diagnostics + + // We should remove the `CanUpdate` part of this test, but for now + // (to remain compatible with tfe.v2.1) we'll leave it in here. + if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. 
In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `Terraform Cloud does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved plan is currently not supported", + `Terraform Cloud currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Apply requires configuration to be present. Applying without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run 'terraform destroy' which `+ + `does not require any configuration files.`, + )) + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + // Run the plan phase. + r, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. 
+ if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. + if !op.AutoApprove && !r.Actions.IsConfirmable { + return r, nil + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove + + if mustConfirm && b.input { + opts := &terraform.InputOpts{Id: "approve"} + + if op.PlanMode == plans.DestroyMode { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + err = b.confirm(stopCtx, op, opts, r, "yes") + if err != nil && err != errRunApproved { + return r, err + } + } else if mustConfirm && !b.input { + return r, errApplyNeedsUIConfirmation + } else { + // If we don't need to ask for confirmation, insert a blank + // line to separate the outputs. + if b.CLI != nil { + b.CLI.Output("") + } + } + + if !op.AutoApprove && err != errRunApproved { + if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + + // Retrieve the run to get task stages. + // Task Stages are calculated upfront so we only need to call this once for the run. 
+ taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) + if err != nil { + return r, err + } + + if stage, ok := taskStages[tfe.PreApply]; ok { + if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-apply Tasks"); err != nil { + return r, err + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + err = b.renderApplyLogs(stopCtx, r) + if err != nil { + return r, err + } + + return r, nil +} + +func (b *Cloud) renderApplyLogs(ctx context.Context, run *tfe.Run) error { + logs, err := b.client.Applies.Logs(ctx, run.Apply.ID) + if err != nil { + return err + } + + if b.CLI != nil { + reader := bufio.NewReaderSize(logs, 64*1024) + skip := 0 + + for next := true; next; { + var l, line []byte + var err error + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + + line = append(line, l...) + } + + // Apply logs show the same Terraform info logs as shown in the plan logs + // (which contain version and os/arch information), we therefore skip to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + + if next || len(line) > 0 { + log := &jsonformat.JSONLog{} + if err := json.Unmarshal(line, log); err != nil { + // If we can not parse the line as JSON, we will simply + // print the line. This maintains backwards compatibility for + // users who do not wish to enable structured output in their + // workspace. + b.CLI.Output(string(line)) + continue + } + + if b.renderer != nil { + // Otherwise, we will print the log + err := b.renderer.RenderLog(log) + if err != nil { + return err + } + } + } + } + } + + return nil +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in Terraform Cloud. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if it's still pending. 
If the apply started it +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... +` diff --git a/cloud/backend_apply_test.go b/cloud/backend_apply_test.go new file mode 100644 index 000000000000..3bffa3b55aad --- /dev/null +++ b/cloud/backend_apply_test.go @@ -0,0 +1,1898 @@ +package cloud + +import ( + "context" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + gomock "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + mocks "github.com/hashicorp/go-tfe/mocks" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" +) + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationApplyWithTimeout(t, configDir, 0) +} + +func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := 
views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeApply, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done + } + + func TestCloud_applyBasic(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", output) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked 
after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONBasic(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONWithOutputs(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: 
mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-outputs") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + expectedSimpleOutput := `simple = [ + "some", + "list", + ]` + expectedSensitiveOutput := `secret = (sensitive value)` + expectedComplexOutput := `complex = { + keyA = { + someList = [ + 1, + 2, + 3, + ] + } + keyB = { + someBool = true + someStr = "hello" + } + }` + + if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "Outputs:") { + t.Fatalf("expected output header: %s", gotOut) + } + if !strings.Contains(gotOut, expectedSimpleOutput) { + t.Fatalf("expected output: %s, got: %s", expectedSimpleOutput, gotOut) + } + if !strings.Contains(gotOut, expectedSensitiveOutput) { + t.Fatalf("expected output: %s, got: %s", expectedSensitiveOutput, gotOut) + } + if !strings.Contains(gotOut, expectedComplexOutput) { + t.Fatalf("expected output: %s, got: %s", expectedComplexOutput, gotOut) + } + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if 
_, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyCanceled(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) + } +} + +func TestCloud_applyWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + // Create a named workspace without permissions. 
+ w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueApply = false + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to apply changes") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestCloud_applyWithVCS(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + // Create a named workspace with a VCS. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + VCSRepo: &tfe.VCSRepoOptions{}, + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { + t.Fatalf("expected a VCS error, got: %v", errOutput) + } +} + +func TestCloud_applyWithParallelism(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, 
configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestCloud_applyWithPlan(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestCloud_applyWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation 
failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh set + // to false. + runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh-only set + // to true. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithTarget(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // target address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithReplace(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } + } + + func TestCloud_applyWithRequiredVariables(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") + defer configCleanup() + defer done(t) + + op.Variables = testVariables(terraform.ValueFromNamedFile, "foo") // "bar" variable value missing + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + // The usual error of a required variable being missing is deferred and the operation + // is successful + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + } + + func TestCloud_applyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected 
configuration files error, got: %v", errOutput) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) + } +} + +func TestCloud_applyNoChanges(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summery: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestCloud_applyNoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Apply discarded") { + t.Fatalf("expected an apply discarded error, got: %v", errOutput) + } +} + +func TestCloud_applyAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyApprovedExternally(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. 
+ time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) + if err != nil { + t.Fatalf("unexpected error approving run: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "approved using the UI or API") { + t.Fatalf("expected external approval in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyDiscardedExternally(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting 
operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) + if err != nil { + t.Fatalf("unexpected error discarding run: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "discarded using the UI or API") { + t.Fatalf("expected external discard output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyWithAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + op.AutoApprove = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use + // the local backend with itself as embedded backend. 
+ if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestCloud_applyWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestCloud_applyLockTimeout(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. 
+ c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. + signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summery in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyDestroy(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } 
+} + +func TestCloud_applyJSONWithProvisioner(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner") + defer configCleanup() + defer done(t) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + if !strings.Contains(gotOut, "null_resource.foo: Provisioning with 'local-exec'") { + t.Fatalf("expected provisioner local-exec start in logs: %s", gotOut) + } + + if !strings.Contains(gotOut, "null_resource.foo: (local-exec):") { + t.Fatalf("expected provisioner local-exec progress in logs: %s", gotOut) + } + + if !strings.Contains(gotOut, "Hello World!") { + t.Fatalf("expected provisioner local-exec output in logs: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONWithProvisionerError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = 
&jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "local-exec provisioner error") { + t.Fatalf("unexpected error in apply logs: %s", gotOut) + } +} + +func TestCloud_applyPolicyPass(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery 
in output: %s", output) + } +} + +func TestCloud_applyPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.AutoApprove = false + op.UIIn = input + op.UIOut = b.CLI + 
op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyPolicySoftFailAutoApproveSuccess(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + policyCheckMock := mocks.NewMockPolicyChecks(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(fmt.Sprintf("%s\n%s", sentinelSoftFail, applySuccessOneResourceAdded)) + + pc := &tfe.PolicyCheck{ + ID: "pc-1", + Actions: &tfe.PolicyActions{ + IsOverridable: true, + }, + Permissions: &tfe.PolicyPermissions{ + CanOverride: true, + }, + Scope: tfe.PolicyScopeOrganization, + Status: tfe.PolicySoftFailed, + } + policyCheckMock.EXPECT().Read(gomock.Any(), gomock.Any()).Return(pc, nil) + policyCheckMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + policyCheckMock.EXPECT().Override(gomock.Any(), gomock.Any()).Return(nil, nil) + b.client.PolicyChecks = policyCheckMock + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. + logs = strings.NewReader("\n\n\n1 added, 0 changed, 0 destroyed") + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{}) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to success due to auto-approve") + } + + if run.PlanEmpty { + t.Fatalf("expected plan to not be empty, plan opertion completed without error") + } + + if len(input.answers) != 0 { + t.Fatalf("expected no answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected no policy check errors, instead got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Sentinel Result: false") 
{ + t.Fatalf("expected policy check to be false, insead got: %s", output) + } + if !strings.Contains(output, "Apply complete!") { + t.Fatalf("expected apply to be complete, instead got: %s", output) + } + + if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected resources, instead got: %s", output) + } +} + +func TestCloud_applyPolicySoftFailAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. + logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + op.AutoApprove = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 2 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in Terraform Cloud") { + 
t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected apply error in output: %s", output) + } +} + +func TestCloud_applyJSONWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if 
run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "Unsupported block type") { + t.Fatalf("unexpected plan error in output: %s", gotOut) + } +} + +func TestCloud_applyVersionCheck(t *testing.T) { + testCases := map[string]struct { + localVersion string + remoteVersion string + forceLocal bool + executionMode string + wantErr string + }{ + "versions can be different for remote apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "remote", + }, + "versions can be different for local apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "local", + }, + "force local with remote operations and different versions is acceptable": { + localVersion: "0.14.0", + remoteVersion: "0.14.0-acme-provider-bundle", + forceLocal: true, + executionMode: "remote", + }, + "no error if versions are identical": { + localVersion: "0.14.0", + remoteVersion: "0.14.0", + forceLocal: true, + executionMode: "remote", + }, + "no error if force local but workspace has remote operations disabled": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + forceLocal: true, + executionMode: "local", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // SETUP: Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // SETUP: Set local version for the test case + tfversion.Prerelease = "" + tfversion.Version = tc.localVersion + tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) + + // SETUP: Set force local for the test case + b.forceLocal = tc.forceLocal + + ctx := context.Background() + + // SETUP: set the operations and Terraform 
Version fields on the + // remote workspace + _, err := b.client.Workspaces.Update( + ctx, + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: tfe.String(tc.executionMode), + TerraformVersion: tfe.String(tc.remoteVersion), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + // RUN: prepare the apply operation and run it + op, configCleanup, opDone := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer opDone(t) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // RUN: wait for completion + <-run.Done() + output := done(t) + + if tc.wantErr != "" { + // ASSERT: if the test case wants an error, check for failure + // and the error message + if run.Result != backend.OperationFailure { + t.Fatalf("expected run to fail, but result was %#v", run.Result) + } + errOutput := output.Stderr() + if !strings.Contains(errOutput, tc.wantErr) { + t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) + } + } else { + // ASSERT: otherwise, check for success and appropriate output + // based on whether the run should be local or remote + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + output := b.CLI.(*cli.MockUi).OutputWriter.String() + hasRemote := strings.Contains(output, "Running apply in Terraform Cloud") + hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") + hasResources := run.State.HasManagedResourceInstanceObjects() + if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { + if !hasRemote { + 
t.Errorf("missing TFC header in output: %s", output) + } + if !hasSummary { + t.Errorf("expected apply summary in output: %s", output) + } + } else { + if hasRemote { + t.Errorf("unexpected TFC header in output: %s", output) + } + if !hasResources { + t.Errorf("expected resources in state") + } + } + } + }) + } +} + +const applySuccessOneResourceAdded = ` +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. +` + +const sentinelSoftFail = ` +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" +` diff --git a/internal/cloud/backend_cli.go b/cloud/backend_cli.go similarity index 81% rename from internal/cloud/backend_cli.go rename to cloud/backend_cli.go index 21d0399b6c31..afcd61026792 100644 --- a/internal/cloud/backend_cli.go +++ b/cloud/backend_cli.go @@ -1,8 +1,8 @@ package cloud import ( - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" ) // CLIInit implements backend.CLI diff --git a/internal/cloud/backend_colorize.go b/cloud/backend_colorize.go similarity index 100% rename from internal/cloud/backend_colorize.go rename to cloud/backend_colorize.go diff --git a/cloud/backend_common.go b/cloud/backend_common.go new file mode 100644 index 000000000000..7632cc4a89d2 --- /dev/null +++ b/cloud/backend_common.go @@ -0,0 +1,635 @@ +package cloud + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "net/url" + "strconv" + 
"strings" + "time" + + "github.com/hashicorp/go-retryablehttp" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/jsonapi" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" +) + +var ( + backoffMin = 1000.0 + backoffMax = 3000.0 + + runPollInterval = 3 * time.Second +) + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. +func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (b *Cloud) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return r, stopCtx.Err() + case <-cancelCtx.Done(): + return r, cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + // Timer up, show status + } + + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is no longer pending. + if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { + if i == 0 && opType == "plan" && b.CLI != nil { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) + } + if i > 0 && b.CLI != nil { + // Insert a blank line to separate the outputs. + b.CLI.Output("") + } + return r, nil + } + + // Check if 30 seconds have passed since the last update. 
+ current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + position := 0 + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + + // Retrieve the workspace used to run this operation in. + w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // If the workspace is locked the run will not be queued and we can + // update the status without making any expensive calls. + if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run. + if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := &tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. 
+ if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.ReadRunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. + for _, item := range rq.Items { + if r.ID == item.ID { + position = item.PositionInQueue + break search + } + } + + // Exit the loop when we've seen all pages. + if rq.CurrentPage >= rq.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rq.NextPage + } + + if position > 0 { + c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization) + if err != nil { + return r, generalError("Failed to retrieve capacity", err) + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d queued run(s) to finish before starting...%s", + position-c.Running, + elapsed, + ))) + continue + } + + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for the %s to start...%s", opType, elapsed))) + } + } +} + +func (b *Cloud) waitTaskStage(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run, stageID string, outputTitle string) error { + integration := &IntegrationContext{ + B: b, + StopContext: stopCtx, + CancelContext: cancelCtx, + Op: op, + Run: r, + } + return b.runTaskStage(integration, integration.BeginOutput(outputTitle), stageID) +} + +func (b *Cloud) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.CostEstimate == nil { + return 
nil + } + + msgPrefix := "Cost Estimation" + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return stopCtx.Err() + case <-cancelCtx.Done(): + return cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + } + + // Retrieve the cost estimate to get its current status. + ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) + if err != nil { + return generalError("Failed to retrieve cost estimate", err) + } + + // If the run is canceled or errored, but the cost-estimate still has + // no result, there is nothing further to render. + if ce.Status != tfe.CostEstimateFinished { + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return nil + } + } + + // checking if i == 0 so as to avoid printing this starting horizontal-rule + // every retry, and that it only prints it on the first (i=0) attempt. + if b.CLI != nil && i == 0 { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + + switch ce.Status { + case tfe.CostEstimateFinished: + delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) + if err != nil { + return generalError("Unexpected error", err) + } + + sign := "+" + if delta < 0 { + sign = "-" + } + + deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) + + if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + return nil + case tfe.CostEstimatePending, tfe.CostEstimateQueued: + // Check if 30 seconds have passed since the last update. 
+ current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." + elapsed + "\n")) + } + continue + case tfe.CostEstimateSkippedDueToTargeting: + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output("Not available for this plan, because it was created with the -target option.") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateErrored: + b.CLI.Output(msgPrefix + " errored.\n") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateCanceled: + return fmt.Errorf(msgPrefix + " canceled.") + default: + return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) + } + } +} + +func (b *Cloud) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + // Read the policy check logs. This is a blocking call that will only + // return once the policy check is complete. + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + // Retrieve the policy check to get its current status. 
+ pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + // If the run is canceled or errored, but the policy check still has + // no result, there is nothing further to render. + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + switch pc.Status { + case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: + continue + } + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization Policy Check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace Policy Check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + } + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) 
+ } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + switch pc.Status { + case tfe.PolicyPasses: + if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) + + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) + } + + if op.AutoApprove { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else if !b.input { + return errPolicyOverrideNeedsUIConfirmation + } else { + opts := &terraform.InputOpts{ + Id: "override", + Query: "\nDo you want to override the soft failed policy check?", + Description: "Only 'override' will be accepted to override.", + } + err = b.confirm(stopCtx, op, opts, r, "override") + if err != nil && err != errRunOverridden { + return fmt.Errorf( + fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl), + ) + } + + if err != errRunOverridden { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) + } + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) 
+ } + } + + return nil +} + +func (b *Cloud) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { + doneCtx, cancel := context.WithCancel(stopCtx) + result := make(chan error, 2) + + go func() { + // Make sure we cancel doneCtx before we return + // so the input command is also canceled. + defer cancel() + + for { + select { + case <-doneCtx.Done(): + return + case <-stopCtx.Done(): + return + case <-time.After(runPollInterval): + // Retrieve the run again to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + result <- generalError("Failed to retrieve run", err) + return + } + + switch keyword { + case "override": + if r.Status != tfe.RunPolicyOverride && r.Status != tfe.RunPostPlanAwaitingDecision { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunOverridden + } + } + case "yes": + if !r.Actions.IsConfirmable { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunApproved + } + } + } + + if err != nil { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) + } + + if err == errRunDiscarded { + err = errApplyDiscarded + if op.PlanMode == plans.DestroyMode { + err = errDestroyDiscarded + } + } + + result <- err + return + } + } + } + }() + + result <- func() error { + v, err := op.UIIn.Input(doneCtx, opts) + if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { + return fmt.Errorf("Error asking %s: %v", opts.Id, err) + } + + // We return the error of our parent channel as we don't + // care about the error of the doneCtx which is only used + // within this function. So if the doneCtx was canceled + // because stopCtx was canceled, this will properly return + // a context.Canceled error and otherwise it returns nil. 
+ if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { + return stopCtx.Err() + } + + // Make sure we cancel the context here so the loop that + // checks for external changes to the run is ended before + // we start to make changes ourselves. + cancel() + + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.PlanMode == plans.DestroyMode { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was discarded successfully, we still + // return an error as the apply command was canceled. + if op.PlanMode == plans.DestroyMode { + return errDestroyDiscarded + } + return errApplyDiscarded + } + + return nil + }() + + return <-result +} + +// This method will fetch the redacted plan output and marshal the response into +// a struct the jsonformat.Renderer expects. 
+// +// Note: Apologies for the lengthy definition, this is a result of not being able to mock receiver methods +var readRedactedPlan func(context.Context, url.URL, string, string) (*jsonformat.Plan, error) = func(ctx context.Context, baseURL url.URL, token string, planID string) (*jsonformat.Plan, error) { + client := retryablehttp.NewClient() + client.RetryMax = 10 + client.RetryWaitMin = 100 * time.Millisecond + client.RetryWaitMax = 400 * time.Millisecond + client.Logger = logging.HCLogger() + + u, err := baseURL.Parse(fmt.Sprintf( + "plans/%s/json-output-redacted", url.QueryEscape(planID))) + if err != nil { + return nil, err + } + + req, err := retryablehttp.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Accept", "application/json") + + p := &jsonformat.Plan{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err = checkResponseCode(resp); err != nil { + return nil, err + } + + if err := json.NewDecoder(resp.Body).Decode(p); err != nil { + return nil, err + } + + return p, nil +} + +func checkResponseCode(r *http.Response) error { + if r.StatusCode >= 200 && r.StatusCode <= 299 { + return nil + } + + var errs []string + var err error + + switch r.StatusCode { + case 401: + return tfe.ErrUnauthorized + case 404: + return tfe.ErrResourceNotFound + } + + errs, err = decodeErrorPayload(r) + if err != nil { + return err + } + + return errors.New(strings.Join(errs, "\n")) +} + +func decodeErrorPayload(r *http.Response) ([]string, error) { + // Decode the error payload. + var errs []string + errPayload := &jsonapi.ErrorsPayload{} + err := json.NewDecoder(r.Body).Decode(errPayload) + if err != nil || len(errPayload.Errors) == 0 { + return errs, errors.New(r.Status) + } + + // Parse and format the errors. 
+ for _, e := range errPayload.Errors { + if e.Detail == "" { + errs = append(errs, e.Title) + } else { + errs = append(errs, fmt.Sprintf("%s\n\n%s", e.Title, e.Detail)) + } + } + + return errs, nil +} diff --git a/cloud/backend_context.go b/cloud/backend_context.go new file mode 100644 index 000000000000..06525b04e487 --- /dev/null +++ b/cloud/backend_context.go @@ -0,0 +1,292 @@ +package cloud + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// LocalRun implements backend.Local +func (b *Cloud) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := &backend.LocalRun{ + PlanOpts: &terraform.PlanOpts{ + Mode: op.PlanMode, + Targets: op.Targets, + }, + } + + op.StateLocker = op.StateLocker.WithContext(context.Background()) + + // Get the remote workspace name. + remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) + + // Get the latest state. + log.Printf("[TRACE] cloud: requesting state manager for workspace %q", remoteWorkspaceName) + stateMgr, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] cloud: requesting state lock for workspace %q", remoteWorkspaceName) + if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { + return nil, nil, diags + } + + defer func() { + // If we're returning with errors, and thus not producing a valid + // context, we'll want to avoid leaving the remote workspace locked. 
+ if diags.HasErrors() { + diags = diags.Append(op.StateLocker.Unlock()) + } + }() + + log.Printf("[TRACE] cloud: reading remote state for workspace %q", remoteWorkspaceName) + if err := stateMgr.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + // Initialize our context options + var opts terraform.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.UIInput = op.UIIn + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] cloud: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) + ret.InputState = stateMgr.State() + + log.Printf("[TRACE] cloud: loading configuration for the current working directory") + config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + ret.Config = config + + if op.AllowUnsetVariables { + // If we're not going to use the variables in an operation we'll be + // more lax about them, stubbing out any unset ones as unknown. + // This gives us enough information to produce a consistent context, + // but not enough information to run a real operation (plan, apply, etc) + ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) + } else { + // The underlying API expects us to use the opaque workspace id to request + // variables, so we'll need to look that up using our organization name + // and workspace name. 
+ remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) + return nil, nil, diags + } + w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) + return nil, nil, diags + } + + if isLocalExecutionMode(w.ExecutionMode) { + log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) + } else { + log.Printf("[TRACE] cloud: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) + tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) + if err != nil && err != tfe.ErrResourceNotFound { + diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) + return nil, nil, diags + } + + if tfeVariables != nil { + if op.Variables == nil { + op.Variables = make(map[string]backend.UnparsedVariableValue) + } + + for _, v := range tfeVariables.Items { + if v.Category == tfe.CategoryTerraform { + if _, ok := op.Variables[v.Key]; !ok { + op.Variables[v.Key] = &remoteStoredVariableValue{ + definition: v, + } + } + } + } + } + } + + if op.Variables != nil { + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } + ret.PlanOpts.SetVariables = variables + } + } + + tfCtx, ctxDiags := terraform.NewContext(&opts) + diags = diags.Append(ctxDiags) + ret.Core = tfCtx + + log.Printf("[TRACE] cloud: finished building terraform.Context") + + return ret, stateMgr, diags +} + +func (b *Cloud) getRemoteWorkspaceName(localWorkspaceName string) string { + switch { + case localWorkspaceName == backend.DefaultStateName: + // The default workspace 
name is a special case + return b.WorkspaceMapping.Name + default: + return localWorkspaceName + } +} + +func (b *Cloud) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { + remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) + + log.Printf("[TRACE] cloud: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) + remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + return nil, err + } + + return remoteWorkspace, nil +} + +func (b *Cloud) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { + remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) + if err != nil { + return "", err + } + + return remoteWorkspace.ID, nil +} + +func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) terraform.InputValues { + ret := make(terraform.InputValues, len(decls)) + + for name, cfg := range decls { + raw, exists := vv[name] + if !exists { + ret[name] = &terraform.InputValue{ + Value: cty.UnknownVal(cfg.Type), + SourceType: terraform.ValueFromConfig, + } + continue + } + + val, diags := raw.ParseVariableValue(cfg.ParsingMode) + if diags.HasErrors() { + ret[name] = &terraform.InputValue{ + Value: cty.UnknownVal(cfg.Type), + SourceType: terraform.ValueFromConfig, + } + continue + } + ret[name] = val + } + + return ret +} + +// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation +// that translates from the go-tfe representation of stored variables into +// the Terraform Core backend representation of variables. 
+type remoteStoredVariableValue struct { + definition *tfe.Variable +} + +var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) + +func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var val cty.Value + + switch { + case v.definition.Sensitive: + // If it's marked as sensitive then it's not available for use in + // local operations. We'll use an unknown value as a placeholder for + // it so that operations that don't need it might still work, but + // we'll also produce a warning about it to add context for any + // errors that might result here. + val = cty.DynamicVal + if !v.definition.HCL { + // If it's not marked as HCL then we at least know that the + // value must be a string, so we'll set that in case it allows + // us to do some more precise type checking. + val = cty.UnknownVal(cty.String) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), + )) + + case v.definition.HCL: + // If the variable value is marked as being in HCL syntax, we need to + // parse it the same way as it would be interpreted in a .tfvars + // file because that is how it would get passed to Terraform CLI for + // a remote operation and we want to mimic that result as closely as + // possible. + var exprDiags hcl.Diagnostics + expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1}) + if expr != nil { + var moreDiags hcl.Diagnostics + val, moreDiags = expr.Value(nil) + exprDiags = append(exprDiags, moreDiags...) + } else { + // We'll have already put some errors in exprDiags above, so we'll + // just stub out the value here. 
+ val = cty.DynamicVal + } + + // We don't have sufficient context to return decent error messages + // for syntax errors in the remote values, so we'll just return a + // generic message instead for now. + // (More complete error messages will still result from true remote + // operations, because they'll run on the remote system where we've + // materialized the values into a tfvars file we can report from.) + if exprDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid expression for var.%s", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key), + )) + } + + default: + // A variable value _not_ marked as HCL is always a string, given + // literally. + val = cty.StringVal(v.definition.Value) + } + + return &terraform.InputValue{ + Value: val, + + // We mark these as "from input" with the rationale that entering + // variable values into the Terraform Cloud or Enterprise UI is, + // roughly speaking, a similar idea to entering variable values at + // the interactive CLI prompts. It's not a perfect correspondence, + // but it's closer than the other options. 
+ SourceType: terraform.ValueFromInput, + }, diags +} diff --git a/cloud/backend_context_test.go b/cloud/backend_context_test.go new file mode 100644 index 000000000000..0ad957fcf8dd --- /dev/null +++ b/cloud/backend_context_test.go @@ -0,0 +1,455 @@ +package cloud + +import ( + "context" + "reflect" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestRemoteStoredVariableValue(t *testing.T) { + tests := map[string]struct { + Def *tfe.Variable + Want cty.Value + WantError string + }{ + "string literal": { + &tfe.Variable{ + Key: "test", + Value: "foo", + HCL: false, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "string HCL": { + &tfe.Variable{ + Key: "test", + Value: `"foo"`, + HCL: true, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "list HCL": { + &tfe.Variable{ + Key: "test", + Value: `[]`, + HCL: true, + Sensitive: false, + }, + cty.EmptyTupleVal, + ``, + }, + "null HCL": { + &tfe.Variable{ + Key: "test", + Value: `null`, + HCL: true, + Sensitive: false, + }, + cty.NullVal(cty.DynamicPseudoType), + ``, + }, + "literal sensitive": { + &tfe.Variable{ + Key: "test", + HCL: false, + Sensitive: true, + }, + cty.UnknownVal(cty.String), + ``, + }, + "HCL sensitive": { + &tfe.Variable{ + Key: "test", + HCL: true, + Sensitive: true, + }, + cty.DynamicVal, + ``, + }, + "HCL computation": { + // This (stored expressions containing computation) is not a case + // we intentionally supported, but it became possible for remote + // operations 
in Terraform 0.12 (due to Terraform Cloud/Enterprise + // just writing the HCL verbatim into generated `.tfvars` files). + // We support it here for consistency, and we continue to support + // it in both places for backward-compatibility. In practice, + // there's little reason to do computation in a stored variable + // value because references are not supported. + &tfe.Variable{ + Key: "test", + Value: `[for v in ["a"] : v]`, + HCL: true, + Sensitive: false, + }, + cty.TupleVal([]cty.Value{cty.StringVal("a")}), + ``, + }, + "HCL syntax error": { + &tfe.Variable{ + Key: "test", + Value: `[`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + "HCL with references": { + &tfe.Variable{ + Key: "test", + Value: `foo.bar`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + v := &remoteStoredVariableValue{ + definition: test.Def, + } + // This ParseVariableValue implementation ignores the parsing mode, + // so we'll just always parse literal here. (The parsing mode is + // selected by the remote server, not by our local configuration.) 
+ gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + got := gotIV.Value + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} + +func TestRemoteContextWithVars(t *testing.T) { + catTerraform := tfe.CategoryTerraform + catEnv := tfe.CategoryEnv + + tests := map[string]struct { + Opts *tfe.VariableCreateOptions + WantError string + }{ + "Terraform variable": { + &tfe.VariableCreateOptions{ + Category: &catTerraform, + }, + `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, + }, + "environment variable": { + &tfe.VariableCreateOptions{ + Category: &catEnv, + }, + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/empty" + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: testBackendSingleWorkspaceName, + } + + v := test.Opts + if v.Key == nil { + key := "key" + v.Key = &key + } + b.client.Variables.Create(context.TODO(), workspaceID, *v) + + _, _, diags := b.LocalRun(op) + + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + // When Context() returns an error, it should unlock the state, + // so re-locking it is expected to succeed. 
+ stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state: %s", err.Error()) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + } + }) + } +} + +func TestRemoteVariablesDoNotOverride(t *testing.T) { + catTerraform := tfe.CategoryTerraform + + varName1 := "key1" + varName2 := "key2" + varName3 := "key3" + + varValue1 := "value1" + varValue2 := "value2" + varValue3 := "value3" + + tests := map[string]struct { + localVariables map[string]backend.UnparsedVariableValue + remoteVariables []*tfe.VariableCreateOptions + expectedVariables terraform.InputValues + }{ + "no local variables": { + map[string]backend.UnparsedVariableValue{}, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, + { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: 
cty.StringVal(varValue3), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + }, + }, + "single conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue{source: terraform.ValueFromNamedFile, value: cty.StringVal(varValue3)}, + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: terraform.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + "no conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue{source: terraform.ValueFromNamedFile, value: cty.StringVal(varValue3)}, + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + }, + 
terraform.InputValues{ + varName1: &terraform.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &terraform.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: terraform.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &terraform.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: terraform.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/variables" + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: testBackendSingleWorkspaceName, + Variables: test.localVariables, + } + + for _, v := range test.remoteVariables { + b.client.Variables.Create(context.TODO(), workspaceID, *v) + } + + lr, _, diags := b.LocalRun(op) + + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := 
b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + + actual := lr.PlanOpts.SetVariables + expected := test.expectedVariables + + for expectedKey := range expected { + actualValue := actual[expectedKey] + expectedValue := expected[expectedKey] + + if !reflect.DeepEqual(*actualValue, *expectedValue) { + t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) + } + } + }) + } +} diff --git a/cloud/backend_plan.go b/cloud/backend_plan.go new file mode 100644 index 000000000000..3b2eec74d13d --- /dev/null +++ b/cloud/backend_plan.go @@ -0,0 +1,507 @@ +package cloud + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" +) + +var planConfigurationVersionsPollInterval = 500 * time.Millisecond + +func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] cloud: starting Plan operation") + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. 
In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `Terraform Cloud does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `Terraform Cloud currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if op.PlanOutPath != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saving a generated plan is currently not supported", + `Terraform Cloud does not support saving the generated execution `+ + `plan locally at this time.`, + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a Terraform configuration file in the path being executed and try again.`, + )) + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + return b.plan(stopCtx, cancelCtx, op, w) +} + +func (b *Cloud) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply || op.Type == backend.OperationTypeRefresh { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) + } + + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // De-normalize the configuration directory path. + configDir, err = filepath.Abs(op.ConfigDir) + if err != nil { + return nil, generalError( + "Failed to get absolute path of the configuration directory: %v", err) + } + + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. + configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(configDir), + filepath.Clean(w.WorkingDirectory), + )) + + // If the workspace has a subdirectory as its working directory then + // our configDir will be some parent directory of the current working + // directory. Users are likely to find that surprising, so we'll + // produce an explicit message about it to be transparent about what + // we are doing and why. + if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { + if b.CLI != nil { + b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` +The remote workspace is configured to work with configuration at +%s relative to the target repository. 
+ +Terraform will upload the contents of the following directory, +excluding files or directories as defined by a .terraformignore file +at %s/.terraformignore (if it is present), +in order to capture the filesystem context the remote workspace expects: + %s +`), w.WorkingDirectory, configDir, configDir) + "\n") + } + } + + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = ioutil.TempDir("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. + err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(planConfigurationVersionsPollInterval): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + ConfigurationVersion: cv, + Refresh: tfe.Bool(op.PlanRefresh), + Workspace: w, + AutoApply: tfe.Bool(op.AutoApprove), + } + + switch op.PlanMode { + case plans.NormalMode: + // okay, but we don't need to do 
anything special for this + case plans.RefreshOnlyMode: + runOptions.RefreshOnly = tfe.Bool(true) + case plans.DestroyMode: + runOptions.IsDestroy = tfe.Bool(true) + default: + // Shouldn't get here because we should update this for each new + // plan mode we add, mapping it to the corresponding RunCreateOptions + // field. + return nil, generalError( + "Invalid plan mode", + fmt.Errorf("Terraform Cloud doesn't support %s", op.PlanMode), + ) + } + + if len(op.Targets) != 0 { + runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) + for _, addr := range op.Targets { + runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) + } + } + + if len(op.ForceReplace) != 0 { + runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) + for _, addr := range op.ForceReplace { + runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) + } + } + + config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) + if configDiags.HasErrors() { + return nil, fmt.Errorf("error loading config with snapshot: %w", configDiags.Errs()[0]) + } + variables, varDiags := ParseCloudRunVariables(op.Variables, config.Module.Variables) + + if varDiags.HasErrors() { + return nil, varDiags.Err() + } + + runVariables := make([]*tfe.RunVariable, 0, len(variables)) + for name, value := range variables { + runVariables = append(runVariables, &tfe.RunVariable{ + Key: name, + Value: value, + }) + } + runOptions.Variables = runVariables + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + // When the lock timeout is set, if the run is still pending and + // cancellable after that period, we attempt to cancel it. + if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { + go func() { + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(lockTimeout): + // Retrieve the run to get its current status. 
+ r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto approve flag to indicate that we do not + // want to ask if the remote operation should be canceled. + op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + // Retrieve the run to get task stages. + // Task Stages are calculated upfront so we only need to call this once for the run. + taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) + if err != nil { + return r, err + } + + if stage, ok := taskStages[tfe.PrePlan]; ok { + if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-plan Tasks"); err != nil { + return r, err + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + err = b.renderPlanLogs(stopCtx, op, r) + if err != nil { + return r, err + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // If the run is canceled or errored, we still continue to the + // cost-estimation and policy check phases to ensure we render any + // results available. In the case of a hard-failed policy check, the + // status of the run will be "errored", but there is still policy + // information which should be shown. 
+ + if stage, ok := taskStages[tfe.PostPlan]; ok { + if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Post-plan Tasks"); err != nil { + return r, err + } + } + + // Show any cost estimation output. + if r.CostEstimate != nil { + err = b.costEstimate(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + // Check any configured sentinel policies. + if len(r.PolicyChecks) > 0 { + err = b.checkPolicy(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + return r, nil +} + +// renderPlanLogs reads the streamed plan JSON logs and calls the JSON Plan renderer (jsonformat.RenderPlan) to +// render the plan output. The plan output is fetched from the redacted output endpoint. +func (b *Cloud) renderPlanLogs(ctx context.Context, op *backend.Operation, run *tfe.Run) error { + logs, err := b.client.Plans.Logs(ctx, run.Plan.ID) + if err != nil { + return err + } + + if b.CLI != nil { + reader := bufio.NewReaderSize(logs, 64*1024) + + for next := true; next; { + var l, line []byte + var err error + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + + line = append(line, l...) + } + + if next || len(line) > 0 { + log := &jsonformat.JSONLog{} + if err := json.Unmarshal(line, log); err != nil { + // If we can not parse the line as JSON, we will simply + // print the line. This maintains backwards compatibility for + // users who do not wish to enable structured output in their + // workspace. + b.CLI.Output(string(line)) + continue + } + + // We will ignore plan output, change summary or outputs logs + // during the plan phase. 
+ if log.Type == jsonformat.LogOutputs || + log.Type == jsonformat.LogChangeSummary || + log.Type == jsonformat.LogPlannedChange { + continue + } + + if b.renderer != nil { + // Otherwise, we will print the log + err := b.renderer.RenderLog(log) + if err != nil { + return err + } + } + } + } + } + + // Get the run's current status and include the workspace. We will check if + // the run has errored and if structured output is enabled. + run, err = b.client.Runs.ReadWithOptions(ctx, run.ID, &tfe.RunReadOptions{ + Include: []tfe.RunIncludeOpt{tfe.RunWorkspace}, + }) + if err != nil { + return err + } + + // If the run was errored, canceled, or discarded we will not resume the rest + // of this logic and attempt to render the plan. + if run.Status == tfe.RunErrored || run.Status == tfe.RunCanceled || + run.Status == tfe.RunDiscarded { + // We won't return an error here since we need to resume the logic that + // follows after rendering the logs (run tasks, cost estimation, etc.) + return nil + } + + // Determine whether we should call the renderer to generate the plan output + // in human readable format. Otherwise we risk duplicate plan output since + // plan output may be contained in the streamed log file. + if ok, err := b.shouldRenderStructuredRunOutput(run); ok { + // Fetch the redacted plan. + redacted, err := readRedactedPlan(ctx, b.client.BaseURL(), b.token, run.Plan.ID) + if err != nil { + return err + } + + // Render plan output. + b.renderer.RenderHumanPlan(*redacted, op.PlanMode) + } else if err != nil { + return err + } + + return nil +} + +// shouldRenderStructuredRunOutput ensures the remote workspace has structured +// run output enabled and, if using Terraform Enterprise, ensures it is a release +// that supports enabling SRO for CLI-driven runs. The plan output will have +// already been rendered when the logs were read if this wasn't the case. 
+func (b *Cloud) shouldRenderStructuredRunOutput(run *tfe.Run) (bool, error) { + if b.renderer == nil || !run.Workspace.StructuredRunOutputEnabled { + return false, nil + } + + // If the cloud backend is configured against TFC, we only require that + // the workspace has structured run output enabled. + if b.client.IsCloud() && run.Workspace.StructuredRunOutputEnabled { + return true, nil + } + + // If the cloud backend is configured against TFE, ensure the release version + // supports enabling SRO for CLI runs. + if b.client.IsEnterprise() { + tfeVersion := b.client.RemoteTFEVersion() + if tfeVersion != "" { + v := strings.Split(tfeVersion[1:], "-") + releaseDate, err := strconv.Atoi(v[0]) + if err != nil { + return false, err + } + + // Any release older than 202302-1 will not support enabling SRO for + // CLI-driven runs + if releaseDate < 202302 { + return false, nil + } else if run.Workspace.StructuredRunOutputEnabled { + return true, nil + } + } + } + + // Version of TFE is unknowable + return false, nil +} + +const planDefaultHeader = ` +[reset][yellow]Running plan in Terraform Cloud. Output will stream here. Pressing Ctrl-C +will stop streaming the logs, but will not stop the plan running remotely.[reset] + +Preparing the remote plan... +` + +const runHeader = ` +[reset][yellow]To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` + +// The newline in this error is to make it look good in the CLI! +const lockTimeoutErr = ` +[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. 
+[reset] +` diff --git a/cloud/backend_plan_test.go b/cloud/backend_plan_test.go new file mode 100644 index 000000000000..c93c724a3c52 --- /dev/null +++ b/cloud/backend_plan_test.go @@ -0,0 +1,1370 @@ +package cloud + +import ( + "context" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationPlanWithTimeout(t, configDir, 0) +} + +func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. 
+ depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypePlan, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestCloud_planBasic(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } +} + +func TestCloud_planJSONBasic(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-basic") + defer configCleanup() + 
defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } +} + +func TestCloud_planCanceled(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. 
+ run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error()) + } +} + +func TestCloud_planLongLine(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planJSONFull(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-full") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != 
backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "tfcoremock_simple_resource.example: Refreshing state... [id=my-simple-resource]") { + t.Fatalf("expected plan log: %s", gotOut) + } + + if !strings.Contains(gotOut, "2 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } +} + +func TestCloud_planWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + // Create a named workspace without permissions. + w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueRun = false + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestCloud_planWithParallelism(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + 
defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestCloud_planWithPlan(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestCloud_planWithPath(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanOutPath = "./testdata/plan" + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected 
plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "generated plan is currently not supported") { + t.Fatalf("expected a generated plan error, got: %v", errOutput) + } +} + +func TestCloud_planWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + // We should find a run inside the mock client that has refresh set + // to false. + runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestCloud_planWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + // We should find a run inside the mock client that has refresh-only set + // to 
true. + runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestCloud_planWithTarget(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // When the backend code creates a new run, we'll tweak it so that it + // has a cost estimation object with the "skipped_due_to_targeting" status, + // emulating how a real server is expected to behave in that case. + b.client.Runs.(*MockRuns).ModifyNewRun = func(client *MockClient, options tfe.RunCreateOptions, run *tfe.Run) { + const fakeID = "fake" + // This is the cost estimate object embedded in the run itself which + // the backend will use to learn the ID to request from the cost + // estimates endpoint. It's pending to simulate what a freshly-created + // run is likely to look like. + run.CostEstimate = &tfe.CostEstimate{ + ID: fakeID, + Status: "pending", + } + // The backend will then use the main cost estimation API to retrieve + // the same ID indicated in the object above, where we'll then return + // the status "skipped_due_to_targeting" to trigger the special skip + // message in the backend output. 
+ client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{ + ID: fakeID, + Status: "skipped_due_to_targeting", + } + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // testBackendDefault above attached a "mock UI" to our backend, so we + // can retrieve its non-error output via the OutputWriter in-memory buffer. + gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String() + if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) { + t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput) + } + + // We should find a run inside the mock client that has the same + // target address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestCloud_planWithReplace(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestCloud_planWithRequiredVariables(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables") + defer configCleanup() + defer done(t) + + op.Variables = testVariables(terraform.ValueFromCLIArg, "foo") // "bar" variable defined in config is missing + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + // The usual error of a required variable being missing is deferred and the operation + // is successful. 
+ if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", output) + } +} + +func TestCloud_planNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } +} + +func TestCloud_planNoChanges(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summary: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestCloud_planForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use + // the local backend with itself as embedded backend. + if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWithoutOperationsEntitlement(t *testing.T) { + b, bCleanup := testBackendNoOperations(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + streams, done := terminal.StreamsForTesting(t) + view 
:= views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. + _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("unexpected TFC header in output: %s", 
output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planLockTimeout(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. 
+ signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summary in output: %s", output) + } +} + +func TestCloud_planDestroy(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func TestCloud_planDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func 
TestCloud_planWithWorkingDirectory(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("terraform"), + } + + // Configure the workspace to use a custom working directory. + _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/terraform") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "The remote workspace is configured to work with configuration") { + t.Fatalf("expected working directory warning: %s", output) + } + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("terraform"), + } + + // Configure the workspace to use a custom working directory. 
+ _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting current working directory: %v", err) + } + + // We need to change into the configuration directory to make sure + // the logic to upload the correct slug is working as expected. + if err := os.Chdir("./testdata/plan-with-working-directory/terraform"); err != nil { + t.Fatalf("error changing directory: %v", err) + } + defer os.Chdir(wd) // Make sure we change back again when were done. + + // For this test we need to give our current directory instead of the + // full path to the configuration as we already changed directories. + op, configCleanup, done := testOperationPlan(t, ".") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planCostEstimation(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", 
err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Resources: 1 of 1 estimated") { + t.Fatalf("expected cost estimate result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicyPass(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") + defer configCleanup() + + op.Workspace = 
testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check 
result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in Terraform Cloud") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected plan error in output: %s", output) + } +} + +func TestCloud_planJSONWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + // Initialize the plan renderer + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + outp := close(t) + gotOut := 
outp.Stdout() + + if !strings.Contains(gotOut, "Unsupported block type") { + t.Fatalf("unexpected plan error in output: %s", gotOut) + } +} + +func TestCloud_planOtherError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "network-error" // custom error response in backend_mock.go + + _, err := b.Operation(context.Background(), op) + if err == nil { + t.Errorf("expected error, got success") + } + + if !strings.Contains(err.Error(), + "Terraform Cloud returned an unexpected error:\n\nI'm a little teacup") { + t.Fatalf("expected error message, got: %s", err.Error()) + } +} + +func TestCloud_planShouldRenderSRO(t *testing.T) { + t.Run("when instance is TFC", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Cloud") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + t.Run("and SRO is enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, true) + }) + + t.Run("and SRO is not enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: false, + }, + } + assertSRORendered(t, b, r, false) + }) + + }) + + t.Run("when instance is TFE and version supports CLI SRO", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Enterprise") + 
w.Header().Set("X-TFE-Version", "v202303-1") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + t.Run("and SRO is enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, true) + }) + + t.Run("and SRO is not enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: false, + }, + } + assertSRORendered(t, b, r, false) + }) + }) + + t.Run("when instance is a known unsupported TFE release", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Enterprise") + w.Header().Set("X-TFE-Version", "v202208-1") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, false) + }) + + t.Run("when instance is an unknown TFE release", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, false) + }) + +} + +func assertSRORendered(t *testing.T, b *Cloud, r *tfe.Run, shouldRender bool) { + got, err := b.shouldRenderStructuredRunOutput(r) + if err != nil { + t.Fatalf("expected no error: %v", err) + } + if shouldRender != 
got { + t.Fatalf("expected SRO to be rendered: %t, got %t", shouldRender, got) + } +} diff --git a/cloud/backend_refresh_test.go b/cloud/backend_refresh_test.go new file mode 100644 index 000000000000..93f909e6c40e --- /dev/null +++ b/cloud/backend_refresh_test.go @@ -0,0 +1,79 @@ +package cloud + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/mitchellh/cli" +) + +func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationRefreshWithTimeout(t, configDir, 0) +} + +func testOperationRefreshWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeRefresh, + View: operationView, + }, configCleanup, done +} + +func TestCloud_refreshBasicActuallyRunsApplyRefresh(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + op.UIOut = b.CLI + b.CLIColor = 
b.cliColorize() + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Proceeding with 'terraform apply -refresh-only -auto-approve'") { + t.Fatalf("expected TFC header in output: %s", output) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} diff --git a/internal/cloud/backend_taskStage_policyEvaluation.go b/cloud/backend_taskStage_policyEvaluation.go similarity index 100% rename from internal/cloud/backend_taskStage_policyEvaluation.go rename to cloud/backend_taskStage_policyEvaluation.go diff --git a/internal/cloud/backend_taskStage_policyEvaluation_test.go b/cloud/backend_taskStage_policyEvaluation_test.go similarity index 100% rename from internal/cloud/backend_taskStage_policyEvaluation_test.go rename to cloud/backend_taskStage_policyEvaluation_test.go diff --git a/internal/cloud/backend_taskStage_taskResults.go b/cloud/backend_taskStage_taskResults.go similarity index 100% rename from internal/cloud/backend_taskStage_taskResults.go rename to cloud/backend_taskStage_taskResults.go diff --git a/internal/cloud/backend_taskStage_taskResults_test.go b/cloud/backend_taskStage_taskResults_test.go similarity index 100% rename from internal/cloud/backend_taskStage_taskResults_test.go rename to cloud/backend_taskStage_taskResults_test.go diff --git a/internal/cloud/backend_taskStages.go b/cloud/backend_taskStages.go similarity index 99% rename from 
internal/cloud/backend_taskStages.go rename to cloud/backend_taskStages.go index 693572daf2b6..0b8887b9021c 100644 --- a/internal/cloud/backend_taskStages.go +++ b/cloud/backend_taskStages.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/go-multierror" tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/terraform" ) type taskStages map[tfe.Stage]*tfe.TaskStage diff --git a/internal/cloud/backend_taskStages_test.go b/cloud/backend_taskStages_test.go similarity index 100% rename from internal/cloud/backend_taskStages_test.go rename to cloud/backend_taskStages_test.go diff --git a/cloud/backend_test.go b/cloud/backend_test.go new file mode 100644 index 000000000000..e5209f1126d3 --- /dev/null +++ b/cloud/backend_test.go @@ -0,0 +1,1219 @@ +package cloud + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + "testing" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +func TestCloud(t *testing.T) { + var _ backend.Enhanced = New(nil) + var _ backend.CLI = New(nil) +} + +func TestCloud_backendWithName(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("error: %v", err) + } + + if len(workspaces) != 1 || workspaces[0] != testBackendSingleWorkspaceName { + t.Fatalf("should only have a single configured workspace matching the configured 'name' strategy, but got: %#v", workspaces) + } + + if _, err := b.StateMgr("foo"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected fetching a state which is NOT the single configured workspace to have an ErrWorkspacesNotSupported error, but got: %v", err) + } + + if err := 
b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected deleting the single configured workspace name to result in an error, but got: %v", err) + } + + if err := b.DeleteWorkspace("foo", true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected deleting a workspace which is NOT the configured workspace name to result in an error, but got: %v", err) + } +} + +func TestCloud_backendWithTags(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + backend.TestBackendStates(t, b) + + // Test pagination works + for i := 0; i < 25; i++ { + _, err := b.StateMgr(fmt.Sprintf("foo-%d", i+1)) + if err != nil { + t.Fatalf("error: %s", err) + } + } + + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("error: %s", err) + } + actual := len(workspaces) + if actual != 26 { + t.Errorf("expected 26 workspaces (over one standard paginated response), got %d", actual) + } +} + +func TestCloud_PrepareConfig(t *testing.T) { + cases := map[string]struct { + config cty.Value + expectedErr string + }{ + "null organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, + }, + "null workspace": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.NullVal(cty.String), + }), + expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. 
Either workspace "tags" or "name" is required.`, + }, + "workspace: empty tags, name": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. Either workspace "tags" or "name" is required.`, + }, + "workspace: name present": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, + }, + "workspace: name and tags present": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }), + expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, + }, + } + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s)) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.Err() != nil && tc.expectedErr != "" { + actualErr := valDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + } + } +} + +func TestCloud_PrepareConfigWithEnvVars(t *testing.T) { + cases := map[string]struct { + config cty.Value + vars map[string]string + expectedErr string + }{ + "with no organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": 
cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "example-org", + }, + }, + "with no organization attribute or env var": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{}, + expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, + }, + "null workspace": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "TF_WORKSPACE": "my-workspace", + }, + }, + "organization and workspace env var": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "hashicorp", + "TF_WORKSPACE": "my-workspace", + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := testServer(t) + b := New(testDisco(s)) + + for k, v := range tc.vars { + os.Setenv(k, v) + } + t.Cleanup(func() { + for k := range tc.vars { + os.Unsetenv(k) + } + }) + + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.Err() != nil && tc.expectedErr != "" { + actualErr := valDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + } + }) + } +} + +func TestCloud_configWithEnvVars(t *testing.T) { + cases := map[string]struct { + setup func(b *Cloud) + config cty.Value + vars map[string]string + expectedOrganization string + expectedHostname string + expectedWorkspaceName string + expectedErr string + }{ + "with no organization specified": { + config: cty.ObjectVal(map[string]cty.Value{ 
+ "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "hashicorp", + }, + expectedOrganization: "hashicorp", + }, + "with both organization and env var specified": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "we-should-not-see-this", + }, + expectedOrganization: "hashicorp", + }, + "with no hostname specified": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_CLOUD_HOSTNAME": "private.hashicorp.engineering", + }, + expectedHostname: "private.hashicorp.engineering", + }, + "with hostname and env var specified": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("private.hashicorp.engineering"), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_CLOUD_HOSTNAME": "mycool.tfe-host.io", + }, + expectedHostname: "private.hashicorp.engineering", + }, + "an invalid workspace env var": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": 
cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.NullVal(cty.Object(map[string]cty.Type{ + "name": cty.String, + "tags": cty.Set(cty.String), + })), + }), + vars: map[string]string{ + "TF_WORKSPACE": "i-dont-exist-in-org", + }, + expectedErr: `Invalid workspace selection: Terraform failed to find workspace "i-dont-exist-in-org" in organization hashicorp`, + }, + "workspaces and env var specified": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("mordor"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("mt-doom"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "shire", + }, + expectedWorkspaceName: "mt-doom", + }, + "env var workspace does not have specified tag": { + setup: func(b *Cloud) { + b.client.Organizations.Create(context.Background(), tfe.OrganizationCreateOptions{ + Name: tfe.String("mordor"), + }) + + b.client.Workspaces.Create(context.Background(), "mordor", tfe.WorkspaceCreateOptions{ + Name: tfe.String("shire"), + }) + }, + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("mordor"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{ + cty.StringVal("cloud"), + }), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "shire", + }, + expectedErr: "Terraform failed to find workspace \"shire\" with the tags specified in your configuration:\n[cloud]", + }, + "env var workspace has specified tag": { + setup: func(b *Cloud) { + b.client.Organizations.Create(context.Background(), tfe.OrganizationCreateOptions{ + Name: tfe.String("mordor"), + }) + + b.client.Workspaces.Create(context.Background(), "mordor", tfe.WorkspaceCreateOptions{ + Name: 
tfe.String("shire"), + Tags: []*tfe.Tag{ + { + Name: "hobbity", + }, + }, + }) + }, + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.StringVal("mordor"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{ + cty.StringVal("hobbity"), + }), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "shire", + }, + expectedWorkspaceName: "", // No error is raised, but workspace is not set + }, + "with everything set as env vars": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "mordor", + "TF_WORKSPACE": "mt-doom", + "TF_CLOUD_HOSTNAME": "mycool.tfe-host.io", + }, + expectedOrganization: "mordor", + expectedWorkspaceName: "mt-doom", + expectedHostname: "mycool.tfe-host.io", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + b, cleanup := testUnconfiguredBackend(t) + t.Cleanup(cleanup) + + for k, v := range tc.vars { + os.Setenv(k, v) + } + + t.Cleanup(func() { + for k := range tc.vars { + os.Unsetenv(k) + } + }) + + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.Err() != nil { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + if tc.setup != nil { + tc.setup(b) + } + + diags := b.Configure(tc.config) + if (diags.Err() != nil || tc.expectedErr != "") && + (diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.expectedErr)) { + t.Fatalf("%s: unexpected configure result: %v", name, diags.Err()) + } + + if tc.expectedOrganization != "" && tc.expectedOrganization != b.organization { + t.Fatalf("%s: organization not valid: %s, expected: %s", name, b.organization, tc.expectedOrganization) + } + + if tc.expectedHostname != "" 
&& tc.expectedHostname != b.hostname { + t.Fatalf("%s: hostname not valid: %s, expected: %s", name, b.hostname, tc.expectedHostname) + } + + if tc.expectedWorkspaceName != "" && tc.expectedWorkspaceName != b.WorkspaceMapping.Name { + t.Fatalf("%s: workspace name not valid: %s, expected: %s", name, b.WorkspaceMapping.Name, tc.expectedWorkspaceName) + } + }) + } +} + +func TestCloud_config(t *testing.T) { + cases := map[string]struct { + config cty.Value + confErr string + valErr string + }{ + "with_a_non_tfe_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nontfe.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + confErr: "Host nontfe.local does not provide a tfe service", + }, + // localhost advertises TFE services, but has no token in the credentials + "without_a_token": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("localhost"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + confErr: "terraform login localhost", + }, + "with_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }), + }, + "with_a_name": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + 
"tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + }, + "without_a_name_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + valErr: `Missing workspace mapping strategy.`, + }, + "with_both_a_name_and_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }), + valErr: `Only one of workspace "tags" or "name" is allowed.`, + }, + "null config": { + config: cty.NullVal(cty.EmptyObject), + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + b, cleanup := testUnconfiguredBackend(t) + t.Cleanup(cleanup) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if (valDiags.Err() != nil || tc.valErr != "") && + (valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) { + t.Fatalf("unexpected validation result: %v", valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.confErr != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { + t.Fatalf("unexpected configure result: %v", confDiags.Err()) + } + }) + } +} + +func TestCloud_configVerifyMinimumTFEVersion(t *testing.T) { + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + 
cty.StringVal("billing"), + }, + ), + }), + }) + + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }, + } + s := testServerWithHandlers(handlers) + + b := New(testDisco(s)) + + confDiags := b.Configure(config) + if confDiags.Err() == nil { + t.Fatalf("expected configure to error") + } + + expected := `The 'cloud' option is not supported with this version of Terraform Enterprise.` + if !strings.Contains(confDiags.Err().Error(), expected) { + t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) + } +} + +func TestCloud_configVerifyMinimumTFEVersionInAutomation(t *testing.T) { + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }) + + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }, + } + s := testServerWithHandlers(handlers) + + b := New(testDisco(s)) + b.runningInAutomation = true + + confDiags := b.Configure(config) + if confDiags.Err() == nil { + t.Fatalf("expected configure to error") + } + + expected := `This version of Terraform Cloud/Enterprise does not support the state mechanism +attempting to be used by the platform. 
This should never happen.` + if !strings.Contains(confDiags.Err().Error(), expected) { + t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) + } +} + +func TestCloud_setUnavailableTerraformVersion(t *testing.T) { + // go-tfe returns an error IRL if you try to set a Terraform version that's + // not available in your TFC instance. To test this, tfe_client_mock errors if + // you try to set any Terraform version for this specific workspace name. + workspaceName := "unavailable-terraform-version" + + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("sometag"), + }, + ), + }), + }) + + b, bCleanup := testBackend(t, config, nil) + defer bCleanup() + + // Make sure the workspace doesn't exist yet -- otherwise, we can't test what + // happens when a workspace gets created. This is why we can't use "name" in + // the backend config above, btw: if you do, testBackend() creates the default + // workspace before we get a chance to do anything. 
+ _, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) + if err != tfe.ErrResourceNotFound { + t.Fatalf("the workspace we were about to try and create (%s/%s) already exists in the mocks somehow, so this test isn't trustworthy anymore", b.organization, workspaceName) + } + + _, err = b.StateMgr(workspaceName) + if err != nil { + t.Fatalf("expected no error from StateMgr, despite not being able to set remote Terraform version: %#v", err) + } + // Make sure the workspace was created: + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) + if err != nil { + t.Fatalf("b.StateMgr() didn't actually create the desired workspace") + } + // Make sure our mocks still error as expected, using the same update function b.StateMgr() would call: + _, err = b.client.Workspaces.UpdateByID( + context.Background(), + workspace.ID, + tfe.WorkspaceUpdateOptions{TerraformVersion: tfe.String("1.1.0")}, + ) + if err == nil { + t.Fatalf("the mocks aren't emulating a nonexistent remote Terraform version correctly, so this test isn't trustworthy anymore") + } +} + +func TestCloud_setConfigurationFields(t *testing.T) { + originalForceBackendEnv := os.Getenv("TF_FORCE_LOCAL_BACKEND") + + cases := map[string]struct { + obj cty.Value + expectedHostname string + expectedOrganziation string + expectedWorkspaceName string + expectedWorkspaceTags []string + expectedForceLocal bool + setEnv func() + resetEnv func() + expectedErr string + }{ + "with hostname set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "hostname": cty.StringVal("hashicorp.com"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedHostname: "hashicorp.com", + expectedOrganziation: "hashicorp", + }, + "with hostname not set, set to default hostname": { + obj: cty.ObjectVal(map[string]cty.Value{ + 
"organization": cty.StringVal("hashicorp"), + "hostname": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedHostname: defaultHostname, + expectedOrganziation: "hashicorp", + }, + "with workspace name set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "hostname": cty.StringVal("hashicorp.com"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedHostname: "hashicorp.com", + expectedOrganziation: "hashicorp", + expectedWorkspaceName: "prod", + }, + "with workspace tags set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "hostname": cty.StringVal("hashicorp.com"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }), + expectedHostname: "hashicorp.com", + expectedOrganziation: "hashicorp", + expectedWorkspaceTags: []string{"billing"}, + }, + "with force local set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "hostname": cty.StringVal("hashicorp.com"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }), + expectedHostname: "hashicorp.com", + expectedOrganziation: "hashicorp", + setEnv: func() { + os.Setenv("TF_FORCE_LOCAL_BACKEND", "1") + }, + resetEnv: func() { + os.Setenv("TF_FORCE_LOCAL_BACKEND", originalForceBackendEnv) + }, + expectedForceLocal: true, + }, + } + + for name, tc := range cases { + b := &Cloud{} + + // if `setEnv` is set, then we expect `resetEnv` to also be set + if tc.setEnv != nil { + tc.setEnv() + defer tc.resetEnv() + } + + errDiags := b.setConfigurationFields(tc.obj) + if 
errDiags.HasErrors() || tc.expectedErr != "" { + actualErr := errDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("%s: unexpected validation result: %v", name, errDiags.Err()) + } + } + + if tc.expectedHostname != "" && b.hostname != tc.expectedHostname { + t.Fatalf("%s: expected hostname %s to match configured hostname %s", name, b.hostname, tc.expectedHostname) + } + if tc.expectedOrganziation != "" && b.organization != tc.expectedOrganziation { + t.Fatalf("%s: expected organization (%s) to match configured organization (%s)", name, b.organization, tc.expectedOrganziation) + } + if tc.expectedWorkspaceName != "" && b.WorkspaceMapping.Name != tc.expectedWorkspaceName { + t.Fatalf("%s: expected workspace name mapping (%s) to match configured workspace name (%s)", name, b.WorkspaceMapping.Name, tc.expectedWorkspaceName) + } + if len(tc.expectedWorkspaceTags) > 0 { + presentSet := make(map[string]struct{}) + for _, tag := range b.WorkspaceMapping.Tags { + presentSet[tag] = struct{}{} + } + + expectedSet := make(map[string]struct{}) + for _, tag := range tc.expectedWorkspaceTags { + expectedSet[tag] = struct{}{} + } + + var missing []string + var unexpected []string + + for _, expected := range tc.expectedWorkspaceTags { + if _, ok := presentSet[expected]; !ok { + missing = append(missing, expected) + } + } + + for _, actual := range b.WorkspaceMapping.Tags { + if _, ok := expectedSet[actual]; !ok { + unexpected = append(unexpected, actual) + } + } + + if len(missing) > 0 { + t.Fatalf("%s: expected workspace tag mapping (%s) to contain the following tags: %s", name, b.WorkspaceMapping.Tags, missing) + } + + if len(unexpected) > 0 { + t.Fatalf("%s: expected workspace tag mapping (%s) to NOT contain the following tags: %s", name, b.WorkspaceMapping.Tags, unexpected) + } + + } + if tc.expectedForceLocal != false && b.forceLocal != tc.expectedForceLocal { + t.Fatalf("%s: expected force local backend to be set ", name) + } + } +} + +func 
TestCloud_localBackend(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + local, ok := b.local.(*backendLocal.Local) + if !ok { + t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) + } + + cloud, ok := local.Backend.(*Cloud) + if !ok { + t.Fatalf("expected local.Backend to be *cloud.Cloud, got: %T", cloud) + } +} + +func TestCloud_addAndRemoveWorkspacesDefault(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if err := b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } +} + +func TestCloud_StateMgr_versionCheck(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // Some fixed versions for testing with. This logic is a simple string + // comparison, so we don't need many test cases. 
+ v0135 := version.Must(version.NewSemver("0.13.5")) + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the mock remote workspace Terraform version to match the local + // Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0140.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Now change the remote workspace to a different Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0135.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should fail + want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"` + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err.Error() != want { + t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) + } +} + +func TestCloud_StateMgr_versionCheckLatest(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer 
= s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the remote workspace to the pseudo-version "latest" + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("latest"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed despite not being a string match + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion(t *testing.T) { + testCases := []struct { + local string + remote string + executionMode string + wantErr bool + }{ + {"0.13.5", "0.13.5", "agent", false}, + {"0.14.0", "0.13.5", "remote", true}, + {"0.14.0", "0.13.5", "local", false}, + {"0.14.0", "0.14.1", "remote", false}, + {"0.14.0", "1.0.99", "remote", false}, + {"0.14.0", "1.1.0", "remote", false}, + {"0.14.0", "1.3.0", "remote", true}, + {"1.2.0", "1.2.99", "remote", false}, + {"1.2.0", "1.3.0", "remote", true}, + {"0.15.0", "latest", "remote", false}, + {"1.1.5", "~> 1.1.1", "remote", false}, + {"1.1.5", "> 1.1.0, < 1.3.0", "remote", false}, + {"1.1.5", "~> 1.0.1", "remote", true}, + // pre-release versions are comparable within their pre-release stage (dev, + // alpha, beta), but not comparable to different stages and not comparable + // to final releases. 
+ {"1.1.0-beta1", "1.1.0-beta1", "remote", false}, + {"1.1.0-beta1", "~> 1.1.0-beta", "remote", false}, + {"1.1.0", "~> 1.1.0-beta", "remote", true}, + {"1.1.0-beta1", "~> 1.1.0-dev", "remote", true}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + local := version.Must(version.NewSemver(tc.local)) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: &tc.executionMode, + TerraformVersion: tfe.String(tc.remote), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if tc.wantErr { + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Incompatible Terraform version") { + t.Fatalf("unexpected error: %s", got) + } + } else { + if len(diags) != 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + } + }) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // Attempting to check the version against a workspace which doesn't exist + // should result in no errors + diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") + if len(diags) != 0 { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + // Use a special 
workspace ID to trigger a 500 error, which should result + // in a failed check + diags = b.VerifyWorkspaceTerraformVersion("network-error") + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { + t.Fatalf("unexpected error: %s", got) + } + + // Update the mock remote workspace Terraform version to an invalid version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("1.0.cheetarah"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Incompatible Terraform version: The remote workspace specified") { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // If the ignore flag is set, the behaviour changes + b.IgnoreVersionConflict() + + // Different local & remote versions to cause an error + local := version.Must(version.NewSemver("0.14.0")) + remote := version.Must(version.NewSemver("0.13.5")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + 
tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(remote.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong severity: got %#v, want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "Incompatible Terraform version"; got != want { + t.Errorf("wrong summary: got %s, want %s", got, want) + } + wantDetail := "The local Terraform version (0.14.0) does not meet the version requirements for remote workspace hashicorp/app-prod (0.13.5)." + if got := diags[0].Description().Detail; got != wantDetail { + t.Errorf("wrong summary: got %s, want %s", got, wantDetail) + } +} + +func TestClodBackend_DeleteWorkspace_SafeAndForce(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + safeDeleteWorkspaceName := "safe-delete-workspace" + forceDeleteWorkspaceName := "force-delete-workspace" + + _, err := b.StateMgr(safeDeleteWorkspaceName) + if err != nil { + t.Fatalf("error: %s", err) + } + + _, err = b.StateMgr(forceDeleteWorkspaceName) + if err != nil { + t.Fatalf("error: %s", err) + } + + // sanity check that the mock now contains two workspaces + wl, err := b.Workspaces() + if err != nil { + t.Fatalf("error fetching workspace names: %v", err) + } + if len(wl) != 2 { + t.Fatalf("expected 2 workspaced but got %d", len(wl)) + } + + c := context.Background() + safeDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, safeDeleteWorkspaceName) + if err != nil { + t.Fatalf("error fetching workspace: %v", err) + } + + // Lock a workspace so that it should fail to be safe deleted + _, err = b.client.Workspaces.Lock(context.Background(), safeDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) + if err != nil { + t.Fatalf("error locking workspace: %v", err) + 
} + err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) + if err == nil { + t.Fatalf("workspace should have failed to safe delete") + } + + // unlock the workspace and confirm that safe-delete now works + _, err = b.client.Workspaces.Unlock(context.Background(), safeDeleteWorkspace.ID) + if err != nil { + t.Fatalf("error unlocking workspace: %v", err) + } + err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) + if err != nil { + t.Fatalf("error safe deleting workspace: %v", err) + } + + // lock a workspace and then confirm that force deleting it works + forceDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, forceDeleteWorkspaceName) + if err != nil { + t.Fatalf("error fetching workspace: %v", err) + } + _, err = b.client.Workspaces.Lock(context.Background(), forceDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) + if err != nil { + t.Fatalf("error locking workspace: %v", err) + } + err = b.DeleteWorkspace(forceDeleteWorkspaceName, true) + if err != nil { + t.Fatalf("error force deleting workspace: %v", err) + } +} + +func TestClodBackend_DeleteWorkspace_DoesNotExist(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + err := b.DeleteWorkspace("non-existent-workspace", false) + if err != nil { + t.Fatalf("expected deleting a workspace which does not exist to succeed") + } +} diff --git a/internal/cloud/cloud_integration.go b/cloud/cloud_integration.go similarity index 98% rename from internal/cloud/cloud_integration.go rename to cloud/cloud_integration.go index cd1c6be96aed..8a74197a0899 100644 --- a/internal/cloud/cloud_integration.go +++ b/cloud/cloud_integration.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" + "github.com/hashicorp/terraform/backend" "github.com/mitchellh/cli" ) diff --git a/internal/cloud/cloud_variables.go b/cloud/cloud_variables.go similarity index 88% rename from internal/cloud/cloud_variables.go 
rename to cloud/cloud_variables.go index af6d6afcfbc1..253b873a85d7 100644 --- a/internal/cloud/cloud_variables.go +++ b/cloud/cloud_variables.go @@ -2,10 +2,10 @@ package cloud import ( "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) func allowedSourceType(source terraform.ValueSourceType) bool { diff --git a/internal/cloud/cloud_variables_test.go b/cloud/cloud_variables_test.go similarity index 97% rename from internal/cloud/cloud_variables_test.go rename to cloud/cloud_variables_test.go index 9780f788c154..bdaa4e4d00ea 100644 --- a/internal/cloud/cloud_variables_test.go +++ b/cloud/cloud_variables_test.go @@ -5,10 +5,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/cloud/configchangemode_string.go b/cloud/configchangemode_string.go similarity index 100% rename from internal/cloud/configchangemode_string.go rename to cloud/configchangemode_string.go diff --git a/internal/cloud/e2e/README.md b/cloud/e2e/README.md similarity index 100% rename from internal/cloud/e2e/README.md rename to cloud/e2e/README.md diff --git a/internal/cloud/e2e/apply_auto_approve_test.go b/cloud/e2e/apply_auto_approve_test.go similarity index 100% rename from 
internal/cloud/e2e/apply_auto_approve_test.go rename to cloud/e2e/apply_auto_approve_test.go diff --git a/internal/cloud/e2e/apply_no_input_flag_test.go b/cloud/e2e/apply_no_input_flag_test.go similarity index 100% rename from internal/cloud/e2e/apply_no_input_flag_test.go rename to cloud/e2e/apply_no_input_flag_test.go diff --git a/internal/cloud/e2e/backend_apply_before_init_test.go b/cloud/e2e/backend_apply_before_init_test.go similarity index 100% rename from internal/cloud/e2e/backend_apply_before_init_test.go rename to cloud/e2e/backend_apply_before_init_test.go diff --git a/internal/cloud/e2e/env_variables_test.go b/cloud/e2e/env_variables_test.go similarity index 100% rename from internal/cloud/e2e/env_variables_test.go rename to cloud/e2e/env_variables_test.go diff --git a/internal/cloud/e2e/helper_test.go b/cloud/e2e/helper_test.go similarity index 100% rename from internal/cloud/e2e/helper_test.go rename to cloud/e2e/helper_test.go diff --git a/internal/cloud/e2e/init_with_empty_tags_test.go b/cloud/e2e/init_with_empty_tags_test.go similarity index 100% rename from internal/cloud/e2e/init_with_empty_tags_test.go rename to cloud/e2e/init_with_empty_tags_test.go diff --git a/cloud/e2e/main_test.go b/cloud/e2e/main_test.go new file mode 100644 index 000000000000..156be1bb41aa --- /dev/null +++ b/cloud/e2e/main_test.go @@ -0,0 +1,249 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "strings" + "testing" + + expect "github.com/Netflix/go-expect" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/e2e" + tfversion "github.com/hashicorp/terraform/version" +) + +var terraformBin string +var cliConfigFileEnv string + +var tfeClient *tfe.Client +var tfeHostname string +var tfeToken string +var verboseMode bool + +func TestMain(m *testing.M) { + teardown := setup() + code := m.Run() + teardown() + + os.Exit(code) +} + +func accTest() bool { + // TF_ACC is set when we want to run acceptance tests, meaning it 
relies on + // network access. + return os.Getenv("TF_ACC") != "" +} + +func hasHostname() bool { + return os.Getenv("TFE_HOSTNAME") != "" +} + +func hasToken() bool { + return os.Getenv("TFE_TOKEN") != "" +} + +func hasRequiredEnvVars() bool { + return accTest() && hasHostname() && hasToken() +} + +func skipIfMissingEnvVar(t *testing.T) { + if !hasRequiredEnvVars() { + t.Skip("Skipping test, required environment variables missing. Use `TF_ACC`, `TFE_HOSTNAME`, `TFE_TOKEN`") + } +} + +func setup() func() { + tfOutput := flag.Bool("tfoutput", false, "This flag produces the terraform output from tests.") + flag.Parse() + verboseMode = *tfOutput + + setTfeClient() + teardown := setupBinary() + + return func() { + teardown() + } +} +func testRunner(t *testing.T, cases testCases, orgCount int, tfEnvFlags ...string) { + for name, tc := range cases { + tc := tc // rebind tc into this lexical scope + t.Run(name, func(subtest *testing.T) { + subtest.Parallel() + + orgNames := []string{} + for i := 0; i < orgCount; i++ { + organization, cleanup := createOrganization(t) + t.Cleanup(cleanup) + orgNames = append(orgNames, organization.Name) + } + + exp, err := expect.NewConsole(defaultOpts()...) + if err != nil { + subtest.Fatal(err) + } + defer exp.Close() + + tmpDir := t.TempDir() + + tf := e2e.NewBinary(t, terraformBin, tmpDir) + tfEnvFlags = append(tfEnvFlags, "TF_LOG=INFO") + tfEnvFlags = append(tfEnvFlags, cliConfigFileEnv) + for _, env := range tfEnvFlags { + tf.AddEnv(env) + } + + var orgName string + for index, op := range tc.operations { + switch orgCount { + case 0: + orgName = "" + case 1: + orgName = orgNames[0] + default: + orgName = orgNames[index] + } + + op.prep(t, orgName, tf.WorkDir()) + for _, tfCmd := range op.commands { + cmd := tf.Cmd(tfCmd.command...) 
+ cmd.Stdin = exp.Tty() + cmd.Stdout = exp.Tty() + cmd.Stderr = exp.Tty() + + err = cmd.Start() + if err != nil { + subtest.Fatal(err) + } + + if tfCmd.expectedCmdOutput != "" { + got, err := exp.ExpectString(tfCmd.expectedCmdOutput) + if err != nil { + subtest.Fatalf("error while waiting for output\nwant: %s\nerror: %s\noutput\n%s", tfCmd.expectedCmdOutput, err, got) + } + } + + lenInput := len(tfCmd.userInput) + lenInputOutput := len(tfCmd.postInputOutput) + if lenInput > 0 { + for i := 0; i < lenInput; i++ { + input := tfCmd.userInput[i] + exp.SendLine(input) + // use the index to find the corresponding + // output that matches the input. + if lenInputOutput-1 >= i { + output := tfCmd.postInputOutput[i] + _, err := exp.ExpectString(output) + if err != nil { + subtest.Fatal(err) + } + } + } + } + + err = cmd.Wait() + if err != nil && !tfCmd.expectError { + subtest.Fatal(err) + } + } + } + + if tc.validations != nil { + tc.validations(t, orgName) + } + }) + } +} + +func setTfeClient() { + tfeHostname = os.Getenv("TFE_HOSTNAME") + tfeToken = os.Getenv("TFE_TOKEN") + + cfg := &tfe.Config{ + Address: fmt.Sprintf("https://%s", tfeHostname), + Token: tfeToken, + } + + if tfeHostname != "" && tfeToken != "" { + // Create a new TFE client. 
+ client, err := tfe.NewClient(cfg) + if err != nil { + fmt.Printf("Could not create new tfe client: %v\n", err) + os.Exit(1) + } + tfeClient = client + } +} + +func setupBinary() func() { + log.Println("Setting up terraform binary") + tmpTerraformBinaryDir, err := ioutil.TempDir("", "terraform-test") + if err != nil { + fmt.Printf("Could not create temp directory: %v\n", err) + os.Exit(1) + } + log.Println(tmpTerraformBinaryDir) + currentDir, err := os.Getwd() + defer os.Chdir(currentDir) + if err != nil { + fmt.Printf("Could not change directories: %v\n", err) + os.Exit(1) + } + // Getting top level dir + dirPaths := strings.Split(currentDir, "/") + log.Println(currentDir) + topLevel := len(dirPaths) - 3 + topDir := strings.Join(dirPaths[0:topLevel], "/") + + if err := os.Chdir(topDir); err != nil { + fmt.Printf("Could not change directories: %v\n", err) + os.Exit(1) + } + + cmd := exec.Command( + "go", + "build", + "-o", tmpTerraformBinaryDir, + "-ldflags", fmt.Sprintf("-X \"github.com/hashicorp/terraform/version.Prerelease=%s\"", tfversion.Prerelease), + ) + err = cmd.Run() + if err != nil { + fmt.Printf("Could not run exec command: %v\n", err) + os.Exit(1) + } + + credFile := fmt.Sprintf("%s/dev.tfrc", tmpTerraformBinaryDir) + writeCredRC(credFile) + + terraformBin = fmt.Sprintf("%s/terraform", tmpTerraformBinaryDir) + cliConfigFileEnv = fmt.Sprintf("TF_CLI_CONFIG_FILE=%s", credFile) + + return func() { + os.RemoveAll(tmpTerraformBinaryDir) + } +} + +func writeCredRC(file string) { + creds := credentialBlock() + f, err := os.Create(file) + if err != nil { + fmt.Printf("Could not create file: %v\n", err) + os.Exit(1) + } + _, err = f.WriteString(creds) + if err != nil { + fmt.Printf("Could not write credentials: %v\n", err) + os.Exit(1) + } + f.Close() +} + +func credentialBlock() string { + return fmt.Sprintf(` +credentials "%s" { + token = "%s" +}`, tfeHostname, tfeToken) +} diff --git a/internal/cloud/e2e/migrate_state_multi_to_tfc_test.go 
b/cloud/e2e/migrate_state_multi_to_tfc_test.go similarity index 100% rename from internal/cloud/e2e/migrate_state_multi_to_tfc_test.go rename to cloud/e2e/migrate_state_multi_to_tfc_test.go diff --git a/internal/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go b/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go similarity index 100% rename from internal/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go rename to cloud/e2e/migrate_state_remote_backend_to_tfc_test.go diff --git a/internal/cloud/e2e/migrate_state_single_to_tfc_test.go b/cloud/e2e/migrate_state_single_to_tfc_test.go similarity index 100% rename from internal/cloud/e2e/migrate_state_single_to_tfc_test.go rename to cloud/e2e/migrate_state_single_to_tfc_test.go diff --git a/internal/cloud/e2e/migrate_state_tfc_to_other_test.go b/cloud/e2e/migrate_state_tfc_to_other_test.go similarity index 100% rename from internal/cloud/e2e/migrate_state_tfc_to_other_test.go rename to cloud/e2e/migrate_state_tfc_to_other_test.go diff --git a/internal/cloud/e2e/migrate_state_tfc_to_tfc_test.go b/cloud/e2e/migrate_state_tfc_to_tfc_test.go similarity index 100% rename from internal/cloud/e2e/migrate_state_tfc_to_tfc_test.go rename to cloud/e2e/migrate_state_tfc_to_tfc_test.go diff --git a/internal/cloud/e2e/run_variables_test.go b/cloud/e2e/run_variables_test.go similarity index 100% rename from internal/cloud/e2e/run_variables_test.go rename to cloud/e2e/run_variables_test.go diff --git a/cloud/errors.go b/cloud/errors.go new file mode 100644 index 000000000000..9d133a7865db --- /dev/null +++ b/cloud/errors.go @@ -0,0 +1,60 @@ +package cloud + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// String based errors +var ( + errApplyDiscarded = errors.New("Apply discarded.") + errDestroyDiscarded = errors.New("Destroy discarded.") + errRunApproved = errors.New("approved using the UI or API") + errRunDiscarded = errors.New("discarded 
using the UI or API") + errRunOverridden = errors.New("overridden using the UI or API") + errApplyNeedsUIConfirmation = errors.New("Cannot confirm apply due to -input=false. Please handle run confirmation in the UI.") + errPolicyOverrideNeedsUIConfirmation = errors.New("Cannot override soft failed policy checks when -input=false. Please open the run in the UI to override.") +) + +// Diagnostic error messages +var ( + invalidWorkspaceConfigMissingValues = tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + fmt.Sprintf("Missing workspace mapping strategy. Either workspace \"tags\" or \"name\" is required.\n\n%s", workspaceConfigurationHelp), + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + ) + + invalidWorkspaceConfigMisconfiguration = tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + fmt.Sprintf("Only one of workspace \"tags\" or \"name\" is allowed.\n\n%s", workspaceConfigurationHelp), + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + ) +) + +const ignoreRemoteVersionHelp = "If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace." 
+ +func missingConfigAttributeAndEnvVar(attribute string, envVar string) tfdiags.Diagnostic { + detail := strings.TrimSpace(fmt.Sprintf("\"%s\" must be set in the cloud configuration or as an environment variable: %s.\n", attribute, envVar)) + return tfdiags.AttributeValue( + tfdiags.Error, + "Invalid or missing required argument", + detail, + cty.Path{cty.GetAttrStep{Name: attribute}}) +} + +func incompatibleWorkspaceTerraformVersion(message string, ignoreVersionConflict bool) tfdiags.Diagnostic { + severity := tfdiags.Error + suggestion := ignoreRemoteVersionHelp + if ignoreVersionConflict { + severity = tfdiags.Warning + suggestion = "" + } + description := strings.TrimSpace(fmt.Sprintf("%s\n\n%s", message, suggestion)) + return tfdiags.Sourceless(severity, "Incompatible Terraform version", description) +} diff --git a/internal/cloud/migration.go b/cloud/migration.go similarity index 97% rename from internal/cloud/migration.go rename to cloud/migration.go index 069d1b28ebfd..1ffda5d23fc2 100644 --- a/internal/cloud/migration.go +++ b/cloud/migration.go @@ -1,8 +1,8 @@ package cloud import ( - "github.com/hashicorp/terraform/internal/configs" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) // Most of the logic for migrating into and out of "cloud mode" actually lives diff --git a/internal/cloud/migration_test.go b/cloud/migration_test.go similarity index 96% rename from internal/cloud/migration_test.go rename to cloud/migration_test.go index f1ae0f48eca5..a3415dcd34e5 100644 --- a/internal/cloud/migration_test.go +++ b/cloud/migration_test.go @@ -3,8 +3,8 @@ package cloud import ( "testing" - "github.com/hashicorp/terraform/internal/configs" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) func 
TestDetectConfigChangeType(t *testing.T) { diff --git a/cloud/remote_test.go b/cloud/remote_test.go new file mode 100644 index 000000000000..e41bdfd9ce5d --- /dev/null +++ b/cloud/remote_test.go @@ -0,0 +1,25 @@ +package cloud + +import ( + "flag" + "os" + "testing" + "time" + + _ "github.com/hashicorp/terraform/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + + // Make sure TF_FORCE_LOCAL_BACKEND is unset + os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + // Reduce delays to make tests run faster + backoffMin = 1.0 + backoffMax = 1.0 + planConfigurationVersionsPollInterval = 1 * time.Millisecond + runPollInterval = 1 * time.Millisecond + + os.Exit(m.Run()) +} diff --git a/cloud/state.go b/cloud/state.go new file mode 100644 index 000000000000..eeb0348b02fa --- /dev/null +++ b/cloud/state.go @@ -0,0 +1,507 @@ +package cloud + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "strings" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + tfe "github.com/hashicorp/go-tfe" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/remote" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state to TFC. This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. 
+type State struct { + mu sync.Mutex + + // We track two pieces of meta data in addition to the state itself: + // + // lineage - the state's unique ID + // serial - the monotonic counter of "versions" of the state + // + // Both of these (along with state) have a sister field + // that represents the values read in from an existing source. + // All three of these values are used to determine if the new + // state has changed from an existing state we read in. + lineage, readLineage string + serial, readSerial uint64 + state, readState *states.State + disableLocks bool + tfeClient *tfe.Client + organization string + workspace *tfe.Workspace + stateUploadErr bool + forcePush bool + lockInfo *statemgr.LockInfo +} + +var ErrStateVersionUnauthorizedUpgradeState = errors.New(strings.TrimSpace(` +You are not authorized to read the full state version containing outputs. +State versions created by terraform v1.3.0 and newer do not require this level +of authorization and therefore this error can usually be fixed by upgrading the +remote state version. +`)) + +var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) + +// statemgr.Reader impl. +func (s *State) State() *states.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + +// WriteStateForMigration is part of our implementation of statemgr.Migrator. 
+func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !force { + checkFile := statefile.New(s.state, s.lineage, s.serial) + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + s.forcePush = force + + return nil +} + +// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended +// to be called during initialization of a state manager and should not be +// called after any of the statemgr.Full interface methods have been called. +func (s *State) DisableLocks() { + s.disableLocks = true +} + +// StateSnapshotMeta returns the metadata from the most recently persisted +// or refreshed persistent state snapshot. +// +// This is an implementation of statemgr.PersistentMeta. +func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { + return statemgr.SnapshotMeta{ + Lineage: s.lineage, + Serial: s.serial, + } +} + +// statemgr.Writer impl. +func (s *State) WriteState(state *states.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. 
+ s.state = state.DeepCopy() + s.forcePush = false + + return nil +} + +// PersistState uploads a snapshot of the latest state as a StateVersion to Terraform Cloud +func (s *State) PersistState(schemas *terraform.Schemas) error { + s.mu.Lock() + defer s.mu.Unlock() + + log.Printf("[DEBUG] cloud/state: state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] cloud/state: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.readState != nil { + lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage + serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial + stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) + if stateUnchanged && lineageUnchanged && serialUnchanged { + // If the state, lineage or serial haven't changed at all then we have nothing to do. + return nil + } + s.serial++ + } else { + // We might be writing a new state altogether, but before we do that + // we'll check to make sure there isn't already a snapshot present + // that we ought to be updating. 
+ err := s.refreshState() + if err != nil { + return fmt.Errorf("failed checking for existing remote state: %s", err) + } + log.Printf("[DEBUG] cloud/state: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] cloud/state: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.lineage == "" { // indicates that no state snapshot is present yet + lineage, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate initial lineage: %v", err) + } + s.lineage = lineage + s.serial++ + } + } + + f := statefile.New(s.state, s.lineage, s.serial) + + var buf bytes.Buffer + err := statefile.Write(f, &buf) + if err != nil { + return err + } + + var jsonState []byte + if schemas != nil { + jsonState, err = jsonstate.Marshal(f, schemas) + if err != nil { + return err + } + } + + stateFile, err := statefile.Read(bytes.NewReader(buf.Bytes())) + if err != nil { + return fmt.Errorf("failed to read state: %w", err) + } + + ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) + if err != nil { + return fmt.Errorf("failed to translate outputs: %w", err) + } + jsonStateOutputs, err := json.Marshal(ov) + if err != nil { + return fmt.Errorf("failed to marshal outputs to json: %w", err) + } + + err = s.uploadState(s.lineage, s.serial, s.forcePush, buf.Bytes(), jsonState, jsonStateOutputs) + if err != nil { + s.stateUploadErr = true + return fmt.Errorf("error uploading state: %w", err) + } + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. + // We've potentially overwritten (via force) the state, lineage + // and / or serial (and serial was incremented) so we copy over all + // three fields so everything matches the new state and a subsequent + // operation would correctly detect no changes to the lineage, serial or state. 
+ s.readState = s.state.DeepCopy() + s.readLineage = s.lineage + s.readSerial = s.serial + return nil +} + +func (s *State) uploadState(lineage string, serial uint64, isForcePush bool, state, jsonState, jsonStateOutputs []byte) error { + ctx := context.Background() + + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(lineage), + Serial: tfe.Int64(int64(serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + Force: tfe.Bool(isForcePush), + JSONState: tfe.String(base64.StdEncoding.EncodeToString(jsonState)), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(jsonStateOutputs)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + runID := os.Getenv("TFE_RUN_ID") + if runID != "" { + options.Run = &tfe.Run{ID: runID} + } + // Create the new state. + _, err := s.tfeClient.StateVersions.Create(ctx, s.workspace.ID, options) + return err +} + +// Lock calls the Client's Lock method if it's implemented. +func (s *State) Lock(info *statemgr.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return "", nil + } + ctx := context.Background() + + lockErr := &statemgr.LockError{Info: s.lockInfo} + + // Lock the workspace. + _, err := s.tfeClient.Workspaces.Lock(ctx, s.workspace.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by Terraform"), + }) + if err != nil { + if err == tfe.ErrWorkspaceLocked { + lockErr.Info = info + err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, s.organization, s.workspace.Name) + } + lockErr.Err = err + return "", lockErr + } + + s.lockInfo = info + + return s.lockInfo.ID, nil +} + +// statemgr.Refresher impl. 
+func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.refreshState() +} + +// refreshState is the main implementation of RefreshState, but split out so +// that we can make internal calls to it from methods that are already holding +// the s.mu lock. +func (s *State) refreshState() error { + payload, err := s.getStatePayload() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + s.readState = nil + s.lineage = "" + s.serial = 0 + return nil + } + + stateFile, err := statefile.Read(bytes.NewReader(payload.Data)) + if err != nil { + return err + } + + s.lineage = stateFile.Lineage + s.serial = stateFile.Serial + s.state = stateFile.State + + // Properties from the remote must be separate so we can + // track changes as lineage, serial and/or state are mutated + s.readLineage = stateFile.Lineage + s.readSerial = stateFile.Serial + s.readState = s.state.DeepCopy() + return nil +} + +func (s *State) getStatePayload() (*remote.Payload, error) { + ctx := context.Background() + + sv, err := s.tfeClient.StateVersions.ReadCurrent(ctx, s.workspace.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. + return nil, nil + } + return nil, fmt.Errorf("error retrieving state: %v", err) + } + + state, err := s.tfeClient.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("error downloading state: %v", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +// Unlock calls the Client's Unlock method if it's implemented. +func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return nil + } + + ctx := context.Background() + + // We first check if there was an error while uploading the latest + // state. 
If so, we will not unlock the workspace to prevent any + // changes from being applied until the correct state is uploaded. + if s.stateUploadErr { + return nil + } + + lockErr := &statemgr.LockError{Info: s.lockInfo} + + // With lock info this should be treated as a normal unlock. + if s.lockInfo != nil { + // Verify the expected lock ID. + if s.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Unlock the workspace. + _, err := s.tfeClient.Workspaces.Unlock(ctx, s.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil + } + + // Verify the optional force-unlock lock ID. + if s.organization+"/"+s.workspace.Name != id { + lockErr.Err = fmt.Errorf( + "lock ID %q does not match existing lock ID \"%s/%s\"", + id, + s.organization, + s.workspace.Name, + ) + return lockErr + } + + // Force unlock the workspace. + _, err := s.tfeClient.Workspaces.ForceUnlock(ctx, s.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} + +// Delete the remote state. 
+func (s *State) Delete(force bool) error { + + var err error + + isSafeDeleteSupported := s.workspace.Permissions.CanForceDelete != nil + if force || !isSafeDeleteSupported { + err = s.tfeClient.Workspaces.Delete(context.Background(), s.organization, s.workspace.Name) + } else { + err = s.tfeClient.Workspaces.SafeDelete(context.Background(), s.organization, s.workspace.Name) + } + + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("error deleting workspace %s: %v", s.workspace.Name, err) + } + + return nil +} + +// GetRootOutputValues fetches output values from Terraform Cloud +func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { + ctx := context.Background() + + so, err := s.tfeClient.StateVersionOutputs.ReadCurrent(ctx, s.workspace.ID) + + if err != nil { + return nil, fmt.Errorf("could not read state version outputs: %w", err) + } + + result := make(map[string]*states.OutputValue) + + for _, output := range so.Items { + if output.DetailedType == nil { + // If there is no detailed type information available, this state was probably created + // with a version of terraform < 1.3.0. In this case, we'll eject completely from this + // function and fall back to the old behavior of reading the entire state file, which + // requires a higher level of authorization. + log.Printf("[DEBUG] falling back to reading full state") + + if err := s.RefreshState(); err != nil { + return nil, fmt.Errorf("failed to load state: %w", err) + } + + state := s.State() + if state == nil { + // We know that there is supposed to be state (and this is not simply a new workspace + // without state) because the fallback is only invoked when outputs are present but + // detailed types are not available. 
+ return nil, ErrStateVersionUnauthorizedUpgradeState + } + + return state.RootModule().OutputValues, nil + } + + if output.Sensitive { + // Since this is a sensitive value, the output must be requested explicitly in order to + // read its value, which is assumed to be present by callers + sensitiveOutput, err := s.tfeClient.StateVersionOutputs.Read(ctx, output.ID) + if err != nil { + return nil, fmt.Errorf("could not read state version output %s: %w", output.ID, err) + } + output.Value = sensitiveOutput.Value + } + + cval, err := tfeOutputToCtyValue(*output) + if err != nil { + return nil, fmt.Errorf("could not decode output %s (ID %s)", output.Name, output.ID) + } + + result[output.Name] = &states.OutputValue{ + Value: cval, + Sensitive: output.Sensitive, + } + } + + return result, nil +} + +// tfeOutputToCtyValue decodes a combination of TFE output value and detailed-type to create a +// cty value that is suitable for use in terraform. +func tfeOutputToCtyValue(output tfe.StateVersionOutput) (cty.Value, error) { + var result cty.Value + bufType, err := json.Marshal(output.DetailedType) + if err != nil { + return result, fmt.Errorf("could not marshal output %s type: %w", output.ID, err) + } + + var ctype cty.Type + err = ctype.UnmarshalJSON(bufType) + if err != nil { + return result, fmt.Errorf("could not interpret output %s type: %w", output.ID, err) + } + + result, err = gocty.ToCtyValue(output.Value, ctype) + if err != nil { + return result, fmt.Errorf("could not interpret value %v as type %s for output %s: %w", result, ctype.FriendlyName(), output.ID, err) + } + + return result, nil +} diff --git a/cloud/state_test.go b/cloud/state_test.go new file mode 100644 index 000000000000..71abe6fcdc73 --- /dev/null +++ b/cloud/state_test.go @@ -0,0 +1,272 @@ +package cloud + +import ( + "bytes" + "context" + "io/ioutil" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/states/statefile" + 
"github.com/hashicorp/terraform/states/statemgr" +) + +func TestState_impl(t *testing.T) { + var _ statemgr.Reader = new(State) + var _ statemgr.Writer = new(State) + var _ statemgr.Persister = new(State) + var _ statemgr.Refresher = new(State) + var _ statemgr.OutputReader = new(State) + var _ statemgr.Locker = new(State) +} + +type ExpectedOutput struct { + Name string + Sensitive bool + IsNull bool +} + +func TestState_GetRootOutputValues(t *testing.T) { + b, bCleanup := testBackendWithOutputs(t) + defer bCleanup() + + state := &State{tfeClient: b.client, organization: b.organization, workspace: &tfe.Workspace{ + ID: "ws-abcd", + }} + outputs, err := state.GetRootOutputValues() + + if err != nil { + t.Fatalf("error returned from GetRootOutputValues: %s", err) + } + + cases := []ExpectedOutput{ + { + Name: "sensitive_output", + Sensitive: true, + IsNull: false, + }, + { + Name: "nonsensitive_output", + Sensitive: false, + IsNull: false, + }, + { + Name: "object_output", + Sensitive: false, + IsNull: false, + }, + { + Name: "list_output", + Sensitive: false, + IsNull: false, + }, + } + + if len(outputs) != len(cases) { + t.Errorf("Expected %d item but %d were returned", len(cases), len(outputs)) + } + + for _, testCase := range cases { + so, ok := outputs[testCase.Name] + if !ok { + t.Fatalf("Expected key %s but it was not found", testCase.Name) + } + if so.Value.IsNull() != testCase.IsNull { + t.Errorf("Key %s does not match null expectation %v", testCase.Name, testCase.IsNull) + } + if so.Sensitive != testCase.Sensitive { + t.Errorf("Key %s does not match sensitive expectation %v", testCase.Name, testCase.Sensitive) + } + } +} + +func TestState(t *testing.T) { + var buf bytes.Buffer + s := statemgr.TestFullInitialState() + sf := statefile.New(s, "stub-lineage", 2) + err := statefile.Write(sf, &buf) + if err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + state := testCloudState(t) + + jsonState, err := 
ioutil.ReadFile("../command/testdata/show-json-state/sensitive-variables/output.json") + if err != nil { + t.Fatal(err) + } + + jsonStateOutputs := []byte(` +{ + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +}`) + + if err := state.uploadState(state.lineage, state.serial, state.forcePush, data, jsonState, jsonStateOutputs); err != nil { + t.Fatalf("put: %s", err) + } + + payload, err := state.getStatePayload() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(payload.Data, data) { + t.Fatalf("expected full state %q\n\ngot: %q", string(payload.Data), string(data)) + } + + if err := state.Delete(true); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err := state.getStatePayload() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("expected empty state, got: %q", string(p.Data)) + } +} + +func TestCloudLocks(t *testing.T) { + back, bCleanup := testBackendWithName(t) + defer bCleanup() + + a, err := back.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + b, err := back.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + lockerA, ok := a.(statemgr.Locker) + if !ok { + t.Fatal("client A not a statemgr.Locker") + } + + lockerB, ok := b.(statemgr.Locker) + if !ok { + t.Fatal("client B not a statemgr.Locker") + } + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + if _, ok := err.(*statemgr.LockError); !ok { + t.Errorf("expected a LockError, but was %t: %s", err, err) + } + + if err := lockerA.Unlock(lockIDA); err != nil 
{ + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } +} + +func TestDelete_SafeDeleteNotSupported(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = nil + state.workspace.ResourceCount = 5 + + // Typically delete(false) should safe-delete a cloud workspace, which should fail on this workspace with resources + // However, since we have set the workspace canForceDelete permission to nil, we should fall back to force delete + if err := state.Delete(false); err != nil { + t.Fatalf("delete: %s", err) + } + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestDelete_ForceDelete(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = tfe.Bool(true) + state.workspace.ResourceCount = 5 + + if err := state.Delete(true); err != nil { + t.Fatalf("delete: %s", err) + } + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestDelete_SafeDelete(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = tfe.Bool(false) + state.workspace.ResourceCount = 5 + + // safe-deleting a workspace with resources should fail + err := state.Delete(false) + if err == nil { + t.Fatalf("workspace should have failed to safe delete") + } + + // safe-deleting a workspace with resources 
should succeed once it has no resources + state.workspace.ResourceCount = 0 + if err = state.Delete(false); err != nil { + t.Fatalf("workspace safe-delete err: %s", err) + } + + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestState_PersistState(t *testing.T) { + cloudState := testCloudState(t) + + t.Run("Initial PersistState", func(t *testing.T) { + if cloudState.readState != nil { + t.Fatal("expected nil initial readState") + } + + err := cloudState.PersistState(nil) + if err != nil { + t.Fatalf("expected no error, got %q", err) + } + + var expectedSerial uint64 = 1 + if cloudState.readSerial != expectedSerial { + t.Fatalf("expected initial state readSerial to be %d, got %d", expectedSerial, cloudState.readSerial) + } + }) +} diff --git a/internal/cloud/testdata/.gitignore b/cloud/testdata/.gitignore similarity index 100% rename from internal/cloud/testdata/.gitignore rename to cloud/testdata/.gitignore diff --git a/internal/cloud/testdata/apply-destroy/apply.log b/cloud/testdata/apply-destroy/apply.log similarity index 100% rename from internal/cloud/testdata/apply-destroy/apply.log rename to cloud/testdata/apply-destroy/apply.log diff --git a/internal/cloud/testdata/apply-destroy/main.tf b/cloud/testdata/apply-destroy/main.tf similarity index 100% rename from internal/cloud/testdata/apply-destroy/main.tf rename to cloud/testdata/apply-destroy/main.tf diff --git a/internal/cloud/testdata/apply-destroy/plan.log b/cloud/testdata/apply-destroy/plan.log similarity index 100% rename from internal/cloud/testdata/apply-destroy/plan.log rename to cloud/testdata/apply-destroy/plan.log diff --git a/internal/cloud/testdata/apply-json-with-error/main.tf b/cloud/testdata/apply-json-with-error/main.tf similarity index 100% rename from internal/cloud/testdata/apply-json-with-error/main.tf rename to 
cloud/testdata/apply-json-with-error/main.tf diff --git a/internal/cloud/testdata/apply-json-with-error/plan-redacted.json b/cloud/testdata/apply-json-with-error/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/apply-json-with-error/plan-redacted.json rename to cloud/testdata/apply-json-with-error/plan-redacted.json diff --git a/internal/cloud/testdata/apply-json-with-error/plan.log b/cloud/testdata/apply-json-with-error/plan.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-error/plan.log rename to cloud/testdata/apply-json-with-error/plan.log diff --git a/internal/cloud/testdata/apply-json-with-outputs/apply.log b/cloud/testdata/apply-json-with-outputs/apply.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-outputs/apply.log rename to cloud/testdata/apply-json-with-outputs/apply.log diff --git a/internal/cloud/testdata/apply-json-with-outputs/main.tf b/cloud/testdata/apply-json-with-outputs/main.tf similarity index 100% rename from internal/cloud/testdata/apply-json-with-outputs/main.tf rename to cloud/testdata/apply-json-with-outputs/main.tf diff --git a/internal/cloud/testdata/apply-json-with-outputs/plan-redacted.json b/cloud/testdata/apply-json-with-outputs/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/apply-json-with-outputs/plan-redacted.json rename to cloud/testdata/apply-json-with-outputs/plan-redacted.json diff --git a/internal/cloud/testdata/apply-json-with-outputs/plan.log b/cloud/testdata/apply-json-with-outputs/plan.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-outputs/plan.log rename to cloud/testdata/apply-json-with-outputs/plan.log diff --git a/internal/cloud/testdata/apply-json-with-provisioner-error/apply.log b/cloud/testdata/apply-json-with-provisioner-error/apply.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner-error/apply.log rename to 
cloud/testdata/apply-json-with-provisioner-error/apply.log diff --git a/internal/cloud/testdata/apply-json-with-provisioner-error/main.tf b/cloud/testdata/apply-json-with-provisioner-error/main.tf similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner-error/main.tf rename to cloud/testdata/apply-json-with-provisioner-error/main.tf diff --git a/internal/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json b/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json rename to cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json diff --git a/internal/cloud/testdata/apply-json-with-provisioner-error/plan.log b/cloud/testdata/apply-json-with-provisioner-error/plan.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner-error/plan.log rename to cloud/testdata/apply-json-with-provisioner-error/plan.log diff --git a/internal/cloud/testdata/apply-json-with-provisioner/apply.log b/cloud/testdata/apply-json-with-provisioner/apply.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner/apply.log rename to cloud/testdata/apply-json-with-provisioner/apply.log diff --git a/internal/cloud/testdata/apply-json-with-provisioner/main.tf b/cloud/testdata/apply-json-with-provisioner/main.tf similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner/main.tf rename to cloud/testdata/apply-json-with-provisioner/main.tf diff --git a/internal/cloud/testdata/apply-json-with-provisioner/plan-redacted.json b/cloud/testdata/apply-json-with-provisioner/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner/plan-redacted.json rename to cloud/testdata/apply-json-with-provisioner/plan-redacted.json diff --git a/internal/cloud/testdata/apply-json-with-provisioner/plan.log 
b/cloud/testdata/apply-json-with-provisioner/plan.log similarity index 100% rename from internal/cloud/testdata/apply-json-with-provisioner/plan.log rename to cloud/testdata/apply-json-with-provisioner/plan.log diff --git a/internal/cloud/testdata/apply-json/apply.log b/cloud/testdata/apply-json/apply.log similarity index 100% rename from internal/cloud/testdata/apply-json/apply.log rename to cloud/testdata/apply-json/apply.log diff --git a/internal/cloud/testdata/apply-json/main.tf b/cloud/testdata/apply-json/main.tf similarity index 100% rename from internal/cloud/testdata/apply-json/main.tf rename to cloud/testdata/apply-json/main.tf diff --git a/internal/cloud/testdata/apply-json/plan-redacted.json b/cloud/testdata/apply-json/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/apply-json/plan-redacted.json rename to cloud/testdata/apply-json/plan-redacted.json diff --git a/internal/cloud/testdata/apply-json/plan.log b/cloud/testdata/apply-json/plan.log similarity index 100% rename from internal/cloud/testdata/apply-json/plan.log rename to cloud/testdata/apply-json/plan.log diff --git a/internal/cloud/testdata/apply-no-changes/main.tf b/cloud/testdata/apply-no-changes/main.tf similarity index 100% rename from internal/cloud/testdata/apply-no-changes/main.tf rename to cloud/testdata/apply-no-changes/main.tf diff --git a/internal/cloud/testdata/apply-no-changes/plan.log b/cloud/testdata/apply-no-changes/plan.log similarity index 100% rename from internal/cloud/testdata/apply-no-changes/plan.log rename to cloud/testdata/apply-no-changes/plan.log diff --git a/internal/cloud/testdata/apply-no-changes/policy.log b/cloud/testdata/apply-no-changes/policy.log similarity index 100% rename from internal/cloud/testdata/apply-no-changes/policy.log rename to cloud/testdata/apply-no-changes/policy.log diff --git a/internal/cloud/testdata/apply-policy-hard-failed/main.tf b/cloud/testdata/apply-policy-hard-failed/main.tf similarity index 100% rename from 
internal/cloud/testdata/apply-policy-hard-failed/main.tf rename to cloud/testdata/apply-policy-hard-failed/main.tf diff --git a/internal/cloud/testdata/apply-policy-hard-failed/plan.log b/cloud/testdata/apply-policy-hard-failed/plan.log similarity index 100% rename from internal/cloud/testdata/apply-policy-hard-failed/plan.log rename to cloud/testdata/apply-policy-hard-failed/plan.log diff --git a/internal/cloud/testdata/apply-policy-hard-failed/policy.log b/cloud/testdata/apply-policy-hard-failed/policy.log similarity index 100% rename from internal/cloud/testdata/apply-policy-hard-failed/policy.log rename to cloud/testdata/apply-policy-hard-failed/policy.log diff --git a/internal/cloud/testdata/apply-policy-passed/apply.log b/cloud/testdata/apply-policy-passed/apply.log similarity index 100% rename from internal/cloud/testdata/apply-policy-passed/apply.log rename to cloud/testdata/apply-policy-passed/apply.log diff --git a/internal/cloud/testdata/apply-policy-passed/main.tf b/cloud/testdata/apply-policy-passed/main.tf similarity index 100% rename from internal/cloud/testdata/apply-policy-passed/main.tf rename to cloud/testdata/apply-policy-passed/main.tf diff --git a/internal/cloud/testdata/apply-policy-passed/plan.log b/cloud/testdata/apply-policy-passed/plan.log similarity index 100% rename from internal/cloud/testdata/apply-policy-passed/plan.log rename to cloud/testdata/apply-policy-passed/plan.log diff --git a/internal/cloud/testdata/apply-policy-passed/policy.log b/cloud/testdata/apply-policy-passed/policy.log similarity index 100% rename from internal/cloud/testdata/apply-policy-passed/policy.log rename to cloud/testdata/apply-policy-passed/policy.log diff --git a/internal/cloud/testdata/apply-policy-soft-failed/apply.log b/cloud/testdata/apply-policy-soft-failed/apply.log similarity index 100% rename from internal/cloud/testdata/apply-policy-soft-failed/apply.log rename to cloud/testdata/apply-policy-soft-failed/apply.log diff --git 
a/internal/cloud/testdata/apply-policy-soft-failed/main.tf b/cloud/testdata/apply-policy-soft-failed/main.tf similarity index 100% rename from internal/cloud/testdata/apply-policy-soft-failed/main.tf rename to cloud/testdata/apply-policy-soft-failed/main.tf diff --git a/internal/cloud/testdata/apply-policy-soft-failed/plan.log b/cloud/testdata/apply-policy-soft-failed/plan.log similarity index 100% rename from internal/cloud/testdata/apply-policy-soft-failed/plan.log rename to cloud/testdata/apply-policy-soft-failed/plan.log diff --git a/internal/cloud/testdata/apply-policy-soft-failed/policy.log b/cloud/testdata/apply-policy-soft-failed/policy.log similarity index 100% rename from internal/cloud/testdata/apply-policy-soft-failed/policy.log rename to cloud/testdata/apply-policy-soft-failed/policy.log diff --git a/internal/cloud/testdata/apply-variables/apply.log b/cloud/testdata/apply-variables/apply.log similarity index 100% rename from internal/cloud/testdata/apply-variables/apply.log rename to cloud/testdata/apply-variables/apply.log diff --git a/internal/cloud/testdata/apply-variables/main.tf b/cloud/testdata/apply-variables/main.tf similarity index 100% rename from internal/cloud/testdata/apply-variables/main.tf rename to cloud/testdata/apply-variables/main.tf diff --git a/internal/cloud/testdata/apply-variables/plan.log b/cloud/testdata/apply-variables/plan.log similarity index 100% rename from internal/cloud/testdata/apply-variables/plan.log rename to cloud/testdata/apply-variables/plan.log diff --git a/internal/cloud/testdata/apply-with-error/main.tf b/cloud/testdata/apply-with-error/main.tf similarity index 100% rename from internal/cloud/testdata/apply-with-error/main.tf rename to cloud/testdata/apply-with-error/main.tf diff --git a/internal/cloud/testdata/apply-with-error/plan.log b/cloud/testdata/apply-with-error/plan.log similarity index 100% rename from internal/cloud/testdata/apply-with-error/plan.log rename to 
cloud/testdata/apply-with-error/plan.log diff --git a/internal/cloud/testdata/apply/apply.log b/cloud/testdata/apply/apply.log similarity index 100% rename from internal/cloud/testdata/apply/apply.log rename to cloud/testdata/apply/apply.log diff --git a/internal/cloud/testdata/apply/main.tf b/cloud/testdata/apply/main.tf similarity index 100% rename from internal/cloud/testdata/apply/main.tf rename to cloud/testdata/apply/main.tf diff --git a/internal/cloud/testdata/apply/plan.log b/cloud/testdata/apply/plan.log similarity index 100% rename from internal/cloud/testdata/apply/plan.log rename to cloud/testdata/apply/plan.log diff --git a/internal/cloud/testdata/empty/.gitignore b/cloud/testdata/empty/.gitignore similarity index 100% rename from internal/cloud/testdata/empty/.gitignore rename to cloud/testdata/empty/.gitignore diff --git a/internal/cloud/testdata/plan-cost-estimation/ce.log b/cloud/testdata/plan-cost-estimation/ce.log similarity index 100% rename from internal/cloud/testdata/plan-cost-estimation/ce.log rename to cloud/testdata/plan-cost-estimation/ce.log diff --git a/internal/cloud/testdata/plan-cost-estimation/cost-estimate.log b/cloud/testdata/plan-cost-estimation/cost-estimate.log similarity index 100% rename from internal/cloud/testdata/plan-cost-estimation/cost-estimate.log rename to cloud/testdata/plan-cost-estimation/cost-estimate.log diff --git a/internal/cloud/testdata/plan-cost-estimation/main.tf b/cloud/testdata/plan-cost-estimation/main.tf similarity index 100% rename from internal/cloud/testdata/plan-cost-estimation/main.tf rename to cloud/testdata/plan-cost-estimation/main.tf diff --git a/internal/cloud/testdata/plan-cost-estimation/plan.log b/cloud/testdata/plan-cost-estimation/plan.log similarity index 100% rename from internal/cloud/testdata/plan-cost-estimation/plan.log rename to cloud/testdata/plan-cost-estimation/plan.log diff --git a/internal/cloud/testdata/plan-json-basic/main.tf b/cloud/testdata/plan-json-basic/main.tf 
similarity index 100% rename from internal/cloud/testdata/plan-json-basic/main.tf rename to cloud/testdata/plan-json-basic/main.tf diff --git a/internal/cloud/testdata/plan-json-basic/plan-redacted.json b/cloud/testdata/plan-json-basic/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/plan-json-basic/plan-redacted.json rename to cloud/testdata/plan-json-basic/plan-redacted.json diff --git a/internal/cloud/testdata/plan-json-basic/plan.log b/cloud/testdata/plan-json-basic/plan.log similarity index 100% rename from internal/cloud/testdata/plan-json-basic/plan.log rename to cloud/testdata/plan-json-basic/plan.log diff --git a/internal/cloud/testdata/plan-json-error/main.tf b/cloud/testdata/plan-json-error/main.tf similarity index 100% rename from internal/cloud/testdata/plan-json-error/main.tf rename to cloud/testdata/plan-json-error/main.tf diff --git a/internal/cloud/testdata/plan-json-error/plan-redacted.json b/cloud/testdata/plan-json-error/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/plan-json-error/plan-redacted.json rename to cloud/testdata/plan-json-error/plan-redacted.json diff --git a/internal/cloud/testdata/plan-json-error/plan.log b/cloud/testdata/plan-json-error/plan.log similarity index 100% rename from internal/cloud/testdata/plan-json-error/plan.log rename to cloud/testdata/plan-json-error/plan.log diff --git a/internal/cloud/testdata/plan-json-full/main.tf b/cloud/testdata/plan-json-full/main.tf similarity index 100% rename from internal/cloud/testdata/plan-json-full/main.tf rename to cloud/testdata/plan-json-full/main.tf diff --git a/internal/cloud/testdata/plan-json-full/plan-redacted.json b/cloud/testdata/plan-json-full/plan-redacted.json similarity index 100% rename from internal/cloud/testdata/plan-json-full/plan-redacted.json rename to cloud/testdata/plan-json-full/plan-redacted.json diff --git a/internal/cloud/testdata/plan-json-full/plan.log b/cloud/testdata/plan-json-full/plan.log 
similarity index 100% rename from internal/cloud/testdata/plan-json-full/plan.log rename to cloud/testdata/plan-json-full/plan.log diff --git a/internal/cloud/testdata/plan-long-line/main.tf b/cloud/testdata/plan-long-line/main.tf similarity index 100% rename from internal/cloud/testdata/plan-long-line/main.tf rename to cloud/testdata/plan-long-line/main.tf diff --git a/internal/cloud/testdata/plan-long-line/plan.log b/cloud/testdata/plan-long-line/plan.log similarity index 100% rename from internal/cloud/testdata/plan-long-line/plan.log rename to cloud/testdata/plan-long-line/plan.log diff --git a/internal/cloud/testdata/plan-no-changes/main.tf b/cloud/testdata/plan-no-changes/main.tf similarity index 100% rename from internal/cloud/testdata/plan-no-changes/main.tf rename to cloud/testdata/plan-no-changes/main.tf diff --git a/internal/cloud/testdata/plan-no-changes/plan.log b/cloud/testdata/plan-no-changes/plan.log similarity index 100% rename from internal/cloud/testdata/plan-no-changes/plan.log rename to cloud/testdata/plan-no-changes/plan.log diff --git a/internal/cloud/testdata/plan-no-changes/policy.log b/cloud/testdata/plan-no-changes/policy.log similarity index 100% rename from internal/cloud/testdata/plan-no-changes/policy.log rename to cloud/testdata/plan-no-changes/policy.log diff --git a/internal/cloud/testdata/plan-policy-hard-failed/main.tf b/cloud/testdata/plan-policy-hard-failed/main.tf similarity index 100% rename from internal/cloud/testdata/plan-policy-hard-failed/main.tf rename to cloud/testdata/plan-policy-hard-failed/main.tf diff --git a/internal/cloud/testdata/plan-policy-hard-failed/plan.log b/cloud/testdata/plan-policy-hard-failed/plan.log similarity index 100% rename from internal/cloud/testdata/plan-policy-hard-failed/plan.log rename to cloud/testdata/plan-policy-hard-failed/plan.log diff --git a/internal/cloud/testdata/plan-policy-hard-failed/policy.log b/cloud/testdata/plan-policy-hard-failed/policy.log similarity index 100% rename from 
internal/cloud/testdata/plan-policy-hard-failed/policy.log rename to cloud/testdata/plan-policy-hard-failed/policy.log diff --git a/internal/cloud/testdata/plan-policy-passed/main.tf b/cloud/testdata/plan-policy-passed/main.tf similarity index 100% rename from internal/cloud/testdata/plan-policy-passed/main.tf rename to cloud/testdata/plan-policy-passed/main.tf diff --git a/internal/cloud/testdata/plan-policy-passed/plan.log b/cloud/testdata/plan-policy-passed/plan.log similarity index 100% rename from internal/cloud/testdata/plan-policy-passed/plan.log rename to cloud/testdata/plan-policy-passed/plan.log diff --git a/internal/cloud/testdata/plan-policy-passed/policy.log b/cloud/testdata/plan-policy-passed/policy.log similarity index 100% rename from internal/cloud/testdata/plan-policy-passed/policy.log rename to cloud/testdata/plan-policy-passed/policy.log diff --git a/internal/cloud/testdata/plan-policy-soft-failed/main.tf b/cloud/testdata/plan-policy-soft-failed/main.tf similarity index 100% rename from internal/cloud/testdata/plan-policy-soft-failed/main.tf rename to cloud/testdata/plan-policy-soft-failed/main.tf diff --git a/internal/cloud/testdata/plan-policy-soft-failed/plan.log b/cloud/testdata/plan-policy-soft-failed/plan.log similarity index 100% rename from internal/cloud/testdata/plan-policy-soft-failed/plan.log rename to cloud/testdata/plan-policy-soft-failed/plan.log diff --git a/internal/cloud/testdata/plan-policy-soft-failed/policy.log b/cloud/testdata/plan-policy-soft-failed/policy.log similarity index 100% rename from internal/cloud/testdata/plan-policy-soft-failed/policy.log rename to cloud/testdata/plan-policy-soft-failed/policy.log diff --git a/internal/cloud/testdata/plan-variables/main.tf b/cloud/testdata/plan-variables/main.tf similarity index 100% rename from internal/cloud/testdata/plan-variables/main.tf rename to cloud/testdata/plan-variables/main.tf diff --git a/internal/cloud/testdata/plan-variables/plan.log 
b/cloud/testdata/plan-variables/plan.log similarity index 100% rename from internal/cloud/testdata/plan-variables/plan.log rename to cloud/testdata/plan-variables/plan.log diff --git a/internal/cloud/testdata/plan-with-error/main.tf b/cloud/testdata/plan-with-error/main.tf similarity index 100% rename from internal/cloud/testdata/plan-with-error/main.tf rename to cloud/testdata/plan-with-error/main.tf diff --git a/internal/cloud/testdata/plan-with-error/plan.log b/cloud/testdata/plan-with-error/plan.log similarity index 100% rename from internal/cloud/testdata/plan-with-error/plan.log rename to cloud/testdata/plan-with-error/plan.log diff --git a/internal/cloud/testdata/plan-with-working-directory/terraform/main.tf b/cloud/testdata/plan-with-working-directory/terraform/main.tf similarity index 100% rename from internal/cloud/testdata/plan-with-working-directory/terraform/main.tf rename to cloud/testdata/plan-with-working-directory/terraform/main.tf diff --git a/internal/cloud/testdata/plan-with-working-directory/terraform/plan.log b/cloud/testdata/plan-with-working-directory/terraform/plan.log similarity index 100% rename from internal/cloud/testdata/plan-with-working-directory/terraform/plan.log rename to cloud/testdata/plan-with-working-directory/terraform/plan.log diff --git a/internal/cloud/testdata/plan/main.tf b/cloud/testdata/plan/main.tf similarity index 100% rename from internal/cloud/testdata/plan/main.tf rename to cloud/testdata/plan/main.tf diff --git a/internal/cloud/testdata/plan/plan.log b/cloud/testdata/plan/plan.log similarity index 100% rename from internal/cloud/testdata/plan/plan.log rename to cloud/testdata/plan/plan.log diff --git a/internal/cloud/testdata/refresh/main.tf b/cloud/testdata/refresh/main.tf similarity index 100% rename from internal/cloud/testdata/refresh/main.tf rename to cloud/testdata/refresh/main.tf diff --git a/internal/cloud/testdata/variables/main.tf b/cloud/testdata/variables/main.tf similarity index 100% rename from 
internal/cloud/testdata/variables/main.tf rename to cloud/testdata/variables/main.tf diff --git a/cloud/testing.go b/cloud/testing.go new file mode 100644 index 000000000000..8edc13ae049a --- /dev/null +++ b/cloud/testing.go @@ -0,0 +1,520 @@ +package cloud + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "path" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/version" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + testCred = "test-auth-token" +) + +var ( + tfeHost = svchost.Hostname(defaultHostname) + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + tfeHost: {"token": testCred}, + }) + testBackendSingleWorkspaceName = "app-prod" + defaultTFCPing = map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Cloud") + }, + } +) + +// mockInput is a mock implementation of terraform.UIInput. 
+type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + if v == "wait-for-external-update" { + select { + case <-ctx.Done(): + case <-time.After(time.Minute): + } + } + delete(m.answers, opts.Id) + return v, nil +} + +func testInput(t *testing.T, answers map[string]string) *mockInput { + return &mockInput{answers: answers} +} + +func testBackendWithName(t *testing.T) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }) + return testBackend(t, obj, defaultTFCPing) +} + +func testBackendWithTags(t *testing.T) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + }), + }) + return testBackend(t, obj, nil) +} + +func testBackendNoOperations(t *testing.T) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("no-operations"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }) + return testBackend(t, obj, nil) +} + +func testBackendWithHandlers(t *testing.T, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, func()) { + obj := 
cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + }), + }) + return testBackend(t, obj, handlers) +} + +func testCloudState(t *testing.T) *State { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + raw, err := b.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("error: %v", err) + } + + return raw.(*State) +} + +func testBackendWithOutputs(t *testing.T) (*Cloud, func()) { + b, cleanup := testBackendWithName(t) + + // Get a new mock client to use for adding outputs + mc := NewMockClient() + + mc.StateVersionOutputs.create("svo-abcd", &tfe.StateVersionOutput{ + ID: "svo-abcd", + Value: "foobar", + Sensitive: true, + Type: "string", + Name: "sensitive_output", + DetailedType: "string", + }) + + mc.StateVersionOutputs.create("svo-zyxw", &tfe.StateVersionOutput{ + ID: "svo-zyxw", + Value: "bazqux", + Type: "string", + Name: "nonsensitive_output", + DetailedType: "string", + }) + + var dt interface{} + var val interface{} + err := json.Unmarshal([]byte(`["object", {"foo":"string"}]`), &dt) + if err != nil { + t.Fatalf("could not unmarshal detailed type: %s", err) + } + err = json.Unmarshal([]byte(`{"foo":"bar"}`), &val) + if err != nil { + t.Fatalf("could not unmarshal value: %s", err) + } + mc.StateVersionOutputs.create("svo-efgh", &tfe.StateVersionOutput{ + ID: "svo-efgh", + Value: val, + Type: "object", + Name: "object_output", + DetailedType: dt, + }) + + err = json.Unmarshal([]byte(`["list", "bool"]`), &dt) + if err != nil { + t.Fatalf("could not unmarshal detailed type: %s", err) + } + err = json.Unmarshal([]byte(`[true, false, true, true]`), &val) + if err != nil { + t.Fatalf("could not unmarshal value: %s", err) + } + mc.StateVersionOutputs.create("svo-ijkl", 
&tfe.StateVersionOutput{ + ID: "svo-ijkl", + Value: val, + Type: "array", + Name: "list_output", + DetailedType: dt, + }) + + b.client.StateVersionOutputs = mc.StateVersionOutputs + + return b, cleanup +} + +func testBackend(t *testing.T, obj cty.Value, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, func()) { + var s *httptest.Server + if handlers != nil { + s = testServerWithHandlers(handlers) + } else { + s = testServer(t) + } + b := New(testDisco(s)) + + // Configure the backend so the client is created. + newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatalf("testBackend: backend.PrepareConfig() failed: %s", valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatalf("testBackend: backend.Configure() failed: %s", confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := NewMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.TaskStages = mc.TaskStages + b.client.PolicySetOutcomes = mc.PolicySetOutcomes + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.StateVersions = mc.StateVersions + b.client.StateVersionOutputs = mc.StateVersionOutputs + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + // Set local to a local test backend. 
+ b.local = testLocalBackend(t, b) + b.input = true + + baseURL, err := url.Parse("https://app.terraform.io") + if err != nil { + t.Fatalf("testBackend: failed to parse base URL for client") + } + baseURL.Path = "/api/v2/" + + readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) (*jsonformat.Plan, error) { + return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) + } + + ctx := context.Background() + + // Create the organization. + _, err = b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. + if b.WorkspaceMapping.Name != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.WorkspaceMapping.Name), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b, s.Close +} + +// testUnconfiguredBackend is used for testing the configuration of the backend +// with the mock client +func testUnconfiguredBackend(t *testing.T) (*Cloud, func()) { + s := testServer(t) + b := New(testDisco(s)) + + // Normally, the client is created during configuration, but the configuration uses the + // client to read entitlements. + var err error + b.client, err = tfe.NewClient(&tfe.Config{ + Token: "fake-token", + }) + if err != nil { + t.Fatal(err) + } + + // Get a new mock client. + mc := NewMockClient() + + // Replace the services we use with our mock services. 
+ b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicySetOutcomes = mc.PolicySetOutcomes + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.StateVersions = mc.StateVersions + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + baseURL, err := url.Parse("https://app.terraform.io") + if err != nil { + t.Fatalf("testBackend: failed to parse base URL for client") + } + baseURL.Path = "/api/v2/" + + readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) (*jsonformat.Plan, error) { + return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) + } + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + + return b, s.Close +} + +func testLocalBackend(t *testing.T, cloud *Cloud) backend.Enhanced { + b := backendLocal.NewWithBackend(cloud) + + // Add a test provider to the local backend. + p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "null_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + +// testServer returns a started *httptest.Server used for local testing with the default set of +// request handlers. +func testServer(t *testing.T) *httptest.Server { + return testServerWithHandlers(testDefaultRequestHandlers) +} + +// testServerWithHandlers returns a started *httptest.Server with the given set of request handlers +// overriding any default request handlers (testDefaultRequestHandlers). 
+func testServerWithHandlers(handlers map[string]func(http.ResponseWriter, *http.Request)) *httptest.Server { + mux := http.NewServeMux() + for route, handler := range handlers { + mux.HandleFunc(route, handler) + } + for route, handler := range testDefaultRequestHandlers { + if handlers[route] == nil { + mux.HandleFunc(route, handler) + } + } + + return httptest.NewServer(mux) +} + +// testDefaultRequestHandlers is a map of request handlers intended to be used in a request +// multiplexer for a test server. A caller may use testServerWithHandlers to start a server with +// this base set of routes, and override a particular route for whatever edge case is being tested. +var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Request){ + // Respond to service discovery calls. + "/well-known/terraform.json": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{ + "tfe.v2": "/api/v2/", +}`) + }, + + // Respond to service version constraints calls. + "/v1/versions/": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, fmt.Sprintf(`{ + "service": "%s", + "product": "terraform", + "minimum": "0.1.0", + "maximum": "10.0.0" +}`, path.Base(r.URL.Path))) + }, + + // Respond to pings to get the API version header. + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + }, + + // Respond to the initial query to read the hashicorp org entitlements. 
+ "/api/v2/organizations/hashicorp/entitlement-set": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-GExadygjSbKP8hsY", + "type": "entitlement-sets", + "attributes": { + "operations": true, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }, + + // Respond to the initial query to read the no-operations org entitlements. + "/api/v2/organizations/no-operations/entitlement-set": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-ufxa3y8jSbKP8hsT", + "type": "entitlement-sets", + "attributes": { + "operations": false, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }, + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. 
+ "/api/v2/organizations/": func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }, +} + +func mockColorize() *colorstring.Colorize { + colors := make(map[string]string) + for k, v := range colorstring.DefaultColors { + colors[k] = v + } + colors["purple"] = "38;5;57" + + return &colorstring.Colorize{ + Colors: colors, + Disable: false, + Reset: true, + } +} + +func mockSROWorkspace(t *testing.T, b *Cloud, workspaceName string) { + _, err := b.client.Workspaces.Update(context.Background(), "hashicorp", workspaceName, tfe.WorkspaceUpdateOptions{ + StructuredRunOutputEnabled: tfe.Bool(true), + TerraformVersion: tfe.String("1.4.0"), + }) + if err != nil { + t.Fatalf("Error enabling SRO on workspace %s: %v", workspaceName, err) + } +} + +// testDisco returns a *disco.Disco mapping app.terraform.io and +// localhost to a local test server. +func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "tfe.v2": fmt.Sprintf("%s/api/v2/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + d.SetUserAgent(httpclient.TerraformUserAgent(version.String())) + + d.ForceHostServices(svchost.Hostname(defaultHostname), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + d.ForceHostServices(svchost.Hostname("nontfe.local"), nil) + return d +} + +type unparsedVariableValue struct { + value string + source terraform.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + return &terraform.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. 
+func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/internal/cloud/tfe_client_mock.go b/cloud/tfe_client_mock.go similarity index 99% rename from internal/cloud/tfe_client_mock.go rename to cloud/tfe_client_mock.go index 090343fd0b20..5f579ff0e23c 100644 --- a/internal/cloud/tfe_client_mock.go +++ b/cloud/tfe_client_mock.go @@ -19,7 +19,7 @@ import ( tfe "github.com/hashicorp/go-tfe" "github.com/mitchellh/copystructure" - "github.com/hashicorp/terraform/internal/command/jsonformat" + "github.com/hashicorp/terraform/command/jsonformat" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/command/apply.go b/command/apply.go new file mode 100644 index 000000000000..2fbc496cc87b --- /dev/null +++ b/command/apply.go @@ -0,0 +1,393 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/tfdiags" +) + +// ApplyCommand is a Command implementation that applies a Terraform +// configuration and actually builds or changes infrastructure. +type ApplyCommand struct { + Meta + + // If true, then this apply command will become the "destroy" + // command. It is just like apply but only processes a destroy. + Destroy bool +} + +func (c *ApplyCommand) Run(rawArgs []string) int { + var diags tfdiags.Diagnostics + + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Propagate -no-color for legacy use of Ui. The remote backend and + // cloud package use this; it should be removed when/if they are + // migrated to views. 
+ c.Meta.color = !common.NoColor + c.Meta.Color = c.Meta.color + + // Parse and validate flags + var args *arguments.Apply + switch { + case c.Destroy: + args, diags = arguments.ParseApplyDestroy(rawArgs) + default: + args, diags = arguments.ParseApply(rawArgs) + } + + // Instantiate the view, even if there are flag errors, so that we render + // diagnostics according to the desired view + view := views.NewApply(args.ViewType, c.Destroy, c.View) + + if diags.HasErrors() { + view.Diagnostics(diags) + view.HelpPrompt() + return 1 + } + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + // Attempt to load the plan file, if specified + planFile, diags := c.LoadPlanFile(args.PlanPath) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Check for invalid combination of plan file and variable overrides + if planFile != nil && !args.Vars.Empty() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't set variables when applying a saved plan", + "The -var and -var-file options cannot be used when applying a saved plan file, because a saved plan includes the variable values that were set when it was created.", + )) + view.Diagnostics(diags) + return 1 + } + + // FIXME: the -input flag value is needed to initialize the backend and the + // operation, but there is no clear path to pass this value down, so we + // continue to mutate the Meta object state for now. + c.Meta.input = args.InputEnabled + + // FIXME: the -parallelism flag is used to control the concurrency of + // Terraform operations. At the moment, this value is used both to + // initialize the backend via the ContextOpts field inside CLIOpts, and to + // set a largely unused field on the Operation request. Again, there is no + // clear path to pass this value down, so we continue to mutate the Meta + // object state for now. 
+ c.Meta.parallelism = args.Operation.Parallelism + + // Prepare the backend, passing the plan file if present, and the + // backend-specific arguments + be, beDiags := c.PrepareBackend(planFile, args.State, args.ViewType) + diags = diags.Append(beDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Build the operation request + opReq, opDiags := c.OperationRequest(be, view, args.ViewType, planFile, args.Operation, args.AutoApprove) + diags = diags.Append(opDiags) + + // Collect variable value and add them to the operation request + diags = diags.Append(c.GatherVariables(opReq, args.Vars)) + + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + view.Diagnostics(diags) + if diags.HasErrors() { + return 1 + } + diags = nil + + // Run the operation + op, err := c.RunOperation(be, opReq) + if err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + if op.Result != backend.OperationSuccess { + return op.Result.ExitStatus() + } + + // Render the resource count and outputs, unless those counts are being + // rendered already in a remote Terraform process. 
+ if rb, isRemoteBackend := be.(BackendWithRemoteTerraformVersion); !isRemoteBackend || rb.IsLocalOperations() { + view.ResourceCount(args.State.StateOutPath) + if !c.Destroy && op.State != nil { + view.Outputs(op.State.RootModule().OutputValues) + } + } + + view.Diagnostics(diags) + + if diags.HasErrors() { + return 1 + } + + return 0 +} + +func (c *ApplyCommand) LoadPlanFile(path string) (*planfile.Reader, tfdiags.Diagnostics) { + var planFile *planfile.Reader + var diags tfdiags.Diagnostics + + // Try to load plan if path is specified + if path != "" { + var err error + planFile, err = c.PlanFile(path) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Failed to load %q as a plan file", path), + fmt.Sprintf("Error: %s", err), + )) + return nil, diags + } + + // If the path doesn't look like a plan, both planFile and err will be + // nil. In that case, the user is probably trying to use the positional + // argument to specify a configuration path. Point them at -chdir. + if planFile == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Failed to load %q as a plan file", path), + "The specified path is a directory, not a plan file. You can use the global -chdir flag to use this directory as the configuration root.", + )) + return nil, diags + } + + // If we successfully loaded a plan but this is a destroy operation, + // explain that this is not supported. 
+ if c.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Destroy can't be called with a plan file", + fmt.Sprintf("If this plan was created using plan -destroy, apply it using:\n terraform apply %q", path), + )) + return nil, diags + } + } + + return planFile, diags +} + +func (c *ApplyCommand) PrepareBackend(planFile *planfile.Reader, args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // FIXME: we need to apply the state arguments to the meta object here + // because they are later used when initializing the backend. Carving a + // path to pass these arguments to the functions that need them is + // difficult but would make their use easier to understand. + c.Meta.applyStateArguments(args) + + // Load the backend + var be backend.Enhanced + var beDiags tfdiags.Diagnostics + if planFile == nil { + backendConfig, configDiags := c.loadBackendConfig(".") + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags + } + + be, beDiags = c.Backend(&BackendOpts{ + Config: backendConfig, + ViewType: viewType, + }) + } else { + plan, err := planFile.ReadPlan() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + fmt.Sprintf("Cannot read the plan from the given plan file: %s.", err), + )) + return nil, diags + } + if plan.Backend.Config == nil { + // Should never happen; always indicates a bug in the creation of the plan file + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + "The given plan file does not have a valid backend configuration. 
This is a bug in the Terraform command that generated this plan file.", + )) + return nil, diags + } + be, beDiags = c.BackendForPlan(plan.Backend) + } + + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + return nil, diags + } + return be, diags +} + +func (c *ApplyCommand) OperationRequest( + be backend.Enhanced, + view views.Apply, + viewType arguments.ViewType, + planFile *planfile.Reader, + args *arguments.Operation, + autoApprove bool, +) (*backend.Operation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Applying changes with dev overrides in effect could make it impossible + // to switch back to a release version if the schema isn't compatible, + // so we'll warn about it. + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) + + // Build the operation + opReq := c.Operation(be, viewType) + opReq.AutoApprove = autoApprove + opReq.ConfigDir = "." + opReq.PlanMode = args.PlanMode + opReq.Hooks = view.Hooks() + opReq.PlanFile = planFile + opReq.PlanRefresh = args.Refresh + opReq.Targets = args.Targets + opReq.ForceReplace = args.ForceReplace + opReq.Type = backend.OperationTypeApply + opReq.View = view.Operation() + + var err error + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) + return nil, diags + } + + return opReq, diags +} + +func (c *ApplyCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. 
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} + opReq.Variables, diags = c.collectVariableValues() + + return diags +} + +func (c *ApplyCommand) Help() string { + if c.Destroy { + return c.helpDestroy() + } + + return c.helpApply() +} + +func (c *ApplyCommand) Synopsis() string { + if c.Destroy { + return "Destroy previously-created infrastructure" + } + + return "Create or update infrastructure" +} + +func (c *ApplyCommand) helpApply() string { + helpText := ` +Usage: terraform [global options] apply [options] [PLAN] + + Creates or updates infrastructure according to Terraform configuration + files in the current directory. + + By default, Terraform will generate a new plan and present it for your + approval before taking any action. You can optionally provide a plan + file created by a previous call to "terraform plan", in which case + Terraform will take the actions described in that plan without any + confirmation prompt. + +Options: + + -auto-approve Skip interactive approval of plan before applying. + + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. + + -compact-warnings If Terraform produces any warnings that are not + accompanied by errors, show them in a more compact + form that includes only the summary messages. + + -destroy Destroy Terraform-managed infrastructure. + The command "terraform destroy" is a convenience alias + for this option. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -input=true Ask for input for variables if not directly set. + + -no-color If specified, output won't contain any color. 
+ + -parallelism=n Limit the number of parallel resource operations. + Defaults to 10. + + -state=path Path to read and save state (unless state-out + is specified). Defaults to "terraform.tfstate". + + -state-out=path Path to write state to that is different than + "-state". This can be used to preserve the old + state. + + If you don't provide a saved plan file then this command will also accept + all of the plan-customization options accepted by the terraform plan command. + For more information on those options, run: + terraform plan -help +` + return strings.TrimSpace(helpText) +} + +func (c *ApplyCommand) helpDestroy() string { + helpText := ` +Usage: terraform [global options] destroy [options] + + Destroy Terraform-managed infrastructure. + + This command is a convenience alias for: + terraform apply -destroy + + This command also accepts many of the plan-customization options accepted by + the terraform plan command. For more information on those options, run: + terraform plan -help +` + return strings.TrimSpace(helpText) +} diff --git a/internal/command/apply_destroy_test.go b/command/apply_destroy_test.go similarity index 98% rename from internal/command/apply_destroy_test.go rename to command/apply_destroy_test.go index 27aaff901723..ee02130f9c65 100644 --- a/internal/command/apply_destroy_test.go +++ b/command/apply_destroy_test.go @@ -9,11 +9,11 @@ import ( "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" ) func TestApply_destroy(t 
*testing.T) { diff --git a/command/apply_test.go b/command/apply_test.go new file mode 100644 index 000000000000..7468f20ed105 --- /dev/null +++ b/command/apply_test.go @@ -0,0 +1,2232 @@ +package command + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestApply(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_path(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + 
"-auto-approve", + testFixturePath("apply"), + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + if !strings.Contains(output.Stderr(), "-chdir") { + t.Fatal("expected command output to refer to -chdir flag, but got:", output.Stderr()) + } +} + +func TestApply_approveNo(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + defer testInputMap(t, map[string]string{ + "approve": "no", + })() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + if got, want := output.Stdout(), "Apply cancelled"; !strings.Contains(got, want) { + t.Fatalf("expected output to include %q, but was:\n%s", want, got) + } + + if _, err := os.Stat(statePath); err == nil || !os.IsNotExist(err) { + t.Fatalf("state file should not exist") + } +} + +func TestApply_approveYes(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + 
View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +// test apply with locked state +func TestApply_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("expected error") + } + + if !strings.Contains(output.Stderr(), "lock") { + t.Fatal("command output does not look like a lock error:", output.Stderr()) + } +} + +// test apply with locked state, waiting for unlock +func TestApply_lockedStateWait(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + + // unlock during apply + go func() { + time.Sleep(500 * time.Millisecond) + unlock() + }() + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // wait 4s just in case the lock process doesn't release in under a second, + // and we want our context to be alive for a second retry at 
the 3s mark. + args := []string{ + "-state", statePath, + "-lock-timeout", "4s", + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("lock should have succeeded in less than 3s: %s", output.Stderr()) + } +} + +// Verify that the parallelism flag allows no more than the desired number of +// concurrent calls to ApplyResourceChange. +func TestApply_parallelism(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("parallelism"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + par := 4 + + // started is a semaphore that we use to ensure that we never have more + // than "par" apply operations happening concurrently + started := make(chan struct{}, par) + + // beginCtx is used as a starting gate to hold back ApplyResourceChange + // calls until we reach the desired concurrency. The cancel func "begin" is + // called once we reach the desired concurrency, allowing all apply calls + // to proceed in unison. + beginCtx, begin := context.WithCancel(context.Background()) + + // Since our mock provider has its own mutex preventing concurrent calls + // to ApplyResourceChange, we need to use a number of separate providers + // here. They will all have the same mock implementation function assigned + // but crucially they will each have their own mutex. 
+ providerFactories := map[addrs.Provider]providers.Factory{} + for i := 0; i < 10; i++ { + name := fmt.Sprintf("test%d", i) + provider := &terraform.MockProvider{} + provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + name + "_instance": {Block: &configschema.Block{}}, + }, + } + provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + provider.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + + // If we ever have more than our intended parallelism number of + // apply operations running concurrently, the semaphore will fail. + select { + case started <- struct{}{}: + defer func() { + <-started + }() + default: + t.Fatal("too many concurrent apply operations") + } + + // If we never reach our intended parallelism, the context will + // never be canceled and the test will time out. + if len(started) >= par { + begin() + } + <-beginCtx.Done() + + // do some "work" + // Not required for correctness, but makes it easier to spot a + // failure when there is more overlap. 
+ time.Sleep(10 * time.Millisecond) + + return providers.ApplyResourceChangeResponse{ + NewState: cty.EmptyObjectVal, + } + } + providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider) + } + testingOverrides := &testingOverrides{ + Providers: providerFactories, + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: testingOverrides, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + fmt.Sprintf("-parallelism=%d", par), + } + + res := c.Run(args) + output := done(t) + if res != 0 { + t.Fatal(output.Stdout()) + } +} + +func TestApply_configInvalid(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-config-invalid"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", testTempFile(t), + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestApply_defaultState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := filepath.Join(td, DefaultStateFilename) + + // Change to the temporary directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(filepath.Dir(statePath)); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // create an existing state file + localState := statemgr.NewFilesystem(statePath) + if err := localState.WriteState(states.NewState()); err != nil { + t.Fatal(err) + } + 
+ args := []string{ + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_error(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-error"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + var lock sync.Mutex + errored := false + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + lock.Lock() + defer lock.Unlock() + + if !errored { + errored = true + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) + } + + s := req.PlannedState.AsValueMap() + s["id"] = cty.StringVal("foo") + + resp.NewState = cty.ObjectVal(s) + return + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + s := req.ProposedNewState.AsValueMap() + s["id"] = cty.UnknownVal(cty.String) + resp.PlannedState = cty.ObjectVal(s) + return + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "error": {Type: cty.Bool, Optional: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("wrong exit code %d; want 1\n%s", code, output.Stdout()) + 
} + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + if len(state.RootModule().Resources) == 0 { + t.Fatal("no resources in state") + } +} + +func TestApply_input(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-input"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // The configuration for this test includes a declaration of variable + // "foo" with no default, and we don't set it on the command line below, + // so the apply command will produce an interactive prompt for the + // value of var.foo. We'll answer "foo" here, and we expect the output + // value "result" to echo that back to us below. + defaultInputReader = bytes.NewBufferString("foo\n") + defaultInputWriter = new(bytes.Buffer) + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + expected := strings.TrimSpace(` + +Outputs: + +result = foo + `) + testStateOutput(t, statePath, expected) +} + +// When only a partial set of the variables are set, Terraform +// should still ask for the unset ones by default (with -input=true) +func TestApply_inputPartial(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-input-partial"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Set some default reader/writers for the inputs + 
defaultInputReader = bytes.NewBufferString("one\ntwo\n") + defaultInputWriter = new(bytes.Buffer) + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + "-var", "foo=foovalue", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + expected := strings.TrimSpace(` + +Outputs: + +bar = one +foo = foovalue + `) + testStateOutput(t, statePath, expected) +} + +func TestApply_noArgs(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_plan(t *testing.T) { + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Set some default reader/writers for the inputs + defaultInputReader = new(bytes.Buffer) + defaultInputWriter = new(bytes.Buffer) + + planPath := applyFixturePlanFile(t) + statePath := testTempFile(t) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state-out", statePath, + planPath, + } + code := c.Run(args) + output := 
done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_plan_backup(t *testing.T) { + statePath := testTempFile(t) + backupPath := testTempFile(t) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // create a state file that needs to be backed up + fs := statemgr.NewFilesystem(statePath) + fs.StateSnapshotMeta() + err := fs.WriteState(states.NewState()) + if err != nil { + t.Fatal(err) + } + + // the plan file must contain the metadata from the prior state to be + // backed up + planPath := applyFixturePlanFileMatchState(t, fs.StateSnapshotMeta()) + + args := []string{ + "-state", statePath, + "-backup", backupPath, + planPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Should have a backup file + testStateRead(t, backupPath) +} + +func TestApply_plan_noBackup(t *testing.T) { + planPath := applyFixturePlanFile(t) + statePath := testTempFile(t) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state-out", statePath, + "-backup", "-", + planPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Ensure there is no backup + _, err := os.Stat(statePath + DefaultBackupExtension) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } + + // Ensure there is no literal "-" + _, err = os.Stat("-") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } +} + +func 
TestApply_plan_remoteState(t *testing.T) { + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + tmp := testCwd(t) + remoteStatePath := filepath.Join(tmp, DefaultDataDir, DefaultStateFilename) + if err := os.MkdirAll(filepath.Dir(remoteStatePath), 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // Set some default reader/writers for the inputs + defaultInputReader = new(bytes.Buffer) + defaultInputWriter = new(bytes.Buffer) + + // Create a remote state + state := testState() + _, srv := testRemoteState(t, state, 200) + defer srv.Close() + + _, snap := testModuleWithSnapshot(t, "apply") + backendConfig := cty.ObjectVal(map[string]cty.Value{ + "address": cty.StringVal(srv.URL), + "update_method": cty.NullVal(cty.String), + "lock_address": cty.NullVal(cty.String), + "unlock_address": cty.NullVal(cty.String), + "lock_method": cty.NullVal(cty.String), + "unlock_method": cty.NullVal(cty.String), + "username": cty.NullVal(cty.String), + "password": cty.NullVal(cty.String), + "skip_cert_verification": cty.NullVal(cty.Bool), + "retry_max": cty.NullVal(cty.String), + "retry_wait_min": cty.NullVal(cty.String), + "retry_wait_max": cty.NullVal(cty.String), + "client_ca_certificate_pem": cty.NullVal(cty.String), + "client_certificate_pem": cty.NullVal(cty.String), + "client_private_key_pem": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type()) + if err != nil { + t.Fatal(err) + } + planPath := testPlanFile(t, snap, state, &plans.Plan{ + Backend: plans.Backend{ + Type: "http", + Config: backendConfigRaw, + }, + Changes: plans.NewChanges(), + }) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + planPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // State file 
should not be installed + if _, err := os.Stat(filepath.Join(tmp, DefaultStateFilename)); err == nil { + data, _ := ioutil.ReadFile(DefaultStateFilename) + t.Fatalf("State path should not exist: %s", string(data)) + } + + // Check that there is no remote state config + if src, err := ioutil.ReadFile(remoteStatePath); err == nil { + t.Fatalf("has %s file; should not\n%s", remoteStatePath, src) + } +} + +func TestApply_planWithVarFile(t *testing.T) { + varFileDir := testTempDir(t) + varFilePath := filepath.Join(varFileDir, "terraform.tfvars") + if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + planPath := applyFixturePlanFile(t) + statePath := testTempFile(t) + + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(varFileDir); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state-out", statePath, + planPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_planVars(t *testing.T) { + planPath := applyFixturePlanFile(t) + statePath := testTempFile(t) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-var", "foo=bar", + planPath, + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("should've failed: ", output.Stdout()) + } +} + +// we should be able to apply a plan file with no other file dependencies
+func TestApply_planNoModuleFiles(t *testing.T) { + // temporary data directory which we can remove between commands + td := testTempDir(t) + defer os.RemoveAll(td) + + defer testChdir(t, td)() + + p := applyFixtureProvider() + planPath := applyFixturePlanFile(t) + view, done := testView(t) + apply := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: new(cli.MockUi), + View: view, + }, + } + args := []string{ + planPath, + } + apply.Run(args) + done(t) +} + +func TestApply_refresh(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"ami":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("should call ReadResource") + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + // Should have a backup file + backupState := testStateRead(t, statePath+DefaultBackupExtension) + + actualStr := strings.TrimSpace(backupState.String()) + expectedStr := 
strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_refreshFalse(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"ami":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + "-refresh=false", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if p.ReadResourceCalled { + t.Fatal("should not call ReadResource when refresh=false") + } +} +func TestApply_shutdown(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-shutdown"), td) + defer testChdir(t, td)() + + cancelled := make(chan struct{}) + shutdownCh := make(chan struct{}) + + statePath := testTempFile(t) + p := testProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + ShutdownCh: shutdownCh, + }, + } + + p.StopFn = func() error { + close(cancelled) + return nil + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp 
providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + return + } + + var once sync.Once + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // only cancel once + once.Do(func() { + shutdownCh <- struct{}{} + }) + + // Because of the internal lock in the MockProvider, we can't + // coordinate directly with the calling of Stop, and making the + // MockProvider concurrent is disruptive to a lot of existing tests. + // Wait here a moment to help make sure the main goroutine gets to the + // Stop call before we exit, or the plan may finish before it can be + // canceled. + time.Sleep(200 * time.Millisecond) + + resp.NewState = req.PlannedState + return + } + + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + select { + case <-cancelled: + default: + t.Fatal("command not cancelled") + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_state(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON:
[]byte(`{"ami":"foo"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + }), + } + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + }), + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.StringVal("foo"), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected) + } + + actual = p.ApplyResourceChangeRequest.PriorState + expected = cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.StringVal("foo"), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + backupState := testStateRead(t, statePath+DefaultBackupExtension) + + actualStr := strings.TrimSpace(backupState.String()) + expectedStr := 
strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_stateNoExist(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "idontexist.tfstate", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestApply_sensitiveOutput(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-sensitive-output"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + statePath := testTempFile(t) + + args := []string{ + "-state", statePath, + "-auto-approve", + } + + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stdout()) + } + + stdout := output.Stdout() + if !strings.Contains(stdout, "notsensitive = \"Hello world\"") { + t.Fatalf("bad: output should contain 'notsensitive' output\n%s", stdout) + } + if !strings.Contains(stdout, "sensitive = ") { + t.Fatalf("bad: output should contain 'sensitive' output\n%s", stdout) + } +} + +func TestApply_vars(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.GetProviderSchemaResponse = 
&providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + actual = req.ProposedNewState.GetAttr("value").AsString() + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + args := []string{ + "-auto-approve", + "-var", "foo=bar", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestApply_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + varFilePath := testTempFile(t) + if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + 
NewState: req.PlannedState, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + actual = req.ProposedNewState.GetAttr("value").AsString() + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + args := []string{ + "-auto-approve", + "-var-file", varFilePath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestApply_varFileDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + actual = req.ProposedNewState.GetAttr("value").AsString() + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + 
output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestApply_varFileDefaultJSON(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars.json") + if err := ioutil.WriteFile(varFilePath, []byte(applyVarFileJSON), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + actual = req.ProposedNewState.GetAttr("value").AsString() + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestApply_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + 
s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + backupPath := testTempFile(t) + + p := applyFixtureProvider() + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + }), + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + "-backup", backupPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + backupState := testStateRead(t, backupPath) + + actual := backupState.RootModule().Resources["test_instance.foo"] + expected := originalState.RootModule().Resources["test_instance.foo"] + if !cmp.Equal(actual, expected, cmpopts.EquateEmpty()) { + t.Fatalf( + "wrong aws_instance.foo state\n%s", + cmp.Diff(expected, actual, cmp.Transformer("bytesAsString", func(b []byte) string { + return string(b) + })), + ) + } +} + +func TestApply_disableBackup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := testState() + statePath := testStateFile(t, 
originalState) + + p := applyFixtureProvider() + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + }), + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + "-backup", "-", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected) + } + + actual = p.ApplyResourceChangeRequest.PriorState + expected = cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + // Ensure there is no backup + _, err := os.Stat(statePath + DefaultBackupExtension) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } + + // Ensure there is no literal "-" + _, err = os.Stat("-") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } +} + +// Test that the Terraform env is passed through +func TestApply_terraformEnv(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, 
testFixturePath("apply-terraform-env"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + expected := strings.TrimSpace(` + +Outputs: + +output = default + `) + testStateOutput(t, statePath, expected) +} + +// Test that the Terraform env is passed through +func TestApply_terraformEnvNonDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-terraform-env"), td) + defer testChdir(t, td)() + + // Create new env + { + ui := new(cli.MockUi) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := newCmd.Run([]string{"test"}); code != 0 { + t.Fatal("error creating workspace") + } + } + + // Switch to it + { + args := []string{"test"} + ui := new(cli.MockUi) + selCmd := &WorkspaceSelectCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := selCmd.Run(args); code != 0 { + t.Fatal("error switching workspace") + } + } + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + statePath := filepath.Join("terraform.tfstate.d", "test", "terraform.tfstate") + expected := strings.TrimSpace(` + +Outputs: + +output = test + `) + testStateOutput(t, statePath, expected) +} + +// Config with multiple resources, targeting apply of a subset +func TestApply_targeted(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer 
testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "3 added, 0 changed, 0 destroyed"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } +} + +// Diagnostics for invalid -target flags +func TestApply_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", 
wantDiag, got) + } + }) + } +} + +func TestApply_replace(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-replace"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"hello"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + createCount := 0 + deleteCount := 0 + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + if req.PriorState.IsNull() { + createCount++ + } + if req.PlannedState.IsNull() { + deleteCount++ + } + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-state", statePath, + "-replace", "test_instance.a", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "1 added, 0 changed, 1 destroyed"; 
!strings.Contains(got, want) { + t.Errorf("wrong change summary\ngot output:\n%s\n\nwant substring: %s", got, want) + } + + if got, want := createCount, 1; got != want { + t.Errorf("wrong create count %d; want %d", got, want) + } + if got, want := deleteCount, 1; got != want { + t.Errorf("wrong create count %d; want %d", got, want) + } +} + +func TestApply_pluginPath(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + pluginPath := []string{"a", "b", "c"} + + if err := c.Meta.storePluginPath(pluginPath); err != nil { + t.Fatal(err) + } + c.Meta.pluginPath = nil + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !reflect.DeepEqual(pluginPath, c.Meta.pluginPath) { + t.Fatalf("expected plugin path %#v, got %#v", pluginPath, c.Meta.pluginPath) + } +} + +func TestApply_jsonGoldenReference(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-json", + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } 
+ + checkGoldenReference(t, output, "apply") +} + +func TestApply_warnings(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = applyFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + }, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.UnknownAsNull(req.PlannedState), + } + } + + t.Run("full warnings", func(t *testing.T) { + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-auto-approve"} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + wantWarnings := []string{ + "warning 1", + "warning 2", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-auto-approve", "-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 2 warnings and a message about -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "To see the full warning notes, run Terraform without -compact-warnings.", + } + for _, want := range wantWarnings { + 
if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +// applyFixtureSchema returns a schema suitable for processing the +// configuration in testdata/apply . This schema should be +// assigned to a mock provider named "test". +func applyFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// applyFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/apply. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// Terraform Core. +func applyFixtureProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = applyFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.UnknownAsNull(req.PlannedState), + } + } + return p +} + +// applyFixturePlanFile creates a plan file at a temporary location containing +// a single change to create the test_instance.foo that is included in the +// "apply" test fixture, returning the location of that plan file. 
+func applyFixturePlanFile(t *testing.T) string { + return applyFixturePlanFileMatchState(t, statemgr.SnapshotMeta{}) +} + +// applyFixturePlanFileMatchState creates a planfile like applyFixturePlanFile, +// but inserts the state meta information if that plan must match a preexisting +// state. +func applyFixturePlanFileMatchState(t *testing.T, stateMeta statemgr.SnapshotMeta) string { + _, snap := testModuleWithSnapshot(t, "apply") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + return testPlanFileMatchState( + t, + snap, + states.NewState(), + plan, + stateMeta, + ) +} + +const applyVarFile = ` +foo = "bar" +` + +const applyVarFileJSON = ` +{ "foo": "bar" } +` diff --git a/command/arguments/apply.go b/command/arguments/apply.go new file mode 100644 index 000000000000..267c8e5397d3 --- /dev/null +++ b/command/arguments/apply.go @@ -0,0 +1,147 @@ +package arguments + +import ( + "fmt" + + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" +) + +// Apply represents the command-line arguments for the apply command. 
+type Apply struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // AutoApprove skips the manual verification step for the apply operation. + AutoApprove bool + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. + InputEnabled bool + + // PlanPath contains an optional path to a stored plan file + PlanPath string + + // ViewType specifies which output format to use + ViewType ViewType +} + +// ParseApply processes CLI arguments, returning an Apply value and errors. +// If errors are encountered, an Apply value is still returned representing +// the best effort interpretation of the arguments. +func ParseApply(args []string) (*Apply, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + apply := &Apply{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("apply", apply.State, apply.Operation, apply.Vars) + cmdFlags.BoolVar(&apply.AutoApprove, "auto-approve", false, "auto-approve") + cmdFlags.BoolVar(&apply.InputEnabled, "input", true, "input") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 0 { + apply.PlanPath = args[0] + args = args[1:] + } + + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + // JSON view currently does not support input, so we disable it here. + if json { + apply.InputEnabled = false + } + + // JSON view cannot confirm apply, so we require either a plan file or + // auto-approve to be specified. We intentionally fail here rather than + // override auto-approve, which would be dangerous. 
+ if json && apply.PlanPath == "" && !apply.AutoApprove { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plan file or auto-approve required", + "Terraform cannot ask for interactive approval when -json is set. You can either apply a saved plan file, or enable the -auto-approve option.", + )) + } + + diags = diags.Append(apply.Operation.Parse()) + + switch { + case json: + apply.ViewType = ViewJSON + default: + apply.ViewType = ViewHuman + } + + return apply, diags +} + +// ParseApplyDestroy is a special case of ParseApply that deals with the +// "terraform destroy" command, which is effectively an alias for +// "terraform apply -destroy". +func ParseApplyDestroy(args []string) (*Apply, tfdiags.Diagnostics) { + apply, diags := ParseApply(args) + + // So far ParseApply was using the command line options like -destroy + // and -refresh-only to determine the plan mode. For "terraform destroy" + // we expect neither of those arguments to be set, and so the plan mode + // should currently be set to NormalMode, which we'll replace with + // DestroyMode here. If it's already set to something else then that + // suggests incorrect usage. + switch apply.Operation.PlanMode { + case plans.NormalMode: + // This indicates that the user didn't specify any mode options at + // all, which is correct, although we know from the command that + // they actually intended to use DestroyMode here. 
+ apply.Operation.PlanMode = plans.DestroyMode + case plans.DestroyMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + "The -destroy option is not valid for \"terraform destroy\", because this command always runs in destroy mode.", + )) + case plans.RefreshOnlyMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + "The -refresh-only option is not valid for \"terraform destroy\".", + )) + default: + // This is a non-ideal error message for if we forget to handle a + // newly-handled plan mode in Operation.Parse. Ideally they should all + // have cases above so we can produce better error messages. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + fmt.Sprintf("The \"terraform destroy\" command doesn't support %s.", apply.Operation.PlanMode), + )) + } + + // NOTE: It's also invalid to have apply.PlanPath set in this codepath, + // but we don't check that in here because we'll return a different error + // message depending on whether the given path seems to refer to a saved + // plan file or to a configuration directory. The apply command + // implementation itself therefore handles this situation. 
+ + return apply, diags +} diff --git a/command/arguments/apply_test.go b/command/arguments/apply_test.go new file mode 100644 index 000000000000..eb23919c6f04 --- /dev/null +++ b/command/arguments/apply_test.go @@ -0,0 +1,389 @@ +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" +) + +func TestParseApply_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Apply + }{ + "defaults": { + nil, + &Apply{ + AutoApprove: false, + InputEnabled: true, + PlanPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "auto-approve, disabled input, and plan path": { + []string{"-auto-approve", "-input=false", "saved.tfplan"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + PlanPath: "saved.tfplan", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "destroy mode": { + []string{"-destroy"}, + &Apply{ + AutoApprove: false, + InputEnabled: true, + PlanPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "JSON view disables input": { + []string{"-json", "-auto-approve"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + PlanPath: "", + ViewType: ViewJSON, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if 
len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParseApply_json(t *testing.T) { + testCases := map[string]struct { + args []string + wantSuccess bool + }{ + "-json": { + []string{"-json"}, + false, + }, + "-json -auto-approve": { + []string{"-json", "-auto-approve"}, + true, + }, + "-json saved.tfplan": { + []string{"-json", "saved.tfplan"}, + true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + + if tc.wantSuccess { + if len(diags) > 0 { + t.Errorf("unexpected diags: %v", diags) + } + } else { + if got, want := diags.Err().Error(), "Plan file or auto-approve required"; !strings.Contains(got, want) { + t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) + } + } + + if got.ViewType != ViewJSON { + t.Errorf("unexpected view type. got: %#v, want: %#v", got.ViewType, ViewJSON) + } + }) + } +} + +func TestParseApply_invalid(t *testing.T) { + got, diags := ParseApply([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseApply_tooManyArguments(t *testing.T) { + got, diags := ParseApply([]string{"saved.tfplan", "please"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseApply_targets(t *testing.T) { + foobarbaz, _ := 
addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParseApply_replace(t *testing.T) { + foobarbaz, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.baz") + foobarbeep, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.beep") + testCases := map[string]struct { + args []string + want []addrs.AbsResourceInstance + wantErr string + }{ + "no addresses by default": { + args: nil, + want: nil, + }, + "one address": { + args: []string{"-replace=foo_bar.baz"}, + want: []addrs.AbsResourceInstance{foobarbaz}, + }, + "two addresses": { + args: []string{"-replace=foo_bar.baz", "-replace", "foo_bar.beep"}, + want: []addrs.AbsResourceInstance{foobarbaz, foobarbeep}, + }, + "non-resource-instance address": { + args: []string{"-replace=module.boop"}, + want: nil, + wantErr: "A resource 
instance address is required here.", + }, + "data resource address": { + args: []string{"-replace=data.foo.bar"}, + want: nil, + wantErr: "Only managed resources can be used", + }, + "invalid traversal": { + args: []string{"-replace=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid address": { + args: []string{"-replace=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.ForceReplace, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.ForceReplace, tc.want)) + } + }) + } +} + +func TestParseApply_vars(t *testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := 
got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} + +func TestParseApplyDestroy_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Apply + }{ + "defaults": { + nil, + &Apply{ + AutoApprove: false, + InputEnabled: true, + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "auto-approve and disabled input": { + []string{"-auto-approve", "-input=false"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApplyDestroy(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParseApplyDestroy_invalid(t *testing.T) { + t.Run("explicit destroy mode", func(t *testing.T) { + got, diags := ParseApplyDestroy([]string{"-destroy"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Invalid mode option:"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } + }) +} diff --git a/internal/command/arguments/default.go b/command/arguments/default.go similarity index 100% rename from internal/command/arguments/default.go rename to command/arguments/default.go diff --git a/internal/command/arguments/extended.go b/command/arguments/extended.go similarity 
index 98% rename from internal/command/arguments/extended.go rename to command/arguments/extended.go index e698182db942..c4b98336ebd4 100644 --- a/internal/command/arguments/extended.go +++ b/command/arguments/extended.go @@ -7,9 +7,9 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" ) // DefaultParallelism is the limit Terraform places on total parallel diff --git a/internal/command/arguments/flags.go b/command/arguments/flags.go similarity index 100% rename from internal/command/arguments/flags.go rename to command/arguments/flags.go diff --git a/command/arguments/output.go b/command/arguments/output.go new file mode 100644 index 000000000000..f77c283ccc91 --- /dev/null +++ b/command/arguments/output.go @@ -0,0 +1,88 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Output represents the command-line arguments for the output command. +type Output struct { + // Name identifies which root module output to show. If empty, show all + // outputs. + Name string + + // StatePath is an optional path to a state file, from which outputs will + // be loaded. + StatePath string + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType +} + +// ParseOutput processes CLI arguments, returning an Output value and errors. +// If errors are encountered, an Output value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseOutput(args []string) (*Output, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + output := &Output{} + + var jsonOutput, rawOutput bool + var statePath string + cmdFlags := defaultFlagSet("output") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.BoolVar(&rawOutput, "raw", false, "raw") + cmdFlags.StringVar(&statePath, "state", "", "path") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + )) + } + + if jsonOutput && rawOutput { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + )) + + // Since the desired output format is unknowable, fall back to default + jsonOutput = false + rawOutput = false + } + + output.StatePath = statePath + + if len(args) > 0 { + output.Name = args[0] + } + + if rawOutput && output.Name == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + )) + } + + switch { + case jsonOutput: + output.ViewType = ViewJSON + case rawOutput: + output.ViewType = ViewRaw + default: + output.ViewType = ViewHuman + } + + return output, diags +} diff --git a/command/arguments/output_test.go b/command/arguments/output_test.go new file mode 100644 index 000000000000..304a156bff55 --- /dev/null +++ b/command/arguments/output_test.go @@ -0,0 +1,142 @@ +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestParseOutput_valid(t *testing.T) { + 
testCases := map[string]struct { + args []string + want *Output + }{ + "defaults": { + nil, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + }, + "json": { + []string{"-json"}, + &Output{ + Name: "", + ViewType: ViewJSON, + StatePath: "", + }, + }, + "raw": { + []string{"-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "", + }, + }, + "state": { + []string{"-state=foobar.tfstate", "-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "foobar.tfstate", + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseOutput(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseOutput_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Output + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "json and raw specified": { + []string{"-json", "-raw"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + ), + }, + }, + "raw with no name": { + []string{"-raw"}, + &Output{ + Name: "", + ViewType: ViewRaw, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + ), + }, + }, + "too many arguments": { + []string{"-raw", "-state=foo.tfstate", "bar", "baz"}, + &Output{ + Name: "bar", + ViewType: ViewRaw, + StatePath: "foo.tfstate", + }, + 
tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseOutput(tc.args) + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/command/arguments/plan.go b/command/arguments/plan.go new file mode 100644 index 000000000000..0a93759440bb --- /dev/null +++ b/command/arguments/plan.go @@ -0,0 +1,81 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Plan represents the command-line arguments for the plan command. +type Plan struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // DetailedExitCode enables different exit codes for error, success with + // changes, and success with no changes. + DetailedExitCode bool + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. + InputEnabled bool + + // OutPath contains an optional path to store the plan file + OutPath string + + // ViewType specifies which output format to use + ViewType ViewType +} + +// ParsePlan processes CLI arguments, returning a Plan value and errors. +// If errors are encountered, a Plan value is still returned representing +// the best effort interpretation of the arguments. 
+func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + plan := &Plan{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("plan", plan.State, plan.Operation, plan.Vars) + cmdFlags.BoolVar(&plan.DetailedExitCode, "detailed-exitcode", false, "detailed-exitcode") + cmdFlags.BoolVar(&plan.InputEnabled, "input", true, "input") + cmdFlags.StringVar(&plan.OutPath, "out", "", "out") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "To specify a working directory for the plan, use the global -chdir flag.", + )) + } + + diags = diags.Append(plan.Operation.Parse()) + + // JSON view currently does not support input, so we disable it here + if json { + plan.InputEnabled = false + } + + switch { + case json: + plan.ViewType = ViewJSON + default: + plan.ViewType = ViewHuman + } + + return plan, diags +} diff --git a/command/arguments/plan_test.go b/command/arguments/plan_test.go new file mode 100644 index 000000000000..14c72e9fb3e3 --- /dev/null +++ b/command/arguments/plan_test.go @@ -0,0 +1,207 @@ +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" +) + +func TestParsePlan_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Plan + }{ + "defaults": { + nil, + &Plan{ + DetailedExitCode: false, + InputEnabled: true, + OutPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 
10, + Refresh: true, + }, + }, + }, + "setting all options": { + []string{"-destroy", "-detailed-exitcode", "-input=false", "-out=saved.tfplan"}, + &Plan{ + DetailedExitCode: true, + InputEnabled: false, + OutPath: "saved.tfplan", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "JSON view disables input": { + []string{"-json"}, + &Plan{ + DetailedExitCode: false, + InputEnabled: false, + OutPath: "", + ViewType: ViewJSON, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParsePlan_invalid(t *testing.T) { + got, diags := ParsePlan([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParsePlan_tooManyArguments(t *testing.T) { + got, diags := ParsePlan([]string{"saved.tfplan"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParsePlan_targets(t 
*testing.T) { + foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParsePlan_vars(t *testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: 
"boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} diff --git a/command/arguments/refresh.go b/command/arguments/refresh.go new file mode 100644 index 000000000000..0e35483aa7f8 --- /dev/null +++ b/command/arguments/refresh.go @@ -0,0 +1,71 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Refresh represents the command-line arguments for the refresh command. +type Refresh struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. + InputEnabled bool + + // ViewType specifies which output format to use + ViewType ViewType +} + +// ParseRefresh processes CLI arguments, returning a Refresh value and errors. +// If errors are encountered, a Refresh value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseRefresh(args []string) (*Refresh, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + refresh := &Refresh{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("refresh", refresh.State, refresh.Operation, refresh.Vars) + cmdFlags.BoolVar(&refresh.InputEnabled, "input", true, "input") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + diags = diags.Append(refresh.Operation.Parse()) + + // JSON view currently does not support input, so we disable it here + if json { + refresh.InputEnabled = false + } + + switch { + case json: + refresh.ViewType = ViewJSON + default: + refresh.ViewType = ViewHuman + } + + return refresh, diags +} diff --git a/command/arguments/refresh_test.go b/command/arguments/refresh_test.go new file mode 100644 index 000000000000..6988b77f5161 --- /dev/null +++ b/command/arguments/refresh_test.go @@ -0,0 +1,180 @@ +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" +) + +func TestParseRefresh_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Refresh + }{ + "defaults": { + nil, + &Refresh{ + InputEnabled: true, + ViewType: ViewHuman, + }, + }, + "input=false": { + []string{"-input=false"}, + &Refresh{ + InputEnabled: false, + ViewType: ViewHuman, + }, + }, + "JSON view disables input": { + []string{"-json"}, + &Refresh{ + InputEnabled: false, + ViewType: ViewJSON, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + 
if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + // Ignore the extended arguments for simplicity + got.State = nil + got.Operation = nil + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseRefresh_invalid(t *testing.T) { + got, diags := ParseRefresh([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseRefresh_tooManyArguments(t *testing.T) { + got, diags := ParseRefresh([]string{"saved.tfplan"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseRefresh_targets(t *testing.T) { + foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is 
required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParseRefresh_vars(t *testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} diff --git a/command/arguments/show.go b/command/arguments/show.go new file mode 100644 index 000000000000..7667e295863d --- /dev/null +++ b/command/arguments/show.go @@ -0,0 +1,59 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Show represents the 
command-line arguments for the show command. +type Show struct { + // Path is the path to the state file or plan file to be displayed. If + // unspecified, show will display the latest state snapshot. + Path string + + // ViewType specifies which output format to use: human or JSON. + ViewType ViewType +} + +// ParseShow processes CLI arguments, returning a Show value and errors. +// If errors are encountered, a Show value is still returned representing +// the best effort interpretation of the arguments. +func ParseShow(args []string) (*Show, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + show := &Show{ + Path: "", + } + + var jsonOutput bool + cmdFlags := defaultFlagSet("show") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + if len(args) > 0 { + show.Path = args[0] + } + + switch { + case jsonOutput: + show.ViewType = ViewJSON + default: + show.ViewType = ViewHuman + } + + return show, diags +} diff --git a/command/arguments/show_test.go b/command/arguments/show_test.go new file mode 100644 index 000000000000..ef1c072bcd4a --- /dev/null +++ b/command/arguments/show_test.go @@ -0,0 +1,99 @@ +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestParseShow_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Show + }{ + "defaults": { + nil, + &Show{ + Path: "", + ViewType: ViewHuman, + }, + }, + "json": { + []string{"-json"}, + &Show{ + Path: "", + ViewType: ViewJSON, + }, + }, + "path": { + []string{"-json", "foo"}, + &Show{ + Path: "foo", + 
ViewType: ViewJSON, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseShow(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseShow_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Show + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Show{ + Path: "", + ViewType: ViewHuman, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "too many arguments": { + []string{"-json", "bar", "baz"}, + &Show{ + Path: "bar", + ViewType: ViewJSON, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseShow(tc.args) + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/command/arguments/test.go b/command/arguments/test.go new file mode 100644 index 000000000000..c49759a8e957 --- /dev/null +++ b/command/arguments/test.go @@ -0,0 +1,63 @@ +package arguments + +import ( + "flag" + "io/ioutil" + + "github.com/hashicorp/terraform/tfdiags" +) + +// Test represents the command line arguments for the "terraform test" command. +type Test struct { + Output TestOutput +} + +// TestOutput represents a subset of the arguments for "terraform test" +// related to how it presents its results. That is, it's the arguments that +// are relevant to the command's view rather than its controller. 
+type TestOutput struct { + // If not an empty string, JUnitXMLFile gives a filename where JUnit-style + // XML test result output should be written, in addition to the normal + // output printed to the standard output and error streams. + // (The typical usage pattern for tools that can consume this file format + // is to configure them to look for a separate test result file on disk + // after running the tests.) + JUnitXMLFile string +} + +// ParseTest interprets a slice of raw command line arguments into a +// Test value. +func ParseTest(args []string) (Test, tfdiags.Diagnostics) { + var ret Test + var diags tfdiags.Diagnostics + + // NOTE: ParseTest should still return at least a partial + // Test even on error, containing enough information for the + // command to report error diagnostics in a suitable way. + + f := flag.NewFlagSet("test", flag.ContinueOnError) + f.SetOutput(ioutil.Discard) + f.Usage = func() {} + f.StringVar(&ret.Output.JUnitXMLFile, "junit-xml", "", "Write a JUnit XML file describing the results") + + err := f.Parse(args) + if err != nil { + diags = diags.Append(err) + return ret, diags + } + + // We'll now discard all of the arguments that the flag package handled, + // and focus only on the positional arguments for the rest of the function. 
+ args = f.Args() + + if len(args) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid command arguments", + "The test command doesn't expect any positional command-line arguments.", + )) + return ret, diags + } + + return ret, diags +} diff --git a/command/arguments/test_test.go b/command/arguments/test_test.go new file mode 100644 index 000000000000..7cd671a77c6e --- /dev/null +++ b/command/arguments/test_test.go @@ -0,0 +1,83 @@ +package arguments + +import ( + "testing" + + "github.com/apparentlymart/go-shquot/shquot" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestParseTest(t *testing.T) { + tests := []struct { + Input []string + Want Test + WantError string + }{ + { + nil, + Test{ + Output: TestOutput{ + JUnitXMLFile: "", + }, + }, + ``, + }, + { + []string{"-invalid"}, + Test{ + Output: TestOutput{ + JUnitXMLFile: "", + }, + }, + `flag provided but not defined: -invalid`, + }, + { + []string{"-junit-xml=result.xml"}, + Test{ + Output: TestOutput{ + JUnitXMLFile: "result.xml", + }, + }, + ``, + }, + { + []string{"baz"}, + Test{ + Output: TestOutput{ + JUnitXMLFile: "", + }, + }, + `Invalid command arguments`, + }, + } + + baseCmdline := []string{"terraform", "test"} + for _, test := range tests { + name := shquot.POSIXShell(append(baseCmdline, test.Input...)) + t.Run(name, func(t *testing.T) { + t.Log(name) + got, diags := ParseTest(test.Input) + + if test.WantError != "" { + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want exactly 1\n%s", len(diags), diags.Err().Error()) + } + if diags[0].Severity() != tfdiags.Error { + t.Fatalf("got a warning; want an error\n%s", diags.Err().Error()) + } + if desc := diags[0].Description(); desc.Summary != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", desc.Summary, test.WantError) + } + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want none\n%s", len(diags), diags.Err().Error()) + } + } + + if diff := 
cmp.Diff(test.Want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/internal/command/arguments/types.go b/command/arguments/types.go similarity index 100% rename from internal/command/arguments/types.go rename to command/arguments/types.go diff --git a/command/arguments/validate.go b/command/arguments/validate.go new file mode 100644 index 000000000000..71b31e09aa93 --- /dev/null +++ b/command/arguments/validate.go @@ -0,0 +1,59 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Validate represents the command-line arguments for the validate command. +type Validate struct { + // Path is the directory containing the configuration to be validated. If + // unspecified, validate will use the current directory. + Path string + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType +} + +// ParseValidate processes CLI arguments, returning a Validate value and errors. +// If errors are encountered, a Validate value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseValidate(args []string) (*Validate, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + validate := &Validate{ + Path: ".", + } + + var jsonOutput bool + cmdFlags := defaultFlagSet("validate") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + if len(args) > 0 { + validate.Path = args[0] + } + + switch { + case jsonOutput: + validate.ViewType = ViewJSON + default: + validate.ViewType = ViewHuman + } + + return validate, diags +} diff --git a/command/arguments/validate_test.go b/command/arguments/validate_test.go new file mode 100644 index 000000000000..29b90d16c2d0 --- /dev/null +++ b/command/arguments/validate_test.go @@ -0,0 +1,99 @@ +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestParseValidate_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Validate + }{ + "defaults": { + nil, + &Validate{ + Path: ".", + ViewType: ViewHuman, + }, + }, + "json": { + []string{"-json"}, + &Validate{ + Path: ".", + ViewType: ViewJSON, + }, + }, + "path": { + []string{"-json", "foo"}, + &Validate{ + Path: "foo", + ViewType: ViewJSON, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseValidate(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseValidate_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Validate + wantDiags 
tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Validate{ + Path: ".", + ViewType: ViewHuman, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "too many arguments": { + []string{"-json", "bar", "baz"}, + &Validate{ + Path: "bar", + ViewType: ViewJSON, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseValidate(tc.args) + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/internal/command/arguments/view.go b/command/arguments/view.go similarity index 100% rename from internal/command/arguments/view.go rename to command/arguments/view.go diff --git a/internal/command/arguments/view_test.go b/command/arguments/view_test.go similarity index 100% rename from internal/command/arguments/view_test.go rename to command/arguments/view_test.go diff --git a/internal/command/autocomplete.go b/command/autocomplete.go similarity index 100% rename from internal/command/autocomplete.go rename to command/autocomplete.go diff --git a/internal/command/autocomplete_test.go b/command/autocomplete_test.go similarity index 100% rename from internal/command/autocomplete_test.go rename to command/autocomplete_test.go diff --git a/internal/command/cli_ui.go b/command/cli_ui.go similarity index 100% rename from internal/command/cli_ui.go rename to command/cli_ui.go diff --git a/internal/command/cli_ui_test.go b/command/cli_ui_test.go similarity index 100% rename from internal/command/cli_ui_test.go rename to command/cli_ui_test.go 
diff --git a/internal/command/cliconfig/cliconfig.go b/command/cliconfig/cliconfig.go similarity index 99% rename from internal/command/cliconfig/cliconfig.go rename to command/cliconfig/cliconfig.go index ba86c4027d64..fb15bba8432f 100644 --- a/internal/command/cliconfig/cliconfig.go +++ b/command/cliconfig/cliconfig.go @@ -21,7 +21,7 @@ import ( "github.com/hashicorp/hcl" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) const pluginCacheDirEnvVar = "TF_PLUGIN_CACHE_DIR" diff --git a/internal/command/cliconfig/cliconfig_test.go b/command/cliconfig/cliconfig_test.go similarity index 99% rename from internal/command/cliconfig/cliconfig_test.go rename to command/cliconfig/cliconfig_test.go index c94dae86f2f7..4a6e8175e01b 100644 --- a/internal/command/cliconfig/cliconfig_test.go +++ b/command/cliconfig/cliconfig_test.go @@ -8,7 +8,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // This is the directory where our test fixtures are. 
diff --git a/internal/command/cliconfig/config_unix.go b/command/cliconfig/config_unix.go similarity index 100% rename from internal/command/cliconfig/config_unix.go rename to command/cliconfig/config_unix.go diff --git a/internal/command/cliconfig/config_windows.go b/command/cliconfig/config_windows.go similarity index 100% rename from internal/command/cliconfig/config_windows.go rename to command/cliconfig/config_windows.go diff --git a/internal/command/cliconfig/credentials.go b/command/cliconfig/credentials.go similarity index 99% rename from internal/command/cliconfig/credentials.go rename to command/cliconfig/credentials.go index a85a1b7cd2c6..8332c42ed269 100644 --- a/internal/command/cliconfig/credentials.go +++ b/command/cliconfig/credentials.go @@ -15,9 +15,9 @@ import ( svchost "github.com/hashicorp/terraform-svchost" svcauth "github.com/hashicorp/terraform-svchost/auth" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - pluginDiscovery "github.com/hashicorp/terraform/internal/plugin/discovery" - "github.com/hashicorp/terraform/internal/replacefile" + "github.com/hashicorp/terraform/configs/hcl2shim" + pluginDiscovery "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/replacefile" ) // credentialsConfigFile returns the path for the special configuration file diff --git a/internal/command/cliconfig/credentials_test.go b/command/cliconfig/credentials_test.go similarity index 100% rename from internal/command/cliconfig/credentials_test.go rename to command/cliconfig/credentials_test.go diff --git a/internal/command/cliconfig/provider_installation.go b/command/cliconfig/provider_installation.go similarity index 98% rename from internal/command/cliconfig/provider_installation.go rename to command/cliconfig/provider_installation.go index 90570e653048..deef74fcc14e 100644 --- a/internal/command/cliconfig/provider_installation.go +++ b/command/cliconfig/provider_installation.go @@ -6,9 +6,9 @@ import ( 
"github.com/hashicorp/hcl" hclast "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/tfdiags" ) // ProviderInstallation is the structure of the "provider_installation" diff --git a/internal/command/cliconfig/provider_installation_test.go b/command/cliconfig/provider_installation_test.go similarity index 96% rename from internal/command/cliconfig/provider_installation_test.go rename to command/cliconfig/provider_installation_test.go index cd55c0b6b6f5..14c05786d894 100644 --- a/internal/command/cliconfig/provider_installation_test.go +++ b/command/cliconfig/provider_installation_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) func TestLoadConfig_providerInstallation(t *testing.T) { diff --git a/internal/command/cliconfig/testdata/config b/command/cliconfig/testdata/config similarity index 100% rename from internal/command/cliconfig/testdata/config rename to command/cliconfig/testdata/config diff --git a/internal/command/cliconfig/testdata/config-env b/command/cliconfig/testdata/config-env similarity index 100% rename from internal/command/cliconfig/testdata/config-env rename to command/cliconfig/testdata/config-env diff --git a/internal/command/cliconfig/testdata/credentials b/command/cliconfig/testdata/credentials similarity index 100% rename from internal/command/cliconfig/testdata/credentials rename to command/cliconfig/testdata/credentials diff --git a/internal/command/cliconfig/testdata/hosts b/command/cliconfig/testdata/hosts similarity index 100% rename from 
internal/command/cliconfig/testdata/hosts rename to command/cliconfig/testdata/hosts diff --git a/internal/command/cliconfig/testdata/provider-installation b/command/cliconfig/testdata/provider-installation similarity index 100% rename from internal/command/cliconfig/testdata/provider-installation rename to command/cliconfig/testdata/provider-installation diff --git a/internal/command/cliconfig/testdata/provider-installation-errors b/command/cliconfig/testdata/provider-installation-errors similarity index 100% rename from internal/command/cliconfig/testdata/provider-installation-errors rename to command/cliconfig/testdata/provider-installation-errors diff --git a/internal/command/cliconfig/testdata/provider-installation.json b/command/cliconfig/testdata/provider-installation.json similarity index 100% rename from internal/command/cliconfig/testdata/provider-installation.json rename to command/cliconfig/testdata/provider-installation.json diff --git a/internal/command/clistate/local_state.go b/command/clistate/local_state.go similarity index 98% rename from internal/command/clistate/local_state.go rename to command/clistate/local_state.go index 7a0102c7025b..8301757097c6 100644 --- a/internal/command/clistate/local_state.go +++ b/command/clistate/local_state.go @@ -12,8 +12,8 @@ import ( "time" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/legacy/terraform" + "github.com/hashicorp/terraform/states/statemgr" ) // LocalState manages a state storage that is local to the filesystem. 
diff --git a/internal/command/clistate/local_state_lock_unix.go b/command/clistate/local_state_lock_unix.go similarity index 100% rename from internal/command/clistate/local_state_lock_unix.go rename to command/clistate/local_state_lock_unix.go diff --git a/internal/command/clistate/local_state_lock_windows.go b/command/clistate/local_state_lock_windows.go similarity index 100% rename from internal/command/clistate/local_state_lock_windows.go rename to command/clistate/local_state_lock_windows.go diff --git a/command/clistate/state.go b/command/clistate/state.go new file mode 100644 index 000000000000..5fd686263d7a --- /dev/null +++ b/command/clistate/state.go @@ -0,0 +1,190 @@ +// Package state exposes common helpers for working with state from the CLI. +// +// This is a separate package so that backends can use this for consistent +// messaging without creating a circular reference to the command package. +package clistate + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/helper/slowmessage" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" +) + +const ( + LockThreshold = 400 * time.Millisecond + LockErrorMessage = `Error message: %s + +Terraform acquires a state lock to protect the state from being written +by multiple users at the same time. Please resolve the issue above and try +again. For most commands, you can disable locking with the "-lock=false" +flag, but this is not recommended.` + + UnlockErrorMessage = `Error message: %s + +Terraform acquires a lock when accessing your state to prevent others +running Terraform to potentially modify the state at the same time. An +error occurred while releasing this lock. This could mean that the lock +did or did not release properly. If the lock didn't release properly, +Terraform may not be able to run future commands since it'll appear as if +the lock is held. 
+ +In this scenario, please call the "force-unlock" command to unlock the +state manually. This is a very dangerous operation since if it is done +erroneously it could result in two people modifying state at the same time. +Only call this command if you're certain that the unlock above failed and +that no one else is holding a lock.` +) + +// Locker allows for more convenient usage of the lower-level statemgr.Locker +// implementations. +// The statemgr.Locker API requires passing in a statemgr.LockInfo struct. Locker +// implementations are expected to create the required LockInfo struct when +// Lock is called, populate the Operation field with the "reason" string +// provided, and pass that on to the underlying statemgr.Locker. +// Locker implementations are also expected to store any state required to call +// Unlock, which is at a minimum the LockID string returned by the +// statemgr.Locker. +type Locker interface { + // Returns a shallow copy of the locker with its context changed to ctx. + WithContext(ctx context.Context) Locker + + // Lock the provided state manager, storing the reason string in the LockInfo. + Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics + + // Unlock the previously locked state. + Unlock() tfdiags.Diagnostics + + // Timeout returns the configured timeout duration + Timeout() time.Duration +} + +type locker struct { + mu sync.Mutex + ctx context.Context + timeout time.Duration + state statemgr.Locker + view views.StateLocker + lockID string +} + +var _ Locker = (*locker)(nil) + +// Create a new Locker. +// This Locker uses state.LockWithContext to retry the lock until the provided +// timeout is reached, or the context is canceled. Lock progress will be be +// reported to the user through the provided UI. 
+func NewLocker(timeout time.Duration, view views.StateLocker) Locker { + return &locker{ + ctx: context.Background(), + timeout: timeout, + view: view, + } +} + +// WithContext returns a new Locker with the specified context, copying the +// timeout and view parameters from the original Locker. +func (l *locker) WithContext(ctx context.Context) Locker { + if ctx == nil { + panic("nil context") + } + return &locker{ + ctx: ctx, + timeout: l.timeout, + view: l.view, + } +} + +// Locker locks the given state and outputs to the user if locking is taking +// longer than the threshold. The lock is retried until the context is +// cancelled. +func (l *locker) Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + l.mu.Lock() + defer l.mu.Unlock() + + l.state = s + + ctx, cancel := context.WithTimeout(l.ctx, l.timeout) + defer cancel() + + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = reason + + err := slowmessage.Do(LockThreshold, func() error { + id, err := statemgr.LockWithContext(ctx, s, lockInfo) + l.lockID = id + return err + }, l.view.Locking) + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error acquiring the state lock", + fmt.Sprintf(LockErrorMessage, err), + )) + } + + return diags +} + +func (l *locker) Unlock() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + l.mu.Lock() + defer l.mu.Unlock() + + if l.lockID == "" { + return diags + } + + err := slowmessage.Do(LockThreshold, func() error { + return l.state.Unlock(l.lockID) + }, l.view.Unlocking) + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error releasing the state lock", + fmt.Sprintf(UnlockErrorMessage, err), + )) + } + + return diags + +} + +func (l *locker) Timeout() time.Duration { + return l.timeout +} + +type noopLocker struct{} + +// NewNoopLocker returns a valid Locker that does nothing. 
+func NewNoopLocker() Locker { + return noopLocker{} +} + +var _ Locker = noopLocker{} + +func (l noopLocker) WithContext(ctx context.Context) Locker { + return l +} + +func (l noopLocker) Lock(statemgr.Locker, string) tfdiags.Diagnostics { + return nil +} + +func (l noopLocker) Unlock() tfdiags.Diagnostics { + return nil +} + +func (l noopLocker) Timeout() time.Duration { + return 0 +} diff --git a/command/clistate/state_test.go b/command/clistate/state_test.go new file mode 100644 index 000000000000..b7efb0760cd9 --- /dev/null +++ b/command/clistate/state_test.go @@ -0,0 +1,25 @@ +package clistate + +import ( + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" +) + +func TestUnlock(t *testing.T) { + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + + l := NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) + l.Lock(statemgr.NewUnlockErrorFull(nil, nil), "test-lock") + + diags := l.Unlock() + if diags.HasErrors() { + t.Log(diags.Err().Error()) + } else { + t.Error("expected error") + } +} diff --git a/internal/command/command.go b/command/command.go similarity index 100% rename from internal/command/command.go rename to command/command.go diff --git a/internal/command/command_test.go b/command/command_test.go similarity index 96% rename from internal/command/command_test.go rename to command/command_test.go index 7998a554224a..b98e44e63683 100644 --- a/internal/command/command_test.go +++ b/command/command_test.go @@ -23,29 +23,29 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" - backendInit "github.com/hashicorp/terraform/internal/backend/init" - backendLocal "github.com/hashicorp/terraform/internal/backend/local" - 
"github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/command/workdir" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/initwd" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" - _ "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/command/workdir" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/initwd" + legacy "github.com/hashicorp/terraform/legacy/terraform" + _ "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/registry" + 
"github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/version" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/console.go b/command/console.go similarity index 94% rename from internal/command/console.go rename to command/console.go index b3d83087620f..0b49500048a1 100644 --- a/internal/command/console.go +++ b/command/console.go @@ -6,12 +6,12 @@ import ( "os" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/repl" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/console_interactive.go b/command/console_interactive.go similarity index 95% rename from internal/command/console_interactive.go rename to command/console_interactive.go index 32cc3a9efcce..c1b252760658 100644 --- a/internal/command/console_interactive.go +++ b/command/console_interactive.go @@ -11,7 +11,7 @@ import ( "io" "os" - "github.com/hashicorp/terraform/internal/repl" + "github.com/hashicorp/terraform/repl" "github.com/chzyer/readline" "github.com/mitchellh/cli" diff --git a/internal/command/console_interactive_solaris.go b/command/console_interactive_solaris.go similarity index 89% rename from internal/command/console_interactive_solaris.go rename to command/console_interactive_solaris.go index b6e5d4d73fbb..f218f897f57e 100644 --- 
a/internal/command/console_interactive_solaris.go +++ b/command/console_interactive_solaris.go @@ -6,7 +6,7 @@ package command import ( "fmt" - "github.com/hashicorp/terraform/internal/repl" + "github.com/hashicorp/terraform/repl" "github.com/mitchellh/cli" ) diff --git a/internal/command/console_test.go b/command/console_test.go similarity index 98% rename from internal/command/console_test.go rename to command/console_test.go index de8060c5b587..2a460b191c8f 100644 --- a/internal/command/console_test.go +++ b/command/console_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/e2etest/.gitignore b/command/e2etest/.gitignore similarity index 100% rename from internal/command/e2etest/.gitignore rename to command/e2etest/.gitignore diff --git a/internal/command/e2etest/automation_test.go b/command/e2etest/automation_test.go similarity index 98% rename from internal/command/e2etest/automation_test.go rename to command/e2etest/automation_test.go index 02652927d750..1a462177b33f 100644 --- a/internal/command/e2etest/automation_test.go +++ b/command/e2etest/automation_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/plans" ) // The tests in this file run through different scenarios recommended in our diff --git a/internal/command/e2etest/doc.go b/command/e2etest/doc.go similarity index 94% rename from internal/command/e2etest/doc.go rename to command/e2etest/doc.go index 056a43aa3ffe..702992f3de7e 100644 --- a/internal/command/e2etest/doc.go +++ b/command/e2etest/doc.go @@ -12,7 +12,7 @@ // 
These tests can be used in two ways. The simplest way is to just run them // with "go test" as normal: // -// go test -v github.com/hashicorp/terraform/internal/command/e2etest +// go test -v github.com/hashicorp/terraform/command/e2etest // // This will compile on the fly a Terraform binary and run the tests against // it. diff --git a/command/e2etest/init_test.go b/command/e2etest/init_test.go new file mode 100644 index 000000000000..37acac31656b --- /dev/null +++ b/command/e2etest/init_test.go @@ -0,0 +1,408 @@ +package e2etest + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/hashicorp/terraform/e2e" +) + +func TestInitProviders(t *testing.T) { + t.Parallel() + + // This test reaches out to releases.hashicorp.com to download the + // template provider, so it can only run if network access is allowed. + // We intentionally don't try to stub this here, because there's already + // a stubbed version of this in the "command" package and so the goal here + // is to test the interaction with the real repository. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "template-provider") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "Terraform has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing hashicorp/template v") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + if !strings.Contains(stdout, "Terraform has created a lock file") { + t.Errorf("lock file notification is missing from output:\n%s", stdout) + } + +} + +func TestInitProvidersInternal(t *testing.T) { + t.Parallel() + + // This test should _not_ reach out anywhere because the "terraform" + // provider is internal to the core terraform binary. + + fixturePath := filepath.Join("testdata", "terraform-provider") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "Terraform has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing hashicorp/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. + t.Errorf("provider download message appeared in output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing terraform.io/builtin/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. 
+ t.Errorf("provider download message appeared in output:\n%s", stdout) + } +} + +func TestInitProvidersVendored(t *testing.T) { + t.Parallel() + + // This test will try to reach out to registry.terraform.io as one of the + // possible installation locations for + // hashicorp/null, where it will find that + // versions do exist but will ultimately select the version that is + // vendored due to the version constraint. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "vendored-provider") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch") + wantMachineDir := tf.Path("terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "Terraform has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing hashicorp/null v1.0.0+local") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + +} + +func TestInitProvidersLocalOnly(t *testing.T) { + t.Parallel() + + // This test should not reach out to the network if it is behaving as + // intended. 
If it _does_ try to access an upstream registry and encounter + // an error doing so then that's a legitimate test failure that should be + // fixed. (If it incorrectly reaches out anywhere then it's likely to be + // to the host "example.com", which is the placeholder domain we use in + // the test fixture.) + + fixturePath := filepath.Join("testdata", "local-only-provider") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + // If you run this test on a workstation with a plugin-cache directory + // configured, it will leave a bad directory behind and terraform init will + // not work until you remove it. + // + // To avoid this, we will "zero out" any existing cli config file. + tf.AddEnv("TF_CLI_CONFIG_FILE=") + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch") + wantMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "Terraform has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a conflicting copy of the plugin in one of the global plugin search dirs)") + } +} + +func TestInitProvidersCustomMethod(t *testing.T) { + t.Parallel() + + // This test should not reach out to the 
network if it is behaving as + // intended. If it _does_ try to access an upstream registry and encounter + // an error doing so then that's a legitimate test failure that should be + // fixed. (If it incorrectly reaches out anywhere then it's likely to be + // to the host "example.com", which is the placeholder domain we use in + // the test fixture.) + + for _, configFile := range []string{"cliconfig.tfrc", "cliconfig.tfrc.json"} { + t.Run(configFile, func(t *testing.T) { + fixturePath := filepath.Join("testdata", "custom-provider-install-method") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch") + wantMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // We'll use a local CLI configuration file taken from our fixture + // directory so we can force a custom installation method config. 
+ tf.AddEnv("TF_CLI_CONFIG_FILE=" + tf.Path(configFile)) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "Terraform has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + } + }) + } +} + +func TestInitProviders_pluginCache(t *testing.T) { + t.Parallel() + + // This test reaches out to releases.hashicorp.com to access plugin + // metadata, and download the null plugin, though the template plugin + // should come from local cache. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "plugin-cache") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch") + wantMachineDir := tf.Path("cache/registry.terraform.io/hashicorp/template/2.1.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + cmd := tf.Cmd("init") + + // convert the slashes if building for windows. 
+ p := filepath.FromSlash("./cache") + cmd.Env = append(cmd.Env, "TF_PLUGIN_CACHE_DIR="+p) + err = cmd.Run() + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + path := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/template/2.1.0/%s_%s/terraform-provider-template_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) + content, err := tf.ReadFile(path) + if err != nil { + t.Fatalf("failed to read installed plugin from %s: %s", path, err) + } + if strings.TrimSpace(string(content)) != "this is not a real plugin" { + t.Errorf("template plugin was not installed from local cache") + } + + nullLinkPath := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/null/2.1.0/%s_%s/terraform-provider-null_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) + if runtime.GOOS == "windows" { + nullLinkPath = nullLinkPath + ".exe" + } + if !tf.FileExists(nullLinkPath) { + t.Errorf("null plugin was not installed into %s", nullLinkPath) + } + + nullCachePath := filepath.FromSlash(fmt.Sprintf("cache/registry.terraform.io/hashicorp/null/2.1.0/%s_%s/terraform-provider-null_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) + if runtime.GOOS == "windows" { + nullCachePath = nullCachePath + ".exe" + } + if !tf.FileExists(nullCachePath) { + t.Errorf("null plugin is not in cache after install. expected in: %s", nullCachePath) + } +} + +func TestInit_fromModule(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.terraform.io and github.com to lookup + // and fetch a module. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "empty") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + cmd := tf.Cmd("init", "-from-module=hashicorp/vault/aws") + cmd.Stdin = nil + cmd.Stderr = &bytes.Buffer{} + + err := cmd.Run() + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + stderr := cmd.Stderr.(*bytes.Buffer).String() + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + content, err := tf.ReadFile("main.tf") + if err != nil { + t.Fatalf("failed to read main.tf: %s", err) + } + if !bytes.Contains(content, []byte("vault")) { + t.Fatalf("main.tf doesn't appear to be a vault configuration: \n%s", content) + } +} + +func TestInitProviderNotFound(t *testing.T) { + t.Parallel() + + // This test will reach out to registry.terraform.io as one of the possible + // installation locations for hashicorp/nonexist, which should not exist. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "provider-not-found") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + t.Run("registry provider not found", func(t *testing.T) { + _, stderr, err := tf.Run("init", "-no-color") + if err == nil { + t.Fatal("expected error, got success") + } + + oneLineStderr := strings.ReplaceAll(stderr, "\n", " ") + if !strings.Contains(oneLineStderr, "provider registry registry.terraform.io does not have a provider named registry.terraform.io/hashicorp/nonexist") { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + + if !strings.Contains(oneLineStderr, "All modules should specify their required_providers") { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + }) + + t.Run("local provider not found", func(t *testing.T) { + // The -plugin-dir directory must exist for the provider installer to search it. 
+ pluginDir := tf.Path("empty") + if err := os.Mkdir(pluginDir, os.ModePerm); err != nil { + t.Fatal(err) + } + + _, stderr, err := tf.Run("init", "-no-color", "-plugin-dir="+pluginDir) + if err == nil { + t.Fatal("expected error, got success") + } + + if !strings.Contains(stderr, "provider registry.terraform.io/hashicorp/nonexist was not\nfound in any of the search locations\n\n - "+pluginDir) { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + }) + + t.Run("special characters enabled", func(t *testing.T) { + _, stderr, err := tf.Run("init") + if err == nil { + t.Fatal("expected error, got success") + } + + expectedErr := `╷ +│ Error: Failed to query available provider packages +│` + ` ` + ` +│ Could not retrieve the list of available versions for provider +│ hashicorp/nonexist: provider registry registry.terraform.io does not have a +│ provider named registry.terraform.io/hashicorp/nonexist +│ +│ All modules should specify their required_providers so that external +│ consumers will get the correct providers when using a module. To see which +│ modules are currently depending on hashicorp/nonexist, run the following +│ command: +│ terraform providers +╵ + +` + if stripAnsi(stderr) != expectedErr { + t.Errorf("wrong output:\n%s", cmp.Diff(stripAnsi(stderr), expectedErr)) + } + }) +} + +func TestInitProviderWarnings(t *testing.T) { + t.Parallel() + + // This test will reach out to registry.terraform.io as one of the possible + // installation locations for hashicorp/nonexist, which should not exist. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "provider-warnings") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + stdout, _, err := tf.Run("init") + if err == nil { + t.Fatal("expected error, got success") + } + + if !strings.Contains(stdout, "This provider is archived and no longer needed.") { + t.Errorf("expected warning message is missing from output:\n%s", stdout) + } + +} diff --git a/command/e2etest/main_test.go b/command/e2etest/main_test.go new file mode 100644 index 000000000000..17927c7882f5 --- /dev/null +++ b/command/e2etest/main_test.go @@ -0,0 +1,76 @@ +package e2etest + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/terraform/e2e" +) + +var terraformBin string + +// canRunGoBuild is a short-term compromise to account for the fact that we +// have a small number of tests that work by building helper programs using +// "go build" at runtime, but we can't do that in our isolated test mode +// driven by the make-archive.sh script. +// +// FIXME: Rework this a bit so that we build the necessary helper programs +// (test plugins, etc) as part of the initial suite setup, and in the +// make-archive.sh script, so that we can run all of the tests in both +// situations with the tests just using the executable already built for +// them, as we do for terraformBin. +var canRunGoBuild bool + +func TestMain(m *testing.M) { + teardown := setup() + code := m.Run() + teardown() + os.Exit(code) +} + +func setup() func() { + if terraformBin != "" { + // this is pre-set when we're running in a binary produced from + // the make-archive.sh script, since that is for testing an + // executable obtained from a real release package. However, we do + // need to turn it into an absolute path so that we can find it + // when we change the working directory during tests. 
+ var err error + terraformBin, err = filepath.Abs(terraformBin) + if err != nil { + panic(fmt.Sprintf("failed to find absolute path of terraform executable: %s", err)) + } + return func() {} + } + + tmpFilename := e2e.GoBuild("github.com/hashicorp/terraform", "terraform") + + // Make the executable available for use in tests + terraformBin = tmpFilename + + // Tests running in the ad-hoc testing mode are allowed to use "go build" + // and similar to produce other test executables. + // (See the comment on this variable's declaration for more information.) + canRunGoBuild = true + + return func() { + os.Remove(tmpFilename) + } +} + +func canAccessNetwork() bool { + // We re-use the flag normally used for acceptance tests since that's + // established as a way to opt-in to reaching out to real systems that + // may suffer transient errors. + return os.Getenv("TF_ACC") != "" +} + +func skipIfCannotAccessNetwork(t *testing.T) { + t.Helper() + + if !canAccessNetwork() { + t.Skip("network access not allowed; use TF_ACC=1 to enable") + } +} diff --git a/internal/command/e2etest/make-archive.sh b/command/e2etest/make-archive.sh similarity index 100% rename from internal/command/e2etest/make-archive.sh rename to command/e2etest/make-archive.sh diff --git a/internal/command/e2etest/module_archive_test.go b/command/e2etest/module_archive_test.go similarity index 93% rename from internal/command/e2etest/module_archive_test.go rename to command/e2etest/module_archive_test.go index cb6a2979fde9..1a27e89ba836 100644 --- a/internal/command/e2etest/module_archive_test.go +++ b/command/e2etest/module_archive_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" + "github.com/hashicorp/terraform/e2e" ) func TestInitModuleArchive(t *testing.T) { diff --git a/internal/command/e2etest/primary_test.go b/command/e2etest/primary_test.go similarity index 98% rename from internal/command/e2etest/primary_test.go rename to 
command/e2etest/primary_test.go index bd570dd4ff6c..c3ff04635f49 100644 --- a/internal/command/e2etest/primary_test.go +++ b/command/e2etest/primary_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/plans" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/e2etest/provider_dev_test.go b/command/e2etest/provider_dev_test.go similarity index 96% rename from internal/command/e2etest/provider_dev_test.go rename to command/e2etest/provider_dev_test.go index 1f6c9fb0d950..33de4d1b9f84 100644 --- a/internal/command/e2etest/provider_dev_test.go +++ b/command/e2etest/provider_dev_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" + "github.com/hashicorp/terraform/e2e" ) // TestProviderDevOverrides is a test for the special dev_overrides setting @@ -40,7 +40,7 @@ func TestProviderDevOverrides(t *testing.T) { // such as if it stops being buildable into an independent executable. 
providerExeDir := filepath.Join(tf.WorkDir(), "pkgdir") providerExePrefix := filepath.Join(providerExeDir, "terraform-provider-test_") - providerExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple/main", providerExePrefix) + providerExe := e2e.GoBuild("github.com/hashicorp/terraform/provider-simple/main", providerExePrefix) t.Logf("temporary provider executable is %s", providerExe) err := ioutil.WriteFile(filepath.Join(tf.WorkDir(), "dev.tfrc"), []byte(fmt.Sprintf(` diff --git a/internal/command/e2etest/provider_plugin_test.go b/command/e2etest/provider_plugin_test.go similarity index 93% rename from internal/command/e2etest/provider_plugin_test.go rename to command/e2etest/provider_plugin_test.go index 49fa793dc1ad..c3b106bb159d 100644 --- a/internal/command/e2etest/provider_plugin_test.go +++ b/command/e2etest/provider_plugin_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/getproviders" ) // TestProviderProtocols verifies that Terraform can execute provider plugins @@ -30,10 +30,10 @@ func TestProviderProtocols(t *testing.T) { // actually run it. Here will build the simple and simple6 (built with // protocol v6) providers. 
simple6Provider := filepath.Join(tf.WorkDir(), "terraform-provider-simple6") - simple6ProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple-v6/main", simple6Provider) + simple6ProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/provider-simple-v6/main", simple6Provider) simpleProvider := filepath.Join(tf.WorkDir(), "terraform-provider-simple") - simpleProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple/main", simpleProvider) + simpleProviderExe := e2e.GoBuild("github.com/hashicorp/terraform/provider-simple/main", simpleProvider) // Move the provider binaries into a directory that we will point terraform // to using the -plugin-dir cli flag. diff --git a/internal/command/e2etest/providers_mirror_test.go b/command/e2etest/providers_mirror_test.go similarity index 98% rename from internal/command/e2etest/providers_mirror_test.go rename to command/e2etest/providers_mirror_test.go index 08c54f4c68fa..296c4d4e683b 100644 --- a/internal/command/e2etest/providers_mirror_test.go +++ b/command/e2etest/providers_mirror_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/e2e" + "github.com/hashicorp/terraform/e2e" ) // The tests in this file are for the "terraform providers mirror" command, diff --git a/internal/command/e2etest/providers_tamper_test.go b/command/e2etest/providers_tamper_test.go similarity index 98% rename from internal/command/e2etest/providers_tamper_test.go rename to command/e2etest/providers_tamper_test.go index 0f4e8312205f..f21b4f762200 100644 --- a/internal/command/e2etest/providers_tamper_test.go +++ b/command/e2etest/providers_tamper_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/getproviders" ) // TestProviderTampering tests various ways 
 that the provider plugins in the diff --git a/internal/command/e2etest/provisioner_plugin_test.go b/command/e2etest/provisioner_plugin_test.go similarity index 92% rename from internal/command/e2etest/provisioner_plugin_test.go rename to command/e2etest/provisioner_plugin_test.go index 3f5d312b21c9..db234eb620d8 100644 --- a/internal/command/e2etest/provisioner_plugin_test.go +++ b/command/e2etest/provisioner_plugin_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/e2e" + "github.com/hashicorp/terraform/e2e" ) // TestProvisionerPlugin is a test that terraform can execute a 3rd party @@ -34,7 +34,7 @@ func TestProvisionerPlugin(t *testing.T) { // to actually run it. Here will build the local-exec provisioner into a // binary called test-provisioner provisionerExePrefix := filepath.Join(tf.WorkDir(), "terraform-provisioner-test_") - provisionerExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provisioner-local-exec/main", provisionerExePrefix) + provisionerExe := e2e.GoBuild("github.com/hashicorp/terraform/provisioner-local-exec/main", provisionerExePrefix) // provisioners must use the old binary name format, so rename this binary newExe := filepath.Join(tf.WorkDir(), "terraform-provisioner-test") diff --git a/command/e2etest/provisioner_test.go b/command/e2etest/provisioner_test.go new file mode 100644 index 000000000000..27759f124996 --- /dev/null +++ b/command/e2etest/provisioner_test.go @@ -0,0 +1,43 @@ +package e2etest + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/e2e" +) + +// TestProvisioner is a test that terraform can execute a 3rd party +// provisioner plugin. +func TestProvisioner(t *testing.T) { + t.Parallel() + + // This test reaches out to releases.hashicorp.com to download the + // template and null providers, so it can only run if network access is + // allowed. 
+ skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(t, terraformBin, "testdata/provisioner") + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "HelloProvisioner") { + t.Fatalf("missing provisioner output:\n%s", stdout) + } +} diff --git a/internal/command/e2etest/remote_state_test.go b/command/e2etest/remote_state_test.go similarity index 92% rename from internal/command/e2etest/remote_state_test.go rename to command/e2etest/remote_state_test.go index 16b9d5a3f71b..bbb95067205c 100644 --- a/internal/command/e2etest/remote_state_test.go +++ b/command/e2etest/remote_state_test.go @@ -4,7 +4,7 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/terraform/internal/e2e" + "github.com/hashicorp/terraform/e2e" ) func TestTerraformProviderRead(t *testing.T) { diff --git a/internal/command/e2etest/strip_ansi.go b/command/e2etest/strip_ansi.go similarity index 100% rename from internal/command/e2etest/strip_ansi.go rename to command/e2etest/strip_ansi.go diff --git a/command/e2etest/terraform_test.go b/command/e2etest/terraform_test.go new file mode 100644 index 000000000000..f139361eeccc --- /dev/null +++ b/command/e2etest/terraform_test.go @@ -0,0 +1,55 @@ +package e2etest + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/e2e" +) + +func TestTerraformProviderData(t *testing.T) { + + fixturePath := filepath.Join("testdata", "terraform-managed-data") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + _, stderr, err := tf.Run("init", "-input=false") + if err != 
nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + stdout, stderr, err := tf.Run("plan", "-out=tfplan", "-input=false") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "4 to add, 0 to change, 0 to destroy") { + t.Errorf("incorrect plan tally; want 4 to add:\n%s", stdout) + } + + stdout, stderr, err = tf.Run("apply", "-input=false", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 4 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 4 added:\n%s", stdout) + } + + state, err := tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + // we'll check the final output to validate the resources + d := state.Module(addrs.RootModuleInstance).OutputValues["d"].Value + input := d.GetAttr("input") + output := d.GetAttr("output") + if input.IsNull() { + t.Fatal("missing input from resource d") + } + if !input.RawEquals(output) { + t.Fatalf("input %#v does not equal output %#v\n", input, output) + } +} diff --git a/internal/command/e2etest/testdata/chdir-option/subdir/main.tf b/command/e2etest/testdata/chdir-option/subdir/main.tf similarity index 100% rename from internal/command/e2etest/testdata/chdir-option/subdir/main.tf rename to command/e2etest/testdata/chdir-option/subdir/main.tf diff --git a/internal/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc b/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc similarity index 100% rename from internal/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc rename to command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc diff --git a/internal/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json b/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json similarity index 
100% rename from internal/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json rename to command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json diff --git a/internal/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 b/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 similarity index 100% rename from internal/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 rename to command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 diff --git a/internal/command/e2etest/testdata/custom-provider-install-method/main.tf b/command/e2etest/testdata/custom-provider-install-method/main.tf similarity index 100% rename from internal/command/e2etest/testdata/custom-provider-install-method/main.tf rename to command/e2etest/testdata/custom-provider-install-method/main.tf diff --git a/internal/command/e2etest/testdata/empty/.exists b/command/e2etest/testdata/empty/.exists similarity index 100% rename from internal/command/e2etest/testdata/empty/.exists rename to command/e2etest/testdata/empty/.exists diff --git a/internal/command/e2etest/testdata/full-workflow-null/main.tf b/command/e2etest/testdata/full-workflow-null/main.tf similarity index 100% rename from internal/command/e2etest/testdata/full-workflow-null/main.tf rename to command/e2etest/testdata/full-workflow-null/main.tf diff --git a/internal/command/e2etest/testdata/local-only-provider/main.tf b/command/e2etest/testdata/local-only-provider/main.tf similarity index 100% rename from internal/command/e2etest/testdata/local-only-provider/main.tf rename to 
command/e2etest/testdata/local-only-provider/main.tf diff --git a/internal/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 b/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 similarity index 100% rename from internal/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 rename to command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 diff --git a/internal/command/e2etest/testdata/module-archive/main.tf b/command/e2etest/testdata/module-archive/main.tf similarity index 100% rename from internal/command/e2etest/testdata/module-archive/main.tf rename to command/e2etest/testdata/module-archive/main.tf diff --git a/internal/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl b/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl similarity index 100% rename from internal/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl rename to command/e2etest/testdata/plugin-cache/.terraform.lock.hcl diff --git a/internal/command/e2etest/testdata/plugin-cache/cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 b/command/e2etest/testdata/plugin-cache/cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 similarity index 100% rename from internal/command/e2etest/testdata/plugin-cache/cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 rename to command/e2etest/testdata/plugin-cache/cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 diff --git 
a/internal/command/e2etest/testdata/plugin-cache/main.tf b/command/e2etest/testdata/plugin-cache/main.tf similarity index 100% rename from internal/command/e2etest/testdata/plugin-cache/main.tf rename to command/e2etest/testdata/plugin-cache/main.tf diff --git a/internal/command/e2etest/testdata/provider-dev-override/pkgdir/.exists b/command/e2etest/testdata/provider-dev-override/pkgdir/.exists similarity index 100% rename from internal/command/e2etest/testdata/provider-dev-override/pkgdir/.exists rename to command/e2etest/testdata/provider-dev-override/pkgdir/.exists diff --git a/internal/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf b/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf rename to command/e2etest/testdata/provider-dev-override/provider-dev-override.tf diff --git a/internal/command/e2etest/testdata/provider-not-found-non-default/main.tf b/command/e2etest/testdata/provider-not-found-non-default/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-not-found-non-default/main.tf rename to command/e2etest/testdata/provider-not-found-non-default/main.tf diff --git a/internal/command/e2etest/testdata/provider-not-found/main.tf b/command/e2etest/testdata/provider-not-found/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-not-found/main.tf rename to command/e2etest/testdata/provider-not-found/main.tf diff --git a/internal/command/e2etest/testdata/provider-plugin/main.tf b/command/e2etest/testdata/provider-plugin/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-plugin/main.tf rename to command/e2etest/testdata/provider-plugin/main.tf diff --git a/internal/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf 
b/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf rename to command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf diff --git a/internal/command/e2etest/testdata/provider-warnings/main.tf b/command/e2etest/testdata/provider-warnings/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provider-warnings/main.tf rename to command/e2etest/testdata/provider-warnings/main.tf diff --git a/internal/command/e2etest/testdata/provisioner-plugin/main.tf b/command/e2etest/testdata/provisioner-plugin/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provisioner-plugin/main.tf rename to command/e2etest/testdata/provisioner-plugin/main.tf diff --git a/internal/command/e2etest/testdata/provisioner/main.tf b/command/e2etest/testdata/provisioner/main.tf similarity index 100% rename from internal/command/e2etest/testdata/provisioner/main.tf rename to command/e2etest/testdata/provisioner/main.tf diff --git a/internal/command/e2etest/testdata/template-provider/main.tf b/command/e2etest/testdata/template-provider/main.tf similarity index 100% rename from internal/command/e2etest/testdata/template-provider/main.tf rename to command/e2etest/testdata/template-provider/main.tf diff --git a/internal/command/e2etest/testdata/terraform-managed-data/main.tf b/command/e2etest/testdata/terraform-managed-data/main.tf similarity index 100% rename from internal/command/e2etest/testdata/terraform-managed-data/main.tf rename to command/e2etest/testdata/terraform-managed-data/main.tf diff --git a/internal/command/e2etest/testdata/terraform-provider/main.tf b/command/e2etest/testdata/terraform-provider/main.tf similarity index 100% rename from internal/command/e2etest/testdata/terraform-provider/main.tf rename to command/e2etest/testdata/terraform-provider/main.tf diff --git 
a/internal/command/e2etest/testdata/terraform-provider/test.tfstate b/command/e2etest/testdata/terraform-provider/test.tfstate similarity index 100% rename from internal/command/e2etest/testdata/terraform-provider/test.tfstate rename to command/e2etest/testdata/terraform-provider/test.tfstate diff --git a/internal/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/.terraform.lock.hcl b/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/.terraform.lock.hcl similarity index 100% rename from internal/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/.terraform.lock.hcl rename to command/e2etest/testdata/terraform-providers-mirror-with-lock-file/.terraform.lock.hcl diff --git a/internal/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/terraform-providers-mirror.tf b/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/terraform-providers-mirror.tf similarity index 100% rename from internal/command/e2etest/testdata/terraform-providers-mirror-with-lock-file/terraform-providers-mirror.tf rename to command/e2etest/testdata/terraform-providers-mirror-with-lock-file/terraform-providers-mirror.tf diff --git a/internal/command/e2etest/testdata/terraform-providers-mirror/terraform-providers-mirror.tf b/command/e2etest/testdata/terraform-providers-mirror/terraform-providers-mirror.tf similarity index 100% rename from internal/command/e2etest/testdata/terraform-providers-mirror/terraform-providers-mirror.tf rename to command/e2etest/testdata/terraform-providers-mirror/terraform-providers-mirror.tf diff --git a/internal/command/e2etest/testdata/test-provider/main.tf b/command/e2etest/testdata/test-provider/main.tf similarity index 100% rename from internal/command/e2etest/testdata/test-provider/main.tf rename to command/e2etest/testdata/test-provider/main.tf diff --git a/internal/command/e2etest/testdata/vendored-provider/main.tf b/command/e2etest/testdata/vendored-provider/main.tf similarity index 
100% rename from internal/command/e2etest/testdata/vendored-provider/main.tf rename to command/e2etest/testdata/vendored-provider/main.tf diff --git a/internal/command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 b/command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 similarity index 100% rename from internal/command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 rename to command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 diff --git a/internal/command/e2etest/unmanaged_test.go b/command/e2etest/unmanaged_test.go similarity index 94% rename from internal/command/e2etest/unmanaged_test.go rename to command/e2etest/unmanaged_test.go index e67e758a3308..1c1f69c33807 100644 --- a/internal/command/e2etest/unmanaged_test.go +++ b/command/e2etest/unmanaged_test.go @@ -11,14 +11,14 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/internal/grpcwrap" - tfplugin5 "github.com/hashicorp/terraform/internal/plugin" - tfplugin "github.com/hashicorp/terraform/internal/plugin6" - simple5 "github.com/hashicorp/terraform/internal/provider-simple" - simple "github.com/hashicorp/terraform/internal/provider-simple-v6" - proto5 "github.com/hashicorp/terraform/internal/tfplugin5" - proto "github.com/hashicorp/terraform/internal/tfplugin6" + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/grpcwrap" + tfplugin5 "github.com/hashicorp/terraform/plugin" + tfplugin "github.com/hashicorp/terraform/plugin6" + simple5 "github.com/hashicorp/terraform/provider-simple" + simple 
"github.com/hashicorp/terraform/provider-simple-v6" + proto5 "github.com/hashicorp/terraform/tfplugin5" + proto "github.com/hashicorp/terraform/tfplugin6" ) // The tests in this file are for the "unmanaged provider workflow", which diff --git a/command/e2etest/version_test.go b/command/e2etest/version_test.go new file mode 100644 index 000000000000..e518feb37763 --- /dev/null +++ b/command/e2etest/version_test.go @@ -0,0 +1,94 @@ +package e2etest + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/hashicorp/terraform/e2e" + "github.com/hashicorp/terraform/version" +) + +func TestVersion(t *testing.T) { + // Along with testing the "version" command in particular, this serves + // as a good smoke test for whether the Terraform binary can even be + // compiled and run, since it doesn't require any external network access + // to do its job. + + t.Parallel() + + fixturePath := filepath.Join("testdata", "empty") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantVersion := fmt.Sprintf("Terraform v%s", version.String()) + if !strings.Contains(stdout, wantVersion) { + t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) + } +} + +func TestVersionWithProvider(t *testing.T) { + // This is a more elaborate use of "version" that shows the selected + // versions of plugins too. + t.Parallel() + + // This test reaches out to releases.hashicorp.com to download the + // template and null providers, so it can only run if network access is + // allowed. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "template-provider") + tf := e2e.NewBinary(t, terraformBin, fixturePath) + + // Initial run (before "init") should work without error but will not + // include the provider version, since we've not "locked" one yet. + { + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantVersion := fmt.Sprintf("Terraform v%s", version.String()) + if !strings.Contains(stdout, wantVersion) { + t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) + } + } + + { + _, _, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + } + + // After running init, we additionally include information about the + // selected version of the "template" provider. + { + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantMsg := "+ provider registry.terraform.io/hashicorp/template v" // we don't know which version we'll get here + if !strings.Contains(stdout, wantMsg) { + t.Errorf("output does not contain provider information %q:\n%s", wantMsg, stdout) + } + } +} diff --git a/internal/command/flag_kv.go b/command/flag_kv.go similarity index 100% rename from internal/command/flag_kv.go rename to command/flag_kv.go diff --git a/internal/command/flag_kv_test.go b/command/flag_kv_test.go similarity index 100% rename from internal/command/flag_kv_test.go rename to command/flag_kv_test.go diff --git a/internal/command/fmt.go b/command/fmt.go similarity index 99% rename from internal/command/fmt.go rename to command/fmt.go index 64d12e796ff1..3f4d665ccd4e 100644 --- a/internal/command/fmt.go +++ b/command/fmt.go @@ -16,8 +16,8 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/mitchellh/cli" - 
"github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" ) const ( diff --git a/internal/command/fmt_test.go b/command/fmt_test.go similarity index 100% rename from internal/command/fmt_test.go rename to command/fmt_test.go diff --git a/command/format/diagnostic.go b/command/format/diagnostic.go new file mode 100644 index 000000000000..c979219417fb --- /dev/null +++ b/command/format/diagnostic.go @@ -0,0 +1,319 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +var disabledColorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, +} + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { + return DiagnosticFromJSON(viewsjson.NewDiagnostic(diag, sources), color, width) +} + +func DiagnosticFromJSON(diag *viewsjson.Diagnostic, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... 
+ return "" + } + + var buf bytes.Buffer + + // these leftRule* variables are markers for the beginning of the lines + // containing the diagnostic that are intended to help sighted users + // better understand the information hierarchy when diagnostics appear + // alongside other information or alongside other diagnostics. + // + // Without this, it seems (based on folks sharing incomplete messages when + // asking questions, or including extra content that's not part of the + // diagnostic) that some readers have trouble easily identifying which + // text belongs to the diagnostic and which does not. + var leftRuleLine, leftRuleStart, leftRuleEnd string + var leftRuleWidth int // in visual character cells + + switch diag.Severity { + case viewsjson.DiagnosticSeverityError: + buf.WriteString(color.Color("[bold][red]Error: [reset]")) + leftRuleLine = color.Color("[red]│[reset] ") + leftRuleStart = color.Color("[red]╷[reset]") + leftRuleEnd = color.Color("[red]╵[reset]") + leftRuleWidth = 2 + case viewsjson.DiagnosticSeverityWarning: + buf.WriteString(color.Color("[bold][yellow]Warning: [reset]")) + leftRuleLine = color.Color("[yellow]│[reset] ") + leftRuleStart = color.Color("[yellow]╷[reset]") + leftRuleEnd = color.Color("[yellow]╵[reset]") + leftRuleWidth = 2 + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. 
+ fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), diag.Summary) + + appendSourceSnippets(&buf, diag, color) + + if diag.Detail != "" { + paraWidth := width - leftRuleWidth - 1 // leave room for the left rule + if paraWidth > 0 { + lines := strings.Split(diag.Detail, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(paraWidth)) + } + fmt.Fprintf(&buf, "%s\n", line) + } + } else { + fmt.Fprintf(&buf, "%s\n", diag.Detail) + } + } + + // Before we return, we'll finally add the left rule prefixes to each + // line so that the overall message is visually delimited from what's + // around it. We'll do that by scanning over what we already generated + // and adding the prefix for each line. + var ruleBuf strings.Builder + sc := bufio.NewScanner(&buf) + ruleBuf.WriteString(leftRuleStart) + ruleBuf.WriteByte('\n') + for sc.Scan() { + line := sc.Text() + prefix := leftRuleLine + if line == "" { + // Don't print the space after the line if there would be nothing + // after it anyway. + prefix = strings.TrimSpace(prefix) + } + ruleBuf.WriteString(prefix) + ruleBuf.WriteString(line) + ruleBuf.WriteByte('\n') + } + ruleBuf.WriteString(leftRuleEnd) + ruleBuf.WriteByte('\n') + + return ruleBuf.String() +} + +// DiagnosticPlain is an alternative to Diagnostic which minimises the use of +// virtual terminal formatting sequences. +// +// It is intended for use in automation and other contexts in which diagnostic +// messages are parsed from the Terraform output. +func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width int) string { + return DiagnosticPlainFromJSON(viewsjson.NewDiagnostic(diag, sources), width) +} + +func DiagnosticPlainFromJSON(diag *viewsjson.Diagnostic, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... 
+ return "" + } + + var buf bytes.Buffer + + switch diag.Severity { + case viewsjson.DiagnosticSeverityError: + buf.WriteString("\nError: ") + case viewsjson.DiagnosticSeverityWarning: + buf.WriteString("\nWarning: ") + default: + buf.WriteString("\n") + } + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, "%s\n\n", diag.Summary) + + appendSourceSnippets(&buf, diag, disabledColorize) + + if diag.Detail != "" { + if width > 1 { + lines := strings.Split(diag.Detail, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(width-1)) + } + fmt.Fprintf(&buf, "%s\n", line) + } + } else { + fmt.Fprintf(&buf, "%s\n", diag.Detail) + } + } + + return buf.String() +} + +// DiagnosticWarningsCompact is an alternative to Diagnostic for when all of +// the given diagnostics are warnings and we want to show them compactly, +// with only two lines per warning and excluding all of the detail information. +// +// The caller may optionally pre-process the given diagnostics with +// ConsolidateWarnings, in which case this function will recognize consolidated +// messages and include an indication that they are consolidated. +// +// Do not pass non-warning diagnostics to this function, or the result will +// be nonsense. 
+func DiagnosticWarningsCompact(diags tfdiags.Diagnostics, color *colorstring.Colorize) string { + var b strings.Builder + b.WriteString(color.Color("[bold][yellow]Warnings:[reset]\n\n")) + for _, diag := range diags { + sources := tfdiags.WarningGroupSourceRanges(diag) + b.WriteString(fmt.Sprintf("- %s\n", diag.Description().Summary)) + if len(sources) > 0 { + mainSource := sources[0] + if mainSource.Subject != nil { + if len(sources) > 1 { + b.WriteString(fmt.Sprintf( + " on %s line %d (and %d more)\n", + mainSource.Subject.Filename, + mainSource.Subject.Start.Line, + len(sources)-1, + )) + } else { + b.WriteString(fmt.Sprintf( + " on %s line %d\n", + mainSource.Subject.Filename, + mainSource.Subject.Start.Line, + )) + } + } else if len(sources) > 1 { + b.WriteString(fmt.Sprintf( + " (%d occurences of this warning)\n", + len(sources), + )) + } + } + } + + return b.String() +} + +func appendSourceSnippets(buf *bytes.Buffer, diag *viewsjson.Diagnostic, color *colorstring.Colorize) { + if diag.Address != "" { + fmt.Fprintf(buf, " with %s,\n", diag.Address) + } + + if diag.Range == nil { + return + } + + if diag.Snippet == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. 
+ fmt.Fprintf(buf, " on %s line %d:\n (source code not available)\n", diag.Range.Filename, diag.Range.Start.Line) + } else { + snippet := diag.Snippet + code := snippet.Code + + var contextStr string + if snippet.Context != nil { + contextStr = fmt.Sprintf(", in %s", *snippet.Context) + } + fmt.Fprintf(buf, " on %s line %d%s:\n", diag.Range.Filename, diag.Range.Start.Line, contextStr) + + // Split the snippet and render the highlighted section with underlines + start := snippet.HighlightStartOffset + end := snippet.HighlightEndOffset + + // Only buggy diagnostics can have an end range before the start, but + // we need to ensure we don't crash here if that happens. + if end < start { + end = start + 1 + if end > len(code) { + end = len(code) + } + } + + // If either start or end is out of range for the code buffer then + // we'll cap them at the bounds just to avoid a panic, although + // this would happen only if there's a bug in the code generating + // the snippet objects. + if start < 0 { + start = 0 + } else if start > len(code) { + start = len(code) + } + if end < 0 { + end = 0 + } else if end > len(code) { + end = len(code) + } + + before, highlight, after := code[0:start], code[start:end], code[end:] + code = fmt.Sprintf(color.Color("%s[underline]%s[reset]%s"), before, highlight, after) + + // Split the snippet into lines and render one at a time + lines := strings.Split(code, "\n") + for i, line := range lines { + fmt.Fprintf( + buf, "%4d: %s\n", + snippet.StartLine+i, + line, + ) + } + + if len(snippet.Values) > 0 || (snippet.FunctionCall != nil && snippet.FunctionCall.Signature != nil) { + // The diagnostic may also have information about the dynamic + // values of relevant variables at the point of evaluation. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. 
+ values := make([]viewsjson.DiagnosticExpressionValue, len(snippet.Values)) + copy(values, snippet.Values) + sort.Slice(values, func(i, j int) bool { + return values[i].Traversal < values[j].Traversal + }) + + fmt.Fprint(buf, color.Color(" [dark_gray]├────────────────[reset]\n")) + if callInfo := snippet.FunctionCall; callInfo != nil && callInfo.Signature != nil { + + fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] while calling [bold]%s[reset]("), callInfo.CalledAs) + for i, param := range callInfo.Signature.Params { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(param.Name) + } + if param := callInfo.Signature.VariadicParam; param != nil { + if len(callInfo.Signature.Params) > 0 { + buf.WriteString(", ") + } + buf.WriteString(param.Name) + buf.WriteString("...") + } + buf.WriteString(")\n") + } + for _, value := range values { + fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] [bold]%s[reset] %s\n"), value.Traversal, value.Statement) + } + } + } + + buf.WriteByte('\n') +} diff --git a/command/format/diagnostic_test.go b/command/format/diagnostic_test.go new file mode 100644 index 000000000000..5c270423536f --- /dev/null +++ b/command/format/diagnostic_test.go @@ -0,0 +1,945 @@ +package format + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + + viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/lang/marks" + + "github.com/hashicorp/terraform/tfdiags" +) + +func TestDiagnostic(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + 
), + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]A sourceless error[reset] +[red]│[reset] +[red]│[reset] It has no source references but it +[red]│[reset] does have a pretty long detail that +[red]│[reset] should wrap over multiple lines. +[red]╵[reset] +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + `[yellow]╷[reset] +[yellow]│[reset] [bold][yellow]Warning: [reset][bold]A sourceless warning[reset] +[yellow]│[reset] +[yellow]│[reset] It has no source references but it +[yellow]│[reset] does have a pretty long detail that +[yellow]│[reset] should wrap over multiple lines. +[yellow]╵[reset] +`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is "blah" +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] 
[bold]boop.beep[reset] has a sensitive value +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is a string, known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] will be known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and function call annotation": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprLiteral(cty.True), + EvalContext: &hcl.EvalContext{ + Functions: map[string]function.Function{ + "beep": function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pos_param_0", + Type: cty.String, + }, + { + Name: "pos_param_1", + Type: cty.Number, + }, + }, + VarParam: &function.Parameter{ + Name: "var_param", + Type: cty.Bool, + }, + }), + }, + }, + // This is simulating what the HCL function call expression + // type would generate on evaluation, by implementing the + // same interface it uses. 
+ Extra: fakeDiagFunctionCallExtra("beep"), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] while calling [bold]beep[reset](pos_param_0, pos_param_1, var_param...) +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + } + + sources := map[string][]byte{ + "test.tf": []byte(`test source code`), + } + + // This empty Colorize just passes through all of the formatting codes + // untouched, because it doesn't define any formatting keywords. + colorize := &colorstring.Colorize{} + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(Diagnostic(diag, sources, colorize, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +func TestDiagnosticPlain(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Error: A sourceless error + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Warning: A sourceless warning + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. 
+`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? +`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is "blah" + +Whatever shall we do? 
+`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep has a sensitive value + +Whatever shall we do? +`, + }, + "error with source code subject and expression referring to sensitive value when not related to sensitivity": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? 
+`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is a string, known only after apply + +Whatever shall we do? +`, + }, + "error with source code subject and unknown string expression when problem isn't unknown-related": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is a string + +Whatever shall we do? 
+`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep will be known only after apply + +Whatever shall we do? +`, + }, + "error with source code subject and unknown expression of unknown type when problem isn't unknown-related": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? 
+`, + }, + } + + sources := map[string][]byte{ + "test.tf": []byte(`test source code`), + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(DiagnosticPlain(diag, sources, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +func TestDiagnosticWarningsCompact(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.SimpleWarning("foo")) + diags = diags.Append(tfdiags.SimpleWarning("foo")) + diags = diags.Append(tfdiags.SimpleWarning("bar")) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source foo", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 5}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 5}, + }, + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source foo", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 1, Byte: 7}, + End: hcl.Pos{Line: 3, Column: 1, Byte: 7}, + }, + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source bar", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source2.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 1}, + End: hcl.Pos{Line: 1, Column: 1, Byte: 1}, + }, + }) + + // ConsolidateWarnings groups together the ones + // that have source location information and that + // have the same summary text. + diags = diags.ConsolidateWarnings(1) + + // A zero-value Colorize just passes all the formatting + // codes back to us, so we can test them literally. 
+ got := DiagnosticWarningsCompact(diags, &colorstring.Colorize{}) + want := `[bold][yellow]Warnings:[reset] + +- foo +- foo +- bar +- source foo + on source.tf line 2 (and 1 more) +- source bar + on source2.tf line 1 +` + if got != want { + t.Errorf( + "wrong result\ngot:\n%s\n\nwant:\n%s\n\ndiff:\n%s", + got, want, cmp.Diff(want, got), + ) + } +} + +// Test case via https://github.com/hashicorp/terraform/issues/21359 +func TestDiagnostic_nonOverlappingHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + End: hcl.Pos{Line: 4, Column: 2, Byte: 60}, + }, + }) + sources := map[string][]byte{ + "source.tf": []byte(`x = somefunc("testing", { + alpha = "foo" + beta = "bar" +}) +`), + } + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 1: +│ 1: x = somefunc("testing", { +│ 2: alpha = "foo" +│ 3: beta = "bar" +│ 4: }) +│ +│ ... 
+╵ +` + output := Diagnostic(diags[0], sources, color, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnostic_emptyOverlapHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + }) + sources := map[string][]byte{ + "source.tf": []byte(`variable "x" { + default = { + "foo" + } +`), + } + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 3, in variable "x": +│ 2: default = { +│ 3: "foo" +│ 4: } +│ +│ ... +╵ +` + output := Diagnostic(diags[0], sources, color, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_emptyOverlapHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + }) + sources := map[string][]byte{ + "source.tf": []byte(`variable "x" { + default = { + "foo" + } +`), + } + + expected := ` +Error: Some error + + on source.tf line 3, in variable "x": + 2: default = { + 3: "foo" + 4: } + +... 
+` + output := DiagnosticPlain(diags[0], sources, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnostic_wrapDetailIncludingCommand(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Everything went wrong", + Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", + }) + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Everything went wrong +│ +│ This is a very long sentence about whatever went wrong which is supposed +│ to wrap onto multiple lines. Thank-you very much for listening. +│ +│ To fix this, run this very long command: +│ terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces +│ +│ Here is a coda which is also long enough to wrap and so it should +│ eventually make it onto multiple lines. THE END +╵ +` + output := Diagnostic(diags[0], nil, color, 76) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_wrapDetailIncludingCommand(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Everything went wrong", + Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. 
Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", + }) + + expected := ` +Error: Everything went wrong + +This is a very long sentence about whatever went wrong which is supposed to +wrap onto multiple lines. Thank-you very much for listening. + +To fix this, run this very long command: + terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces + +Here is a coda which is also long enough to wrap and so it should +eventually make it onto multiple lines. THE END +` + output := DiagnosticPlain(diags[0], nil, 76) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +// Test cases covering invalid JSON diagnostics which should still render +// correctly. These JSON diagnostic values cannot be generated from the +// json.NewDiagnostic code path, but we may read and display JSON diagnostics +// in future from other sources. 
+func TestDiagnosticFromJSON_invalid(t *testing.T) { + tests := map[string]struct { + Diag *viewsjson.Diagnostic + Want string + }{ + "zero-value end range and highlight end byte": { + &viewsjson.Diagnostic{ + Severity: viewsjson.DiagnosticSeverityError, + Summary: "Bad end", + Detail: "It all went wrong.", + Range: &viewsjson.DiagnosticRange{ + Filename: "ohno.tf", + Start: viewsjson.Pos{Line: 1, Column: 23, Byte: 22}, + End: viewsjson.Pos{Line: 0, Column: 0, Byte: 0}, + }, + Snippet: &viewsjson.DiagnosticSnippet{ + Code: `resource "foo_bar "baz" {`, + StartLine: 1, + HighlightStartOffset: 22, + HighlightEndOffset: 0, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad end[reset] +[red]│[reset] +[red]│[reset] on ohno.tf line 1: +[red]│[reset] 1: resource "foo_bar "baz[underline]"[reset] { +[red]│[reset] +[red]│[reset] It all went wrong. +[red]╵[reset] +`, + }, + } + + // This empty Colorize just passes through all of the formatting codes + // untouched, because it doesn't define any formatting keywords. + colorize := &colorstring.Colorize{} + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := strings.TrimSpace(DiagnosticFromJSON(test.Diag, colorize, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +// fakeDiagFunctionCallExtra is a fake implementation of the interface that +// HCL uses to provide "extra information" associated with diagnostics that +// describe errors during a function call. 
+type fakeDiagFunctionCallExtra string + +var _ hclsyntax.FunctionCallDiagExtra = fakeDiagFunctionCallExtra("") + +func (e fakeDiagFunctionCallExtra) CalledFunctionName() string { + return string(e) +} + +func (e fakeDiagFunctionCallExtra) FunctionCallError() error { + return nil +} + +// diagnosticCausedByUnknown is a testing helper for exercising our logic +// for selectively showing unknown values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by unknown values. +type diagnosticCausedByUnknown bool + +var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) + +func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { + return bool(e) +} + +// diagnosticCausedBySensitive is a testing helper for exercising our logic +// for selectively showing sensitive values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by sensitive values. +type diagnosticCausedBySensitive bool + +var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) + +func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { + return bool(e) +} diff --git a/command/format/diff.go b/command/format/diff.go new file mode 100644 index 000000000000..bfa4c5d130af --- /dev/null +++ b/command/format/diff.go @@ -0,0 +1,2061 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/states" +) + +// DiffLanguage controls the description of the resource change reasons. 
+type DiffLanguage rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=DiffLanguage diff.go + +const ( + // DiffLanguageProposedChange indicates that the change is one which is + // planned to be applied. + DiffLanguageProposedChange DiffLanguage = 'P' + + // DiffLanguageDetectedDrift indicates that the change is detected drift + // from the configuration. + DiffLanguageDetectedDrift DiffLanguage = 'D' +) + +// ResourceChange returns a string representation of a change to a particular +// resource, for inclusion in user-facing plan output. +// +// The resource schema must be provided along with the change so that the +// formatted change can reflect the configuration structure for the associated +// resource. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. +func ResourceChange( + change *plans.ResourceInstanceChange, + schema *configschema.Block, + color *colorstring.Colorize, + language DiffLanguage, +) string { + addr := change.Addr + var buf bytes.Buffer + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: false, + } + } + + dispAddr := addr.String() + if change.DeposedKey != states.NotDeposed { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) + } + + switch change.Action { + case plans.Create: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be created"), dispAddr)) + case plans.Read: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be read during apply"), dispAddr)) + switch change.ActionReason { + case plans.ResourceInstanceReadBecauseConfigUnknown: + buf.WriteString("\n # (config refers to values not yet known)") + case plans.ResourceInstanceReadBecauseDependencyPending: + buf.WriteString("\n # (depends on a resource or a module with changes pending)") + } + case plans.Update: + switch language { + case DiffLanguageProposedChange: + 
buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be updated in-place"), dispAddr)) + case DiffLanguageDetectedDrift: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has changed"), dispAddr)) + default: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] update (unknown reason %s)"), dispAddr, language)) + } + case plans.CreateThenDelete, plans.DeleteThenCreate: + switch change.ActionReason { + case plans.ResourceInstanceReplaceBecauseTainted: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] is tainted, so must be [bold][red]replaced"), dispAddr)) + case plans.ResourceInstanceReplaceByRequest: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]replaced[reset], as requested"), dispAddr)) + case plans.ResourceInstanceReplaceByTriggers: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]replaced[reset] due to changes in replace_triggered_by"), dispAddr)) + default: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] must be [bold][red]replaced"), dispAddr)) + } + case plans.Delete: + switch language { + case DiffLanguageProposedChange: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]destroyed"), dispAddr)) + case DiffLanguageDetectedDrift: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has been deleted"), dispAddr)) + default: + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] delete (unknown reason %s)"), dispAddr, language)) + } + // We can sometimes give some additional detail about why we're + // proposing to delete. We show this as additional notes, rather than + // as additional wording in the main action statement, in an attempt + // to make the "will be destroyed" message prominent and consistent + // in all cases, for easier scanning of this often-risky action. 
+ switch change.ActionReason { + case plans.ResourceInstanceDeleteBecauseNoResourceConfig: + buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", addr.Resource.Resource)) + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + buf.WriteString(fmt.Sprintf("\n # (because %s was moved to %s, which is not in configuration)", change.PrevRunAddr, addr.Resource.Resource)) + case plans.ResourceInstanceDeleteBecauseNoModule: + // FIXME: Ideally we'd truncate addr.Module to reflect the earliest + // step that doesn't exist, so it's clearer which call this refers + // to, but we don't have enough information out here in the UI layer + // to decide that; only the "expander" in Terraform Core knows + // which module instance keys are actually declared. + buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", addr.Module)) + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + // We have some different variations of this one + switch addr.Resource.Key.(type) { + case nil: + buf.WriteString("\n # (because resource uses count or for_each)") + case addrs.IntKey: + buf.WriteString("\n # (because resource does not use count)") + case addrs.StringKey: + buf.WriteString("\n # (because resource does not use for_each)") + } + case plans.ResourceInstanceDeleteBecauseCountIndex: + buf.WriteString(fmt.Sprintf("\n # (because index %s is out of range for count)", addr.Resource.Key)) + case plans.ResourceInstanceDeleteBecauseEachKey: + buf.WriteString(fmt.Sprintf("\n # (because key %s is not in for_each map)", addr.Resource.Key)) + } + if change.DeposedKey != states.NotDeposed { + // Some extra context about this unusual situation. 
+ buf.WriteString(color.Color("\n # (left over from a partially-failed replacement of this instance)")) + } + case plans.NoOp: + if change.Moved() { + buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has moved to [bold]%s[reset]"), change.PrevRunAddr.String(), dispAddr)) + break + } + fallthrough + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString(color.Color("[reset]\n")) + + if change.Moved() && change.Action != plans.NoOp { + buf.WriteString(fmt.Sprintf(color.Color(" # [reset](moved from %s)\n"), change.PrevRunAddr.String())) + } + + if change.Moved() && change.Action == plans.NoOp { + buf.WriteString(" ") + } else { + buf.WriteString(color.Color(DiffActionSymbol(change.Action)) + " ") + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + buf.WriteString(fmt.Sprintf( + "resource %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + case addrs.DataResourceMode: + buf.WriteString(fmt.Sprintf( + "data %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + default: + // should never happen, since the above is exhaustive + buf.WriteString(addr.String()) + } + + buf.WriteString(" {") + + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: change.Action, + requiredReplace: change.RequiredReplace, + } + + // Most commonly-used resources have nested blocks that result in us + // going at least three traversals deep while we recurse here, so we'll + // start with that much capacity and then grow as needed for deeper + // structures. 
+ path := make(cty.Path, 0, 3) + + result := p.writeBlockBodyDiff(schema, change.Before, change.After, 6, path) + if result.bodyWritten { + buf.WriteString("\n") + buf.WriteString(strings.Repeat(" ", 4)) + } + buf.WriteString("}\n") + + return buf.String() +} + +// OutputChanges returns a string representation of a set of changes to output +// values for inclusion in user-facing plan output. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. +func OutputChanges( + changes []*plans.OutputChangeSrc, + color *colorstring.Colorize, +) string { + var buf bytes.Buffer + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: plans.Update, // not actually used in this case, because we're not printing a containing block + } + + // We're going to reuse the codepath we used for printing resource block + // diffs, by pretending that the set of defined outputs are the attributes + // of some resource. It's a little forced to do this, but it gives us all + // the same formatting heuristics as we normally use for resource + // attributes. + oldVals := make(map[string]cty.Value, len(changes)) + newVals := make(map[string]cty.Value, len(changes)) + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(changes)), + } + for _, changeSrc := range changes { + name := changeSrc.Addr.OutputValue.Name + change, err := changeSrc.Decode() + if err != nil { + // It'd be weird to get a decoding error here because that would + // suggest that Terraform itself just produced an invalid plan, and + // we don't have any good way to ignore it in this codepath, so + // we'll just log it and ignore it. 
+ log.Printf("[ERROR] format.OutputChanges: Failed to decode planned change for output %q: %s", name, err) + continue + } + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: cty.DynamicPseudoType, // output types are decided dynamically based on the given value + Optional: true, + Sensitive: change.Sensitive, + } + oldVals[name] = change.Before + newVals[name] = change.After + } + + p.writeBlockBodyDiff(synthSchema, cty.ObjectVal(oldVals), cty.ObjectVal(newVals), 2, nil) + + return buf.String() +} + +type blockBodyDiffPrinter struct { + buf *bytes.Buffer + color *colorstring.Colorize + action plans.Action + requiredReplace cty.PathSet + // verbose is set to true when using the "diff" printer to format state + verbose bool +} + +type blockBodyDiffResult struct { + bodyWritten bool + skippedAttributes int + skippedBlocks int +} + +const ( + forcesNewResourceCaption = " [red]# forces replacement[reset]" + sensitiveCaption = "(sensitive value)" +) + +// writeBlockBodyDiff writes attribute or block differences +// and returns true if any differences were found and written +func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) blockBodyDiffResult { + path = ctyEnsurePathCapacity(path, 1) + result := blockBodyDiffResult{} + + // write the attributes diff + blankBeforeBlocks := p.writeAttrsDiff(schema.Attributes, old, new, indent, path, &result) + p.writeSkippedAttr(result.skippedAttributes, indent+2) + + { + blockTypeNames := make([]string, 0, len(schema.BlockTypes)) + for name := range schema.BlockTypes { + blockTypeNames = append(blockTypeNames, name) + } + sort.Strings(blockTypeNames) + + for _, name := range blockTypeNames { + blockS := schema.BlockTypes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + result.bodyWritten = true + skippedBlocks := p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) + if 
skippedBlocks > 0 { + result.skippedBlocks += skippedBlocks + } + + // Always include a blank for any subsequent block types. + blankBeforeBlocks = true + } + if result.skippedBlocks > 0 { + noun := "blocks" + if result.skippedBlocks == 1 { + noun = "block" + } + p.buf.WriteString("\n\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), result.skippedBlocks, noun)) + } + } + + return result +} + +func (p *blockBodyDiffPrinter) writeAttrsDiff( + attrsS map[string]*configschema.Attribute, + old, new cty.Value, + indent int, + path cty.Path, + result *blockBodyDiffResult) bool { + + attrNames := make([]string, 0, len(attrsS)) + displayAttrNames := make(map[string]string, len(attrsS)) + attrNameLen := 0 + for name := range attrsS { + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + if oldVal.IsNull() && newVal.IsNull() { + // Skip attributes where both old and new values are null + // (we do this early here so that we'll do our value alignment + // based on the longest attribute name that has a change, rather + // than the longest attribute name in the full set.) + continue + } + + attrNames = append(attrNames, name) + displayAttrNames[name] = displayAttributeName(name) + if len(displayAttrNames[name]) > attrNameLen { + attrNameLen = len(displayAttrNames[name]) + } + } + sort.Strings(attrNames) + if len(attrNames) == 0 { + return false + } + + for _, name := range attrNames { + attrS := attrsS[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + result.bodyWritten = true + skipped := p.writeAttrDiff(displayAttrNames[name], attrS, oldVal, newVal, attrNameLen, indent, path) + if skipped { + result.skippedAttributes++ + } + } + + return true +} + +// getPlanActionAndShow returns the action value +// and a boolean for showJustNew. 
In this function we +// modify the old and new values to remove any possible marks +func getPlanActionAndShow(old cty.Value, new cty.Value) (plans.Action, bool) { + var action plans.Action + showJustNew := false + switch { + case old.IsNull(): + action = plans.Create + showJustNew = true + case new.IsNull(): + action = plans.Delete + case ctyEqualWithUnknown(old, new): + action = plans.NoOp + showJustNew = true + default: + action = plans.Update + } + return action, showJustNew +} + +func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) bool { + path = append(path, cty.GetAttrStep{Name: name}) + action, showJustNew := getPlanActionAndShow(old, new) + + if action == plans.NoOp && !p.verbose && !identifyingAttribute(name, attrS) { + return true + } + + if attrS.NestedType != nil { + p.writeNestedAttrDiff(name, attrS, old, new, nameLen, indent, path, action, showJustNew) + return false + } + + p.buf.WriteString("\n") + + p.writeSensitivityWarning(old, new, indent, action, false) + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + p.buf.WriteString(p.color.Color("[bold]")) + p.buf.WriteString(name) + p.buf.WriteString(p.color.Color("[reset]")) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) + p.buf.WriteString(" = ") + + if attrS.Sensitive { + p.buf.WriteString(sensitiveCaption) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + } else { + switch { + case showJustNew: + p.writeValue(new, action, indent+2) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + default: + // We show new even if it is null to emphasize the fact + // that it is being unset, since otherwise it is easy to + // misunderstand that the value is still set to the old value. 
+ p.writeValueDiff(old, new, indent+2, path) + } + } + + return false +} + +// writeNestedAttrDiff is responsible for formatting Attributes with NestedTypes +// in the diff. +func (p *blockBodyDiffPrinter) writeNestedAttrDiff( + name string, attrWithNestedS *configschema.Attribute, old, new cty.Value, + nameLen, indent int, path cty.Path, action plans.Action, showJustNew bool) { + + objS := attrWithNestedS.NestedType + + p.buf.WriteString("\n") + p.writeSensitivityWarning(old, new, indent, action, false) + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + p.buf.WriteString(p.color.Color("[bold]")) + p.buf.WriteString(name) + p.buf.WriteString(p.color.Color("[reset]")) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) + + // Then schema of the attribute itself can be marked sensitive, or the values assigned + sensitive := attrWithNestedS.Sensitive || old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) + if sensitive { + p.buf.WriteString(" = ") + p.buf.WriteString(sensitiveCaption) + + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + + result := &blockBodyDiffResult{} + switch objS.Nesting { + case configschema.NestingSingle: + p.buf.WriteString(" = {") + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.writeAttrsDiff(objS.Attributes, old, new, indent+4, path, result) + p.writeSkippedAttr(result.skippedAttributes, indent+6) + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString("}") + + if !new.IsKnown() { + p.buf.WriteString(" -> (known after apply)") + } else if new.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) + } + + case configschema.NestingList: + p.buf.WriteString(" = [") + if action != plans.NoOp && (p.pathForcesNewResource(path) || 
p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + // Here we intentionally preserve the index-based correspondance + // between old and new, rather than trying to detect insertions + // and removals in the list, because this more accurately reflects + // how Terraform Core and providers will understand the change, + // particularly when the nested block contains computed attributes + // that will themselves maintain correspondance by index. + + // commonLen is number of elements that exist in both lists, which + // will be presented as updates (~). Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. maxLen is the number of + // elements in that longer list. + var commonLen int + var maxLen int + // unchanged is the number of unchanged elements + var unchanged int + + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + maxLen = len(newItems) + default: + commonLen = len(newItems) + maxLen = len(oldItems) + } + for i := 0; i < maxLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + + var action plans.Action + var oldItem, newItem cty.Value + switch { + case i < commonLen: + oldItem = oldItems[i] + newItem = newItems[i] + if oldItem.RawEquals(newItem) { + action = plans.NoOp + unchanged++ + } else { + action = plans.Update + } + case i < len(oldItems): + oldItem = oldItems[i] + newItem = cty.NullVal(oldItem.Type()) + action = plans.Delete + case i < len(newItems): + newItem = newItems[i] + oldItem = cty.NullVal(newItem.Type()) + action = plans.Create + default: + action = plans.NoOp + } + + if action != plans.NoOp { + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeActionSymbol(action) + p.buf.WriteString("{") + + result := 
&blockBodyDiffResult{} + p.writeAttrsDiff(objS.Attributes, oldItem, newItem, indent+8, path, result) + if action == plans.Update { + p.writeSkippedAttr(result.skippedAttributes, indent+10) + } + p.buf.WriteString("\n") + + p.buf.WriteString(strings.Repeat(" ", indent+6)) + p.buf.WriteString("},\n") + } + } + p.writeSkippedElems(unchanged, indent+6) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString("]") + + if !new.IsKnown() { + p.buf.WriteString(" -> (known after apply)") + } else if new.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) + } + + case configschema.NestingSet: + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + var all cty.Value + if len(oldItems)+len(newItems) > 0 { + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) + + all = cty.SetVal(allItems) + } else { + all = cty.SetValEmpty(old.Type().ElementType()) + } + + p.buf.WriteString(" = [") + + var unchanged int + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !new.IsKnown(): + action = plans.Delete + // the value must have come from the old set + oldValue = val + // Mark the new val as null, but the entire set will be + // displayed as "(unknown after apply)" + newValue = cty.NullVal(val.Type()) + case old.IsNull() || !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case new.IsNull() || !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + + if action == plans.NoOp { + unchanged++ + continue + } + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+4)) + 
p.writeActionSymbol(action) + p.buf.WriteString("{") + + if p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1]) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + path := append(path, cty.IndexStep{Key: val}) + p.writeAttrsDiff(objS.Attributes, oldValue, newValue, indent+8, path, result) + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+6)) + p.buf.WriteString("},") + } + p.buf.WriteString("\n") + p.writeSkippedElems(unchanged, indent+6) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString("]") + + if !new.IsKnown() { + p.buf.WriteString(" -> (known after apply)") + } else if new.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + + newItems := map[string]cty.Value{} + + if new.IsKnown() { + newItems = new.AsValueMap() + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + p.buf.WriteString(" = {\n") + + // unchanged tracks the number of unchanged elements + unchanged := 0 + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + unchanged++ + } + + if action 
!= plans.NoOp { + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeActionSymbol(action) + fmt.Fprintf(p.buf, "%q = {", k) + if p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1]) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + p.writeAttrsDiff(objS.Attributes, oldValue, newValue, indent+8, path, result) + p.writeSkippedAttr(result.skippedAttributes, indent+10) + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+6)) + p.buf.WriteString("},\n") + } + } + + p.writeSkippedElems(unchanged, indent+6) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString("}") + if !new.IsKnown() { + p.buf.WriteString(" -> (known after apply)") + } else if new.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) int { + skippedBlocks := 0 + path = append(path, cty.GetAttrStep{Name: name}) + if old.IsNull() && new.IsNull() { + // Nothing to do if both old and new is null + return skippedBlocks + } + + // If either the old or the new value is marked, + // Display a special diff because it is irrelevant + // to list all obfuscated attributes as (sensitive value) + if old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) { + p.writeSensitiveNestedBlockDiff(name, old, new, indent, blankBefore, path) + return 0 + } + + // Where old/new are collections representing a nesting mode other than + // NestingSingle, we assume the collection value can never be unknown + // since we always produce the container for the nested objects, even if + // the objects within are computed. 
+ + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + var action plans.Action + eqV := new.Equals(old) + switch { + case old.IsNull(): + action = plans.Create + case new.IsNull(): + action = plans.Delete + case !new.IsWhollyKnown() || !old.IsWhollyKnown(): + // "old" should actually always be known due to our contract + // that old values must never be unknown, but we'll allow it + // anyway to be robust. + action = plans.Update + case !eqV.IsKnown() || !eqV.True(): + action = plans.Update + } + + skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, blankBefore, path) + if skipped { + return 1 + } + case configschema.NestingList: + // For the sake of handling nested blocks, we'll treat a null list + // the same as an empty list since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockListAsEmpty(old) + new = ctyNullBlockListAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + // Here we intentionally preserve the index-based correspondance + // between old and new, rather than trying to detect insertions + // and removals in the list, because this more accurately reflects + // how Terraform Core and providers will understand the change, + // particularly when the nested block contains computed attributes + // that will themselves maintain correspondance by index. + + // commonLen is number of elements that exist in both lists, which + // will be presented as updates (~). Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. 
+ var commonLen int + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + default: + commonLen = len(newItems) + } + + blankBeforeInner := blankBefore + for i := 0; i < commonLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := newItems[i] + action := plans.Update + if oldItem.RawEquals(newItem) { + action = plans.NoOp + } + skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, blankBeforeInner, path) + if skipped { + skippedBlocks++ + } else { + blankBeforeInner = false + } + } + for i := commonLen; i < len(oldItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := cty.NullVal(oldItem.Type()) + skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, blankBeforeInner, path) + if skipped { + skippedBlocks++ + } else { + blankBeforeInner = false + } + } + for i := commonLen; i < len(newItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + newItem := newItems[i] + oldItem := cty.NullVal(newItem.Type()) + skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, blankBeforeInner, path) + if skipped { + skippedBlocks++ + } else { + blankBeforeInner = false + } + } + case configschema.NestingSet: + // For the sake of handling nested blocks, we'll treat a null set + // the same as an empty set since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockSetAsEmpty(old) + new = ctyNullBlockSetAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both sets are empty + return 0 + } + + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) 
+ all := cty.SetVal(allItems) + + blankBeforeInner := blankBefore + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + path := append(path, cty.IndexStep{Key: val}) + skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, blankBeforeInner, path) + if skipped { + skippedBlocks++ + } else { + blankBeforeInner = false + } + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. 
+ old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + newItems := new.AsValueMap() + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both maps are empty + return 0 + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + blankBeforeInner := blankBefore + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + skipped := p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, blankBeforeInner, path) + if skipped { + skippedBlocks++ + } else { + blankBeforeInner = false + } + } + } + return skippedBlocks +} + +func (p *blockBodyDiffPrinter) writeSensitiveNestedBlockDiff(name string, old, new cty.Value, indent int, blankBefore bool, path cty.Path) { + var action plans.Action + switch { + case old.IsNull(): + action = plans.Create + case new.IsNull(): + action = plans.Delete + case !new.IsWhollyKnown() || !old.IsWhollyKnown(): + // "old" should actually always be known due to our contract + // that old values must never be unknown, but we'll allow it + // anyway to be robust. 
+ action = plans.Update + case !ctyEqualValueAndMarks(old, new): + action = plans.Update + } + + if blankBefore { + p.buf.WriteRune('\n') + } + + // New line before warning printing + p.buf.WriteRune('\n') + p.writeSensitivityWarning(old, new, indent, action, true) + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + fmt.Fprintf(p.buf, "%s {", name) + if action != plans.NoOp && p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteRune('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.buf.WriteString("# At least one attribute in this block is (or was) sensitive,\n") + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.buf.WriteString("# so its contents will not be displayed.") + p.buf.WriteRune('\n') + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.buf.WriteString("}") +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, blankBefore bool, path cty.Path) bool { + if action == plans.NoOp && !p.verbose { + return true + } + + if blankBefore { + p.buf.WriteRune('\n') + } + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + if label != nil { + fmt.Fprintf(p.buf, "%s %q {", name, *label) + } else { + fmt.Fprintf(p.buf, "%s {", name) + } + + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + result := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) + if result.bodyWritten { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + } + p.buf.WriteString("}") + + return false +} + +func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { + // Could check specifically for the sensitivity marker + if 
val.HasMark(marks.Sensitive) { + p.buf.WriteString(sensitiveCaption) + return + } + + if !val.IsKnown() { + p.buf.WriteString("(known after apply)") + return + } + if val.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) + return + } + + ty := val.Type() + + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + { + // Special behavior for JSON strings containing array or object + src := []byte(val.AsString()) + ty, err := ctyjson.ImpliedType(src) + // check for the special case of "null", which decodes to nil, + // and just allow it to be printed out directly + if err == nil && !ty.IsPrimitiveType() && strings.TrimSpace(val.AsString()) != "null" { + jv, err := ctyjson.Unmarshal(src, ty) + if err == nil { + p.buf.WriteString("jsonencode(") + if jv.LengthInt() == 0 { + p.writeValue(jv, action, 0) + } else { + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(jv, action, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteByte(')') + break // don't *also* do the normal behavior below + } + } + } + + if strings.Contains(val.AsString(), "\n") { + // It's a multi-line string, so we want to use the multi-line + // rendering so it'll be readable. Rather than re-implement + // that here, we'll just re-use the multi-line string diff + // printer with no changes, which ends up producing the + // result we want here. + // The path argument is nil because we don't track path + // information into strings and we know that a string can't + // have any indices or attributes that might need to be marked + // as (requires replacement), which is what that argument is for. 
+ p.writeValueDiff(val, val, indent, nil) + break + } + + fmt.Fprintf(p.buf, "%q", val.AsString()) + case cty.Bool: + if val.True() { + p.buf.WriteString("true") + } else { + p.buf.WriteString("false") + } + case cty.Number: + bf := val.AsBigFloat() + p.buf.WriteString(bf.Text('f', -1)) + default: + // should never happen, since the above is exhaustive + fmt.Fprintf(p.buf, "%#v", val) + } + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + p.buf.WriteString("[") + + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",") + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("]") + case ty.IsMapType(): + p.buf.WriteString("{") + + keyLen := 0 + for it := val.ElementIterator(); it.Next(); { + key, _ := it.Element() + if keyStr := key.AsString(); len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + for it := val.ElementIterator(); it.Next(); { + key, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(key, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + case ty.IsObjectType(): + p.buf.WriteString("{") + + atys := ty.AttributeTypes() + attrNames := make([]string, 0, len(atys)) + displayAttrNames := make(map[string]string, len(atys)) + nameLen := 0 + for attrName := range atys { + attrNames = append(attrNames, attrName) + displayAttrNames[attrName] = displayAttributeName(attrName) + if len(displayAttrNames[attrName]) > nameLen { + nameLen = 
len(displayAttrNames[attrName]) + } + } + sort.Strings(attrNames) + + for _, attrName := range attrNames { + val := val.GetAttr(attrName) + displayAttrName := displayAttrNames[attrName] + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(displayAttrName) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(displayAttrName))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if len(attrNames) > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + } +} + +func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { + ty := old.Type() + typesEqual := ctyTypesEqual(ty, new.Type()) + + // We have some specialized diff implementations for certain complex + // values where it's useful to see a visualization of the diff of + // the nested elements rather than just showing the entire old and + // new values verbatim. + // However, these specialized implementations can apply only if both + // values are known and non-null. + if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { + if old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) { + p.buf.WriteString(sensitiveCaption) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + + switch { + case ty == cty.String: + // We have special behavior for both multi-line strings in general + // and for strings that can parse as JSON. For the JSON handling + // to apply, both old and new must be valid JSON. + // For single-line strings that don't parse as JSON we just fall + // out of this switch block and do the default old -> new rendering. + oldS := old.AsString() + newS := new.AsString() + + { + // Special behavior for JSON strings containing object or + // list values. 
+ oldBytes := []byte(oldS) + newBytes := []byte(newS) + oldType, oldErr := ctyjson.ImpliedType(oldBytes) + newType, newErr := ctyjson.ImpliedType(newBytes) + if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { + oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) + newJV, newErr := ctyjson.Unmarshal(newBytes, newType) + if oldErr == nil && newErr == nil { + if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace + p.buf.WriteString("jsonencode(") + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(plans.Update) + p.writeValueDiff(oldJV, newJV, indent+4, path) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } else { + // if they differ only in insignificant whitespace + // then we'll note that but still expand out the + // effective value. + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) + } else { + p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) + } + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(oldJV, plans.NoOp, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } + return + } + } + } + + if !strings.Contains(oldS, "\n") && !strings.Contains(newS, "\n") { + break + } + + p.buf.WriteString("<<-EOT") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var oldLines, newLines []cty.Value + { + r := strings.NewReader(oldS) + sc := bufio.NewScanner(r) + for sc.Scan() { + oldLines = append(oldLines, cty.StringVal(sc.Text())) + } + } + { + r := strings.NewReader(newS) + sc := bufio.NewScanner(r) + for sc.Scan() { + newLines = append(newLines, cty.StringVal(sc.Text())) + } + } + + // Optimization for 
strings which are exactly equal: just print + // directly without calculating the sequence diff. This makes a + // significant difference when this code path is reached via a + // writeValue call with a large multi-line string. + if oldS == newS { + for _, line := range newLines { + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.buf.WriteString(line.AsString()) + p.buf.WriteString("\n") + } + } else { + diffLines := ctySequenceDiff(oldLines, newLines) + for _, diffLine := range diffLines { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(diffLine.Action) + + switch diffLine.Action { + case plans.NoOp, plans.Delete: + p.buf.WriteString(diffLine.Before.AsString()) + case plans.Create: + p.buf.WriteString(diffLine.After.AsString()) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return for strings + p.buf.WriteString(diffLine.After.AsString()) + + } + p.buf.WriteString("\n") + } + } + + p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol + p.buf.WriteString("EOT") + + return + + case ty.IsSetType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var addedVals, removedVals, allVals []cty.Value + for it := old.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if new.HasElement(val).False() { + removedVals = append(removedVals, val) + } + } + for it := new.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if val.IsKnown() && old.HasElement(val).False() { + addedVals = append(addedVals, val) + } + } + + var all, added, removed cty.Value + if len(allVals) > 0 { + all = cty.SetVal(allVals) + } else { + all = cty.SetValEmpty(ty.ElementType()) + } + if len(addedVals) > 0 { + added = cty.SetVal(addedVals) + } else { + added = cty.SetValEmpty(ty.ElementType()) + } + if 
len(removedVals) > 0 { + removed = cty.SetVal(removedVals) + } else { + removed = cty.SetValEmpty(ty.ElementType()) + } + + suppressedElements := 0 + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + + var action plans.Action + switch { + case !val.IsKnown(): + action = plans.Update + case added.HasElement(val).True(): + action = plans.Create + case removed.HasElement(val).True(): + action = plans.Delete + default: + action = plans.NoOp + } + + if action == plans.NoOp && !p.verbose { + suppressedElements++ + continue + } + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",\n") + } + + if suppressedElements > 0 { + p.writeActionSymbol(plans.NoOp) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + noun := "elements" + if suppressedElements == 1 { + noun = "element" + } + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + case ty.IsListType() || ty.IsTupleType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) + + // Maintain a stack of suppressed lines in the diff for later + // display or elision + var suppressedElements []*plans.Change + var changeShown bool + + for i := 0; i < len(elemDiffs); i++ { + if !p.verbose { + for i < len(elemDiffs) && elemDiffs[i].Action == plans.NoOp { + suppressedElements = append(suppressedElements, elemDiffs[i]) + i++ + } + } + + // If we have some suppressed elements on the stack… + if len(suppressedElements) > 0 { + // If we've just rendered a change, display the first + // element in the stack as context + if changeShown { + elemDiff := suppressedElements[0] + 
p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + p.buf.WriteString(",\n") + suppressedElements = suppressedElements[1:] + } + + hidden := len(suppressedElements) + + // If we're not yet at the end of the list, capture the + // last element on the stack as context for the upcoming + // change to be rendered + var nextContextDiff *plans.Change + if hidden > 0 && i < len(elemDiffs) { + hidden-- + nextContextDiff = suppressedElements[hidden] + } + + // If there are still hidden elements, show an elision + // statement counting them + if hidden > 0 { + p.writeActionSymbol(plans.NoOp) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + noun := "elements" + if hidden == 1 { + noun = "element" + } + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), hidden, noun)) + p.buf.WriteString("\n") + } + + // Display the next context diff if it was captured above + if nextContextDiff != nil { + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(nextContextDiff.After, nextContextDiff.Action, indent+4) + p.buf.WriteString(",\n") + } + + // Suppressed elements have now been handled so clear them again + suppressedElements = nil + } + + if i >= len(elemDiffs) { + break + } + + elemDiff := elemDiffs[i] + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(elemDiff.Action) + switch elemDiff.Action { + case plans.NoOp, plans.Delete: + p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) + case plans.Update: + p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) + case plans.Create: + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return. 
+ p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + } + + p.buf.WriteString(",\n") + changeShown = true + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + + return + + case ty.IsMapType(): + p.buf.WriteString("{") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + suppressedElements := 0 + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + kV := cty.StringVal(k) + var action plans.Action + if old.HasIndex(kV).False() { + action = plans.Create + } else if new.HasIndex(kV).False() { + action = plans.Delete + } + + if old.HasIndex(kV).True() && new.HasIndex(kV).True() { + if ctyEqualValueAndMarks(old.Index(kV), new.Index(kV)) { + action = plans.NoOp + } else { + action = plans.Update + } + } + + if action == plans.NoOp && !p.verbose { + suppressedElements++ + continue + } + + path := append(path, cty.IndexStep{Key: kV}) + + oldV := old.Index(kV) + newV := new.Index(kV) + p.writeSensitivityWarning(oldV, newV, indent+2, action, false) + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(cty.StringVal(k), action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + switch action { + case plans.Create, plans.NoOp: + v := new.Index(kV) + if v.HasMark(marks.Sensitive) { + p.buf.WriteString(sensitiveCaption) + } else { + p.writeValue(v, action, 
indent+4) + } + case plans.Delete: + oldV := old.Index(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + if oldV.HasMark(marks.Sensitive) || newV.HasMark(marks.Sensitive) { + p.buf.WriteString(sensitiveCaption) + } else { + p.writeValueDiff(oldV, newV, indent+4, path) + } + } + + p.buf.WriteByte('\n') + } + + if suppressedElements > 0 { + p.writeActionSymbol(plans.NoOp) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + noun := "elements" + if suppressedElements == 1 { + noun = "element" + } + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + + return + case ty.IsObjectType(): + p.buf.WriteString("{") + p.buf.WriteString("\n") + + forcesNewResource := p.pathForcesNewResource(path) + + var allKeys []string + displayKeys := make(map[string]string) + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + displayKeys[keyStr] = displayAttributeName(keyStr) + if len(displayKeys[keyStr]) > keyLen { + keyLen = len(displayKeys[keyStr]) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + displayKeys[keyStr] = displayAttributeName(keyStr) + if len(displayKeys[keyStr]) > keyLen { + keyLen = len(displayKeys[keyStr]) + } + } + + sort.Strings(allKeys) + + suppressedElements := 0 + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + kV := k + var action plans.Action + if !old.Type().HasAttribute(kV) { + action = plans.Create + } else if !new.Type().HasAttribute(kV) { + action = plans.Delete + } else if ctyEqualValueAndMarks(old.GetAttr(kV), new.GetAttr(kV)) { + action = plans.NoOp + } else { + 
action = plans.Update + } + + // TODO: If in future we have a schema associated with this + // object, we should pass the attribute's schema to + // identifyingAttribute here. + if action == plans.NoOp && !p.verbose && !identifyingAttribute(k, nil) { + suppressedElements++ + continue + } + + path := append(path, cty.GetAttrStep{Name: kV}) + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(displayKeys[k]) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(displayKeys[k]))) + p.buf.WriteString(" = ") + + switch action { + case plans.Create, plans.NoOp: + v := new.GetAttr(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.GetAttr(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.GetAttr(kV) + newV := new.GetAttr(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteString("\n") + } + + if suppressedElements > 0 { + p.writeActionSymbol(plans.NoOp) + p.buf.WriteString(strings.Repeat(" ", indent+2)) + noun := "elements" + if suppressedElements == 1 { + noun = "element" + } + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + + if forcesNewResource { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + } + + // In all other cases, we just show the new and old values as-is + p.writeValue(old, plans.Delete, indent) + if new.IsNull() { + p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) + } else { + p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) + } + + p.writeValue(new, plans.Create, indent) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } +} + +// writeActionSymbol writes a symbol to represent the given action, followed +// by a space. 
+// +// It only supports the actions that can be represented with a single character: +// Create, Delete, Update and NoAction. +func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { + switch action { + case plans.Create: + p.buf.WriteString(p.color.Color("[green]+[reset] ")) + case plans.Delete: + p.buf.WriteString(p.color.Color("[red]-[reset] ")) + case plans.Update: + p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) + case plans.NoOp: + p.buf.WriteString(" ") + default: + // Should never happen + p.buf.WriteString(p.color.Color("? ")) + } +} + +func (p *blockBodyDiffPrinter) writeSensitivityWarning(old, new cty.Value, indent int, action plans.Action, isBlock bool) { + // Dont' show this warning for create or delete + if action == plans.Create || action == plans.Delete { + return + } + + // Customize the warning based on if it is an attribute or block + diffType := "attribute value" + if isBlock { + diffType = "block" + } + + // If only attribute sensitivity is changing, clarify that the value is unchanged + var valueUnchangedSuffix string + if !isBlock { + oldUnmarked, _ := old.UnmarkDeep() + newUnmarked, _ := new.UnmarkDeep() + if oldUnmarked.RawEquals(newUnmarked) { + valueUnchangedSuffix = " The value is unchanged." 
+ } + } + + if new.HasMark(marks.Sensitive) && !old.HasMark(marks.Sensitive) { + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf(p.color.Color("# [yellow]Warning:[reset] this %s will be marked as sensitive and will not\n"), diffType)) + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf("# display in UI output after applying this change.%s\n", valueUnchangedSuffix)) + } + + // Note if changing this attribute will change its sensitivity + if old.HasMark(marks.Sensitive) && !new.HasMark(marks.Sensitive) { + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf(p.color.Color("# [yellow]Warning:[reset] this %s will no longer be marked as sensitive\n"), diffType)) + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf("# after applying this change.%s\n", valueUnchangedSuffix)) + } +} + +func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { + if !p.action.IsReplace() || p.requiredReplace.Empty() { + // "requiredReplace" only applies when the instance is being replaced, + // and we should only inspect that set if it is not empty + return false + } + return p.requiredReplace.Has(path) +} + +func ctyEmptyString(value cty.Value) bool { + if !value.IsNull() && value.IsKnown() { + valueType := value.Type() + if valueType == cty.String && value.AsString() == "" { + return true + } + } + return false +} + +func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { + attrType := val.Type().AttributeType(name) + + if val.IsNull() { + return cty.NullVal(attrType) + } + + // We treat "" as null here + // as existing SDK doesn't support null yet. + // This allows us to avoid spurious diffs + // until we introduce null to the SDK. 
+ attrValue := val.GetAttr(name) + // If the value is marked, the ctyEmptyString function will fail + if !val.ContainsMarked() && ctyEmptyString(attrValue) { + return cty.NullVal(attrType) + } + + return attrValue +} + +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + ret := make([]cty.Value, 0, val.LengthInt()) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + return ret +} + +// ctySequenceDiff returns differences between given sequences of cty.Value(s) +// in the form of Create, Delete, or Update actions (for objects). +func ctySequenceDiff(old, new []cty.Value) []*plans.Change { + var ret []*plans.Change + lcs := objchange.LongestCommonSubsequence(old, new, objchange.ValueEqual) + var oldI, newI, lcsI int + for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { + // We first process items in the old and new sequences which are not + // equal to the current common sequence item. Old items are marked as + // deletions, and new items are marked as additions. + // + // There is an exception for deleted & created object items, which we + // try to render as updates where that makes sense. + for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { + // Render this as an object update if all of these are true: + // + // - the current old item is an object; + // - there's a current new item which is also an object; + // - either there are no common items left, or the current new item + // doesn't equal the current common item. + // + // Why do we need the the last clause? If we have current items in all + // three sequences, and the current new item is equal to a common item, + // then we should just need to advance the old item list and we'll + // eventually find a common item matching both old and new. 
+ // + // This combination of conditions allows us to render an object update + // diff instead of a combination of delete old & create new. + isObjectDiff := old[oldI].Type().IsObjectType() && newI < len(new) && new[newI].Type().IsObjectType() && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) + if isObjectDiff { + ret = append(ret, &plans.Change{ + Action: plans.Update, + Before: old[oldI], + After: new[newI], + }) + oldI++ + newI++ // we also consume the next "new" in this case + continue + } + + // Otherwise, this item is not part of the common sequence, so + // render as a deletion. + ret = append(ret, &plans.Change{ + Action: plans.Delete, + Before: old[oldI], + After: cty.NullVal(old[oldI].Type()), + }) + oldI++ + } + for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { + ret = append(ret, &plans.Change{ + Action: plans.Create, + Before: cty.NullVal(new[newI].Type()), + After: new[newI], + }) + newI++ + } + + // When we've exhausted the old & new sequences of items which are not + // in the common subsequence, we render a common item and continue. + if lcsI < len(lcs) { + ret = append(ret, &plans.Change{ + Action: plans.NoOp, + Before: lcs[lcsI], + After: lcs[lcsI], + }) + + // All of our indexes advance together now, since the line + // is common to all three sequences. 
+ lcsI++ + oldI++ + newI++ + } + } + return ret +} + +// ctyEqualValueAndMarks checks equality of two possibly-marked values, +// considering partially-unknown values and equal values with different marks +// as inequal +func ctyEqualWithUnknown(old, new cty.Value) bool { + if !old.IsWhollyKnown() || !new.IsWhollyKnown() { + return false + } + return ctyEqualValueAndMarks(old, new) +} + +// ctyEqualValueAndMarks checks equality of two possibly-marked values, +// considering equal values with different marks as inequal +func ctyEqualValueAndMarks(old, new cty.Value) bool { + oldUnmarked, oldMarks := old.UnmarkDeep() + newUnmarked, newMarks := new.UnmarkDeep() + sameValue := oldUnmarked.Equals(newUnmarked) + return sameValue.IsKnown() && sameValue.True() && oldMarks.Equal(newMarks) +} + +// ctyTypesEqual checks equality of two types more loosely +// by avoiding checks of object/tuple elements +// as we render differences on element-by-element basis anyway +func ctyTypesEqual(oldT, newT cty.Type) bool { + if oldT.IsObjectType() && newT.IsObjectType() { + return true + } + if oldT.IsTupleType() && newT.IsTupleType() { + return true + } + return oldT.Equals(newT) +} + +func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { + if cap(path)-len(path) >= minExtra { + return path + } + newCap := cap(path) * 2 + if newCap < (len(path) + minExtra) { + newCap = len(path) + minExtra + } + newPath := make(cty.Path, len(path), newCap) + copy(newPath, path) + return newPath +} + +// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "list" is +// actually represented as a tuple type where nested blocks contain +// dynamically-typed values. 
+func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsListType() { + return cty.ListValEmpty(ty.ElementType()) + } + return cty.EmptyTupleVal // must need a tuple, then +} + +// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "map" is +// actually represented as an object type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsMapType() { + return cty.MapValEmpty(ty.ElementType()) + } + return cty.EmptyObjectVal // must need an object, then +} + +// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + // Dynamically-typed attributes are not supported inside blocks backed by + // sets, so our result here is always a set. + return cty.SetValEmpty(in.Type().ElementType()) +} + +// DiffActionSymbol returns a string that, once passed through a +// colorstring.Colorize, will produce a result that can be written +// to a terminal to produce a symbol made of three printable +// characters, possibly interspersed with VT100 color codes. +func DiffActionSymbol(action plans.Action) string { + switch action { + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset]" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset]" + case plans.Create: + return " [green]+[reset]" + case plans.Delete: + return " [red]-[reset]" + case plans.Read: + return " [cyan]<=[reset]" + case plans.Update: + return " [yellow]~[reset]" + case plans.NoOp: + return " " + default: + return " ?" 
+ } +} + +// Extremely coarse heuristic for determining whether or not a given attribute +// name is important for identifying a resource. In the future, this may be +// replaced by a flag in the schema, but for now this is likely to be good +// enough. +func identifyingAttribute(name string, attrSchema *configschema.Attribute) bool { + return name == "id" || name == "tags" || name == "name" +} + +func (p *blockBodyDiffPrinter) writeSkippedAttr(skipped, indent int) { + if skipped > 0 { + noun := "attributes" + if skipped == 1 { + noun = "attribute" + } + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), skipped, noun)) + } +} + +func (p *blockBodyDiffPrinter) writeSkippedElems(skipped, indent int) { + if skipped > 0 { + noun := "elements" + if skipped == 1 { + noun = "element" + } + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), skipped, noun)) + p.buf.WriteString("\n") + } +} + +func displayAttributeName(name string) string { + if !hclsyntax.ValidIdentifier(name) { + return fmt.Sprintf("%q", name) + } + return name +} diff --git a/command/format/diff_test.go b/command/format/diff_test.go new file mode 100644 index 000000000000..f9e8cc93e503 --- /dev/null +++ b/command/format/diff_test.go @@ -0,0 +1,7007 @@ +package format + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceChange_primitiveTypes(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + 
Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + } +`, + }, + "creation (null string)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null" + } +`, + }, + "creation (null string with extra whitespace)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null "), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null " + } +`, + }, + "creation (object with quoted keys)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "object": cty.ObjectVal(map[string]cty.Value{ + "unquoted": cty.StringVal("value"), + "quoted:key": cty.StringVal("some-value"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "object": {Type: cty.Object(map[string]cty.Type{ + "unquoted": 
cty.String, + "quoted:key": cty.String, + }), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + object = { + + "quoted:key" = "some-value" + + unquoted = "value" + } + } +`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + } +`, + }, + "deletion of deposed object": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + DeposedKey: states.DeposedKey("byebye"), + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example (deposed object byebye) will be destroyed + # (left over from a partially-failed replacement of this instance) + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + } +`, + }, + "deletion (empty string)": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "intentionally_long": cty.StringVal(""), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "intentionally_long": {Type: cty.String, Optional: true}, + }, + }, + 
RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + } +`, + }, + "string in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + } +`, + }, + "update with quoted key": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": cty.StringVal("https://example.com/saml"), + "zeta": cty.StringVal("alpha"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": cty.StringVal("https://saml.example.com"), + "zeta": cty.StringVal("alpha"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "saml:aud": {Type: cty.String, Optional: true}, + "zeta": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ "saml:aud" = "https://example.com/saml" -> "https://saml.example.com" + # (1 unchanged attribute hidden) + } +`, + }, + "string 
force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + id = "i-02ae66f368e8518a9" + } +`, + }, + "string in-place update (null values)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "unchanged": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "unchanged": cty.NullVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "unchanged": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original +long +multi-line +string +field +`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +extremely long +multi-line +string +field +`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT + original + - long + + extremely long + multi-line + string + field + EOT + } +`, + }, + "addition of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line +`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + more_lines = <<-EOT + original + new line + EOT + } +`, + }, + "force-new update of multi-line string field": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original +`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line +`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "more_lines"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT # forces replacement + original + + new line + EOT + } +`, + }, + + // Sensitive + + "creation with sensitive field": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "password": cty.StringVal("top-secret"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + conn_info = { + + password = (sensitive value) + + user = "not-secret" + } + + id = (known after apply) + + password = (sensitive value) + } +`, + }, + "update with equal sensitive field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("blah"), + "str": 
cty.StringVal("before"), + "password": cty.StringVal("top-secret"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "str": cty.StringVal("after"), + "password": cty.StringVal("top-secret"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "str": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "blah" -> (known after apply) + ~ str = "before" -> "after" + # (1 unchanged attribute hidden) + } +`, + }, + + // tainted objects + "replace tainted resource": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + } +`, + }, + "force replacement with empty before value": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": 
cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + + forced = "example" # forces replacement + name = "name" + } +`, + }, + "force replacement with empty before value legacy": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal(""), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + + forced = "example" # forces replacement + name = "name" + } +`, + }, + "read during apply because of unknown configuration": { + Action: plans.Read, + ActionReason: plans.ResourceInstanceReadBecauseConfigUnknown, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # 
data.test_instance.example will be read during apply + # (config refers to values not yet known) + <= data "test_instance" "example" { + name = "name" + } +`, + }, + "read during apply because of pending changes to upstream dependency": { + Action: plans.Read, + ActionReason: plans.ResourceInstanceReadBecauseDependencyPending, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + # (depends on a resource or a module with changes pending) + <= data "test_instance" "example" { + name = "name" + } +`, + }, + "read during apply for unspecified reason": { + Action: plans.Read, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + <= data "test_instance" "example" { + name = "name" + } +`, + }, + "show all identifying attributes even if unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "bar": cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "bar": 
cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + "foo": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, + "tags": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + name = "alice" + tags = { + "name" = "bob" + } + # (2 unchanged attributes hidden) + } +`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_JSON(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "str": "value", + "list":["a","b", 234, true], + "obj": {"key": "val"} + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode( + { + + list = [ + + "a", + + "b", + + 234, + + true, + ] + + obj = { + + key = "val" + } + + str = "value" + } + ) + } +`, + }, + "in-place update of object": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value","ccc": 5}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = "new_value" + - ccc = 5 -> null + # (1 unchanged element hidden) + } + ) + } +`, + }, + "in-place update of object with quoted keys": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "c:c": "old_value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "b:bb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + "b:bb" = "new_value" + - "c:c" = "old_value" -> null + # (1 unchanged element hidden) + } + ) + } +`, + }, + "in-place update (from empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": 
cty.StringVal(`{"aaa": []}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + + "value", + ] + } + ) + } +`, + }, + "in-place update (to empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": []}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + - "value", + ] + } + ) + } +`, + }, + "in-place update (tuple of different types)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`), + }), + Schema: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + 42, + ~ { + ~ foo = "bar" -> "baz" + }, + "value", + ] + } + ) + } +`, + }, + "force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = "new_value" + # (1 unchanged element hidden) + } # forces replacement + ) + } +`, + }, + "in-place update (whitespace change)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + 
Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( # whitespace changes + { + aaa = "value" + bbb = "another" + } + ) + } +`, + }, + "force-new update (whitespace change)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( # whitespace changes force replacement + { + aaa = "value" + bbb = "another" + } + ) + } +`, + }, + "creation (empty)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: 
cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode({}) + } +`, + }, + "JSON list item removal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second","third"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["first","second"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 unchanged element hidden) + "second", + - "third", + ] + ) + } +`, + }, + "JSON list item addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["first","second","third"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 
unchanged element hidden) + "second", + + "third", + ] + ) + } +`, + }, + "JSON list object addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"first":"111"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"first":"111","second":"222"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + second = "222" + # (1 unchanged element hidden) + } + ) + } +`, + }, + "JSON object with nested list": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{ + "Statement": ["first"] + }`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "Statement": ["first", "second"] + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ Statement = [ + "first", + + "second", + ] + } + ) + } +`, + }, + "JSON list of objects - adding item": { + Action: plans.Update, + Mode: 
addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + ) + } +`, + }, + "JSON list of objects - removing item": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + - { + - two = "222" + }, + { + three = "333" + }, + ] + ) + } +`, + }, + "JSON object with list of objects": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + } + ) + } +`, + }, + "JSON object double nested lists": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + ~ { + ~ another_list = [ + "111", + + "222", + ] + }, + ] + } + ) + } +`, + }, + "in-place update from object to tuple": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": 
cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["aaa", 42, "something"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + - aaa = [ + - 42, + - { + - foo = "bar" + }, + - "value", + ] + } -> [ + + "aaa", + + 42, + + "something", + ] + ) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_listObject(t *testing.T) { + testCases := map[string]testCase{ + // https://github.com/hashicorp/terraform/issues/30641 + "updating non-identifying attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": cty.StringVal("staging"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + "name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": 
cty.StringVal("staging"), + "status": cty.StringVal("EXPLODED"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + "name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "accounts": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "id": cty.String, + "name": cty.String, + "status": cty.String, + })), + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ accounts = [ + { + id = "1" + name = "production" + status = "ACTIVE" + }, + ~ { + id = "2" + name = "staging" + ~ status = "ACTIVE" -> "EXPLODED" + }, + { + id = "3" + name = "disaster-recovery" + status = "ACTIVE" + }, + ] + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id 
= "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - first addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: 
cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + # (1 unchanged element hidden) + "bbbb", + + "cccc", + "dddd", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "list_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ # forces replacement + "aaaa", + + "bbbb", + "cccc", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + 
"ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + "bbbb", + - "cccc", + "dddd", + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + "creation - empty list": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + list_field = [] + } +`, + }, + "in-place update - full to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + - "bbbb", + - "cccc", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - null to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [] + # (1 unchanged attribute hidden) + } +`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: 
addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + "cccc", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "update - two new unknown elements": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: 
true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + + (known after apply), + "cccc", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveTuple(t *testing.T) { + testCases := map[string]testCase{ + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "tuple_field": {Type: cty.Tuple([]cty.Type{cty.String, cty.String, cty.String, cty.String, cty.String}), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ tuple_field = [ + # (1 unchanged element hidden) + "bbbb", + - "dddd", + + "cccc", + "eeee", + # (1 unchanged element hidden) + ] + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveSet(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - 
creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "new-element", + ] + # (1 
unchanged attribute hidden) + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, 
Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "set_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ # forces replacement + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "cccc", + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + "creation - empty set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: 
cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + set_field = [] + } +`, + }, + "in-place update - full to empty set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - null to empty set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, 
Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [] + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update to unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.UnknownVal(cty.Set(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] -> (known after apply) + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + 
cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "bbbb", + ~ (known after apply), + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_map(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.NullVal(cty.Map(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + 
Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "b:b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will 
be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "b" = "bbbb" + + "b:b" = "bbbb" + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + } +`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "map_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { # forces replacement + + "b" = "bbbb" + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + - "a" = "aaaa" -> null + - "c" = "cccc" -> null + # (1 unchanged element hidden) + } + # (1 unchanged attribute hidden) + } +`, + }, + "creation - empty": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + map_field = {} + } +`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + 
"id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.UnknownVal(cty.String), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + ~ "b" = "bbbb" -> (known after apply) + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: 
testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + })}), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device {} + } +`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": 
cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + } +`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: 
"mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged attribute hidden) + }, + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ # forces replacement + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + }, + ] + id = 
"i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + } +`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.EmptyTupleVal, + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.True, + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + 
Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + + block { + + attr = true + } + } +`, + }, + "in-place sequence update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ list { + ~ attr = "x" -> "y" + } + ~ list { + ~ attr = "y" -> "z" + } + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": 
cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": 
cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("75GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": cty.StringVal("25GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ size = "50GB" -> "75GB" + # (1 unchanged attribute hidden) + }, + ~ { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSet(t *testing.T) { + testCases := map[string]testCase{ + "creation from null - sensitive set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "ami": cty.String, + "disks": cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.Set(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + })), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + 
Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + } +`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + } +`, + }, + "in-place update - creation - sensitive set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": 
cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + } +`, + }, + "in-place update - marking set sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. The value is unchanged. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + - { + - mount_point = "/var/diska" -> null + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + - root_block_device { + - volume_type = "gp2" -> null + } + } +`, + }, + "force-new update (whole block)": { + Action: 
plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { # forces replacement + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + + { # forces replacement + + mount_point = "/var/diskb" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { # forces replacement + + volume_type = "different" + } + - root_block_device { # forces replacement + - volume_type = "gp2" -> null + } + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ 
+ cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + } +`, + }, + "in-place update - empty nested sets": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + 
RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [ + ] + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - null insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + - root_block_device { + - volume_type = "gp2" -> null + } + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": 
cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMap(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + } +`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + } +`, + }, 
+ "in-place update - change attr": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + } +`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + 
"root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "disk_2": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/disk2"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_2" = { + + mount_point = "/var/disk2" + + size = "50GB" + }, + # (1 unchanged element hidden) + } + id = "i-02ae66f368e8518a9" + + + root_block_device "b" { + + new_field = "new_value" + + volume_type = "gp2" + } + + # (1 unchanged block hidden) + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.StringVal("a")}, + }, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { # forces replacement + ~ size = "50GB" -> "100GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { # forces replacement + ~ volume_type = "gp2" -> "different" + } + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": 
cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } + id = "i-02ae66f368e8518a9" + + - root_block_device "a" { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + 
}), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - insertion sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.StringVal("disk_a")}, + cty.GetAttrStep{Name: "mount_point"}, + }, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + 
+ "disk_a" = { + + mount_point = (sensitive value) + + size = "50GB" + }, + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - multiple unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + } +`, + }, + "in-place update - multiple blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": 
cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - multiple blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + 
"volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - multiple blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + 
"root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + } +`, + }, + "in-place update - multiple different unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": 
cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + } +`, + }, + "in-place update - multiple different blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> 
"ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - multiple different blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - 
multiple different blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + } +`, + }, + "in-place update - mixed blocks unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": 
cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (4 unchanged blocks hidden) + } +`, + }, + "in-place update - mixed blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + 
"id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (2 
unchanged blocks hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSingle(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": 
cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disk = { + + mount_point = "/var/diska" + + size = "50GB" + } + id = "i-02ae66f368e8518a9" + + + root_block_device {} + } +`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disk"}, + cty.GetAttrStep{Name: "mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged 
attribute hidden) + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disk"}}, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { # forces replacement + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": 
cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disk = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + } -> null + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + } +`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.NullVal(cty.Object(map[string]cty.Type{ + "attr": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), 
+ "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> (known after apply) + ~ size = "50GB" -> (known after apply) + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("25GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + 
"new_field": cty.StringVal("new_value"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMapSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + 
"disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedListSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": 
cty.NullVal(cty.String), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": 
cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSetSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + } +`, + 
}, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + } +`, + }, + } + runTestCases(t, testCases) +} + +func 
TestResourceChange_actionReason(t *testing.T) { + emptySchema := &configschema.Block{} + nullVal := cty.NullVal(cty.EmptyObject) + emptyVal := cty.EmptyObjectVal + + testCases := map[string]testCase{ + "delete for no particular reason": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" {} +`, + }, + "delete because of wrong repetition mode (NoKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.NoKey, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + # (because resource uses count or for_each) + - resource "test_instance" "example" {} +`, + }, + "delete because of wrong repetition mode (IntKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because resource does not use count) + - resource "test_instance" "example" {} +`, + }, + "delete because of wrong repetition mode (StringKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("a"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["a"] will be destroyed + # (because resource does not use for_each) + - resource "test_instance" "example" {} +`, + }, + "delete 
because no resource configuration": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo.test_instance.example will be destroyed + # (because test_instance.example is not in configuration) + - resource "test_instance" "example" {} +`, + }, + "delete because no module": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo[1].test_instance.example will be destroyed + # (because module.foo[1] is not in configuration) + - resource "test_instance" "example" {} +`, + }, + "delete because out of range for count": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseCountIndex, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because index [1] is out of range for count) + - resource "test_instance" "example" {} +`, + }, + "delete because out of range for for_each": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseEachKey, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("boop"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["boop"] will be destroyed + # (because key ["boop"] is not in for_each map) + - resource "test_instance" "example" {} +`, + }, + "replace for no particular reason (delete 
first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {} +`, + }, + "replace for no particular reason (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {} +`, + }, + "replace by request (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested +-/+ resource "test_instance" "example" {} +`, + }, + "replace by request (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested ++/- resource "test_instance" "example" {} +`, + }, + "replace because tainted (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced +-/+ resource "test_instance" "example" {} +`, + }, + "replace because tainted (create first)": { + Action: plans.CreateThenDelete, + 
ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced ++/- resource "test_instance" "example" {} +`, + }, + "replace because cannot update (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. + // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {} +`, + }, + "replace because cannot update (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. 
+ // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {} +`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_sensitiveVariable(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-123"), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "nested_block_list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + // Nested blocks/sets will mark the whole set/block as sensitive + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_list"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = (sensitive value) + + id = "i-02ae66f368e8518a9" + + list_field = [ + + "hello", + + (sensitive value), + + "!", + ] + + map_key = { + + "breakfast" = 800 + + "dinner" = (sensitive value) + } + + map_whole = (sensitive value) + + + nested_block_list { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + + nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "in-place update - before sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "special": cty.BoolVal(false), + "some_number": cty.NumberIntVal(2), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("."), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1900), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: 
cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this attribute value will no longer be marked as sensitive 
+ # after applying this change. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + - (sensitive value), + + ".", + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "in-place update - after sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(700), + "dinner": cty.NumberIntVal(2100), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("address")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_single"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + 
RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_single": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - "hello", + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "breakfast" = 800 -> 700 + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ "dinner" = (sensitive value) + } + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ map_whole = (sensitive value) + + # Warning: this block will be marked as sensitive and will not + # display in UI output after applying this change. + ~ nested_block_single { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "in-place update - both sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1800), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.UnknownVal(cty.String), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_map": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingMap, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - (sensitive value), + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + ~ map_whole = (sensitive value) + + ~ nested_block_map { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "in-place update - value unchanged, sensitivity changes": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this 
attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + - (sensitive value), + + "!", + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + }), + After: cty.NullVal(cty.EmptyObject), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + 
"list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - ami = (sensitive value) -> null + - id = "i-02ae66f368e8518a9" -> null + - list_field = [ + - "hello", + - (sensitive value), + ] -> null + - map_key = { + - "breakfast" = 800 + - "dinner" = (sensitive value) + } -> null + - map_whole = (sensitive value) -> null + + - nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + } +`, + }, + "update with sensitive value forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Required: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + cty.GetAttrPath("nested_block_set"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + + ~ nested_block_set { # forces replacement + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + } +`, + }, + "update with sensitive attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + } +`, + }, + "update with sensitive nested type attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("new-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + 
cty.GetAttrPath("conn_info"), + cty.GetAttrPath("password"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ conn_info = { # forces replacement + ~ password = (sensitive value) + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + } +`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_moved(t *testing.T) { + prevRunAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "previous", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + testCases := map[string]testCase{ + "moved and updated": { + PrevRunAddr: prevRunAddr, + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("boop"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + # (moved from test_instance.previous) + ~ resource "test_instance" "example" { + ~ bar = "baz" -> "boop" + id = "12345" + # (1 unchanged attribute hidden) + } +`, + }, + "moved without changes": { + PrevRunAddr: prevRunAddr, + Action: plans.NoOp, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + Schema: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.previous has moved to test_instance.example + resource "test_instance" "example" { + id = "12345" + # (2 unchanged attributes hidden) + } +`, + }, + } + + runTestCases(t, testCases) +} + +type testCase struct { + Action plans.Action + ActionReason plans.ResourceInstanceChangeActionReason + ModuleInst addrs.ModuleInstance + Mode addrs.ResourceMode + InstanceKey addrs.InstanceKey + DeposedKey states.DeposedKey + Before cty.Value + BeforeValMarks []cty.PathValueMarks + AfterValMarks []cty.PathValueMarks + After cty.Value + Schema *configschema.Block + RequiredReplace cty.PathSet + ExpectedOutput string + PrevRunAddr addrs.AbsResourceInstance +} + +func runTestCases(t *testing.T, testCases map[string]testCase) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ty := tc.Schema.ImpliedType() + + beforeVal := tc.Before + switch { // Some fixups to make the test cases a little easier to write + case beforeVal.IsNull(): + beforeVal = cty.NullVal(ty) // allow mistyped nulls + case !beforeVal.IsKnown(): + beforeVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + afterVal := tc.After + switch { // Some fixups to make the test cases a little easier to write + case afterVal.IsNull(): + afterVal = cty.NullVal(ty) // allow mistyped nulls + case !afterVal.IsKnown(): + afterVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + addr := addrs.Resource{ + Mode: tc.Mode, + Type: "test_instance", + Name: "example", + }.Instance(tc.InstanceKey).Absolute(tc.ModuleInst) + + prevRunAddr := tc.PrevRunAddr + // If no previous run address is given, reuse the current address + // to make initialization easier + if 
prevRunAddr.Resource.Resource.Type == "" { + prevRunAddr = addr + } + + change := &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: prevRunAddr, + DeposedKey: tc.DeposedKey, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + Change: plans.Change{ + Action: tc.Action, + Before: beforeVal.MarkWithPaths(tc.BeforeValMarks), + After: afterVal.MarkWithPaths(tc.AfterValMarks), + }, + ActionReason: tc.ActionReason, + RequiredReplace: tc.RequiredReplace, + } + + output := ResourceChange(change, tc.Schema, color, DiffLanguageProposedChange) + if diff := cmp.Diff(output, tc.ExpectedOutput); diff != "" { + t.Errorf("wrong output\n%s", diff) + } + }) + } +} + +func TestOutputChanges(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + testCases := map[string]struct { + changes []*plans.OutputChangeSrc + output string + }{ + "new output value": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("bar"), + false, + ), + }, + ` + + foo = "bar"`, + }, + "removed output": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.NullVal(cty.DynamicPseudoType), + false, + ), + }, + ` + - foo = "bar" -> null`, + }, + "single string change": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.StringVal("baz"), + false, + ), + }, + ` + ~ foo = "bar" -> "baz"`, + }, + "element added to list": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("gamma"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + false, + ), + }, + ` + ~ foo = [ + # (1 unchanged element hidden) + "beta", + + "gamma", + "delta", + 
# (1 unchanged element hidden) + ]`, + }, + "multiple outputs changed, one sensitive": { + []*plans.OutputChangeSrc{ + outputChange( + "a", + cty.NumberIntVal(1), + cty.NumberIntVal(2), + false, + ), + outputChange( + "b", + cty.StringVal("hunter2"), + cty.StringVal("correct-horse-battery-staple"), + true, + ), + outputChange( + "c", + cty.BoolVal(false), + cty.BoolVal(true), + false, + ), + }, + ` + ~ a = 1 -> 2 + ~ b = (sensitive value) + ~ c = false -> true`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + output := OutputChanges(tc.changes, color) + if output != tc.output { + t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) + } + }) + } +} + +func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc { + addr := addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{Name: name}, + } + + change := &plans.OutputChange{ + Addr: addr, Change: plans.Change{ + Before: before, + After: after, + }, + Sensitive: sensitive, + } + + changeSrc, err := change.Encode() + if err != nil { + panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err)) + } + + return changeSrc +} + +// A basic test schema using a configurable NestingMode for one (NestedType) attribute and one block +func testSchema(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// A basic test schema using a configurable NestingMode for one (NestedType) +// attribute marked sensitive. +func testSchemaSensitive(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + Sensitive: true, + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + } +} + +func testSchemaMultipleBlocks(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + "leaf_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// similar to testSchema with the addition of a "new_field" block +func testSchemaPlus(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if 
nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "new_field": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} diff --git a/internal/command/format/difflanguage_string.go b/command/format/difflanguage_string.go similarity index 100% rename from internal/command/format/difflanguage_string.go rename to command/format/difflanguage_string.go diff --git a/internal/command/format/format.go b/command/format/format.go similarity index 100% rename from internal/command/format/format.go rename to command/format/format.go diff --git a/internal/command/format/object_id.go b/command/format/object_id.go similarity index 98% rename from internal/command/format/object_id.go rename to command/format/object_id.go index 75b427b8d465..7d5cb5287037 100644 --- a/internal/command/format/object_id.go +++ b/command/format/object_id.go @@ -1,7 +1,7 @@ package format import ( - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/format/object_id_test.go b/command/format/object_id_test.go similarity index 98% rename from internal/command/format/object_id_test.go rename to command/format/object_id_test.go index 2f13e1366caa..ba85784f1925 100644 --- 
a/internal/command/format/object_id_test.go +++ b/command/format/object_id_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/command/format/state.go b/command/format/state.go new file mode 100644 index 000000000000..9fcb41f1c1e9 --- /dev/null +++ b/command/format/state.go @@ -0,0 +1,216 @@ +package format + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/colorstring" +) + +// StateOpts are the options for formatting a state. +type StateOpts struct { + // State is the state to format. This is required. + State *states.State + + // Schemas are used to decode attributes. This is required. + Schemas *terraform.Schemas + + // Color is the colorizer. This is optional. + Color *colorstring.Colorize +} + +// State takes a state and returns a string +func State(opts *StateOpts) string { + if opts.Color == nil { + panic("colorize not given") + } + + if opts.Schemas == nil { + panic("schemas not given") + } + + s := opts.State + if len(s.Modules) == 0 { + return "The state file is empty. No resources are represented." 
+ } + + buf := bytes.NewBufferString("[reset]") + p := blockBodyDiffPrinter{ + buf: buf, + color: opts.Color, + action: plans.NoOp, + verbose: true, + } + + // Format all the modules + for _, m := range s.Modules { + formatStateModule(p, m, opts.Schemas) + } + + // Write the outputs for the root module + m := s.RootModule() + + if m.OutputValues != nil { + if len(m.OutputValues) > 0 { + p.buf.WriteString("Outputs:\n\n") + } + + // Sort the outputs + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + // Output each output k/v pair + for _, k := range ks { + v := m.OutputValues[k] + p.buf.WriteString(fmt.Sprintf("%s = ", k)) + if v.Sensitive { + p.buf.WriteString("(sensitive value)") + } else { + p.writeValue(v.Value, plans.NoOp, 0) + } + p.buf.WriteString("\n") + } + } + + trimmedOutput := strings.TrimSpace(p.buf.String()) + trimmedOutput += "[reset]" + + return opts.Color.Color(trimmedOutput) + +} + +func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { + // First get the names of all the resources so we can show them + // in alphabetical order. + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + sort.Strings(names) + + // Go through each resource and begin building up the output. 
+ for _, key := range names { + for k, v := range m.Resources[key].Instances { + // keep these in order to keep the current object first, and + // provide deterministic output for the deposed objects + type obj struct { + header string + instance *states.ResourceInstanceObjectSrc + } + instances := []obj{} + + addr := m.Resources[key].Addr + resAddr := addr.Resource + + taintStr := "" + if v.Current != nil && v.Current.Status == 'T' { + taintStr = " (tainted)" + } + + instances = append(instances, + obj{fmt.Sprintf("# %s:%s\n", addr.Instance(k), taintStr), v.Current}) + + for dk, v := range v.Deposed { + instances = append(instances, + obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Instance(k), dk), v}) + } + + // Sort the instances for consistent output. + // Starting the sort from the second index, so the current instance + // is always first. + sort.Slice(instances[1:], func(i, j int) bool { + return instances[i+1].header < instances[j+1].header + }) + + for _, obj := range instances { + header := obj.header + instance := obj.instance + p.buf.WriteString(header) + if instance == nil { + // this shouldn't happen, but there's nothing to do here so + // don't panic below. + continue + } + + var schema *configschema.Block + + provider := m.Resources[key].ProviderConfig.Provider + if _, exists := schemas.Providers[provider]; !exists { + // This should never happen in normal use because we should've + // loaded all of the schemas and checked things prior to this + // point. We can't return errors here, but since this is UI code + // we will try to do _something_ reasonable. 
+ p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider.String())) + continue + } + + switch resAddr.Mode { + case addrs.ManagedResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + resAddr.Mode, + resAddr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q resource type %s\n\n", provider, resAddr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "resource %q %q {", + resAddr.Type, + resAddr.Name, + )) + case addrs.DataResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + resAddr.Mode, + resAddr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q data source %s\n\n", provider, resAddr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "data %q %q {", + resAddr.Type, + resAddr.Name, + )) + default: + // should never happen, since the above is exhaustive + p.buf.WriteString(resAddr.String()) + } + + val, err := instance.Decode(schema.ImpliedType()) + if err != nil { + fmt.Println(err.Error()) + break + } + + path := make(cty.Path, 0, 3) + result := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) + if result.bodyWritten { + p.buf.WriteString("\n") + } + + p.buf.WriteString("}\n\n") + } + } + } + p.buf.WriteString("\n") +} diff --git a/command/format/state_test.go b/command/format/state_test.go new file mode 100644 index 000000000000..fb9da80fb0fd --- /dev/null +++ b/command/format/state_test.go @@ -0,0 +1,400 @@ +package format + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" +) + +func TestState(t *testing.T) { + tests := []struct { + State *StateOpts + Want string + }{ + { + &StateOpts{ + State: &states.State{}, + Color: disabledColorize, + 
Schemas: &terraform.Schemas{}, + }, + "The state file is empty. No resources are represented.", + }, + { + &StateOpts{ + State: basicState(t), + Color: disabledColorize, + Schemas: testSchemas(), + }, + basicStateOutput, + }, + { + &StateOpts{ + State: nestedState(t), + Color: disabledColorize, + Schemas: testSchemas(), + }, + nestedStateOutput, + }, + { + &StateOpts{ + State: deposedState(t), + Color: disabledColorize, + Schemas: testSchemas(), + }, + deposedNestedStateOutput, + }, + { + &StateOpts{ + State: onlyDeposedState(t), + Color: disabledColorize, + Schemas: testSchemas(), + }, + onlyDeposedOutput, + }, + { + &StateOpts{ + State: stateWithMoreOutputs(t), + Color: disabledColorize, + Schemas: testSchemas(), + }, + stateWithMoreOutputsOutput, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + got := State(tt.State) + if got != tt.Want { + t.Errorf( + "wrong result\ninput: %v\ngot: \n%q\nwant: \n%q", + tt.State.State, got, tt.Want, + ) + } + }) + } +} + +func testProvider() *terraform.MockProvider { + p := new(terraform.MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + + p.GetProviderSchemaResponse = testProviderSchema() + + return p +} + +func testProviderSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "woozles": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: 
configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } +} + +func testSchemas() *terraform.Schemas { + provider := testProvider() + return &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), + }, + } +} + +const basicStateOutput = `# data.test_data_source.data: +data "test_data_source" "data" { + compute = "sure" +} + +# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bar = "bar value"` + +const nestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +}` + +const deposedNestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +}` + +const onlyDeposedOutput = `# test_resource.baz[0]: +# test_resource.baz[0]: (deposed object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 5678) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +}` + +const stateWithMoreOutputsOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bool_var = true +int_var = 42 +map_var = { + "first" = "foo" + "second" = "bar" +} 
+sensitive_var = (sensitive value) +string_var = "string value"` + +func basicState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "data", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"compute":"sure"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func stateWithMoreOutputs(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetOutputValue("string_var", cty.StringVal("string value"), false) + rootModule.SetOutputValue("int_var", cty.NumberIntVal(42), false) + rootModule.SetOutputValue("bool_var", cty.BoolVal(true), false) + rootModule.SetOutputValue("sensitive_var", cty.StringVal("secret!!!"), true) + rootModule.SetOutputValue("map_var", cty.MapVal(map[string]cty.Value{ + "first": cty.StringVal("foo"), + "second": cty.StringVal("bar"), + }), false) + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: 
"test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func nestedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func deposedState(t *testing.T) *states.State { + state := nestedState(t) + rootModule := state.RootModule() + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +// replicate a corrupt resource where only a deposed exists +func onlyDeposedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + 
}.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("5678"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} diff --git a/internal/command/format/trivia.go b/command/format/trivia.go similarity index 100% rename from internal/command/format/trivia.go rename to command/format/trivia.go diff --git a/internal/command/get.go b/command/get.go similarity index 97% rename from internal/command/get.go rename to command/get.go index 0f541c3b1e89..c191f70f963a 100644 --- a/internal/command/get.go +++ b/command/get.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // GetCommand is a Command implementation that takes a Terraform diff --git a/internal/command/get_test.go b/command/get_test.go similarity index 100% rename from internal/command/get_test.go rename to command/get_test.go diff --git a/command/graph.go b/command/graph.go new file mode 100644 index 000000000000..72784d42a20b --- /dev/null +++ b/command/graph.go @@ -0,0 +1,225 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + 
"github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// GraphCommand is a Command implementation that takes a Terraform +// configuration and outputs the dependency tree in graphical form. +type GraphCommand struct { + Meta +} + +func (c *GraphCommand) Run(args []string) int { + var drawCycles bool + var graphTypeStr string + var moduleDepth int + var verbose bool + var planPath string + + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("graph") + cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") + cmdFlags.StringVar(&graphTypeStr, "type", "", "type") + cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") + cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") + cmdFlags.StringVar(&planPath, "plan", "", "plan") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + configPath, err := ModulePath(cmdFlags.Args()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + + // Try to load plan if path is specified + var planFile *planfile.Reader + if planPath != "" { + planFile, err = c.PlanFile(planPath) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // We require a local backend + 
local, ok := b.(backend.Local) + if !ok { + c.showDiagnostics(diags) // in case of any warnings in here + c.Ui.Error(ErrUnsupportedLocalOp) + return 1 + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + // Build the operation + opReq := c.Operation(b, arguments.ViewHuman) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + opReq.PlanFile = planFile + opReq.AllowUnsetVariables = true + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + + // Get the context + lr, _, ctxDiags := local.LocalRun(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + if graphTypeStr == "" { + switch { + case lr.Plan != nil: + graphTypeStr = "apply" + default: + graphTypeStr = "plan" + } + } + + var g *terraform.Graph + var graphDiags tfdiags.Diagnostics + switch graphTypeStr { + case "plan": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.NormalMode) + case "plan-refresh-only": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.RefreshOnlyMode) + case "plan-destroy": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.DestroyMode) + case "apply": + plan := lr.Plan + + // Historically "terraform graph" would allow the nonsensical request to + // render an apply graph without a plan, so we continue to support that + // here, though perhaps one day this should be an error. + if lr.Plan == nil { + plan = &plans.Plan{ + Changes: plans.NewChanges(), + UIMode: plans.NormalMode, + PriorState: lr.InputState, + PrevRunState: lr.InputState, + } + } + + g, graphDiags = lr.Core.ApplyGraphForUI(plan, lr.Config) + case "eval", "validate": + // Terraform v0.12 through v1.0 supported both of these, but the + // graph variants for "eval" and "validate" are purely implementation + // details and don't reveal anything (user-model-wise) that you can't + // see in the plan graph. 
+ graphDiags = graphDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Graph type no longer available", + fmt.Sprintf("The graph type %q is no longer available. Use -type=plan instead to get a similar result.", graphTypeStr), + )) + default: + graphDiags = graphDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported graph type", + `The -type=... argument must be either "plan", "plan-refresh-only", "plan-destroy", or "apply".`, + )) + } + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + graphStr, err := terraform.GraphDot(g, &dag.DotOpts{ + DrawCycles: drawCycles, + MaxDepth: moduleDepth, + Verbose: verbose, + }) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error converting graph: %s", err)) + return 1 + } + + if diags.HasErrors() { + // For this command we only show diagnostics if there are errors, + // because printing out naked warnings could upset a naive program + // consuming our dot output. + c.showDiagnostics(diags) + return 1 + } + + c.Ui.Output(graphStr) + + return 0 +} + +func (c *GraphCommand) Help() string { + helpText := ` +Usage: terraform [global options] graph [options] + + Produces a representation of the dependency graph between different + objects in the current configuration and state. + + The graph is presented in the DOT language. The typical program that can + read this format is GraphViz, but many web services are also available + to read this format. + +Options: + + -plan=tfplan Render graph using the specified plan file instead of the + configuration in the current directory. + + -draw-cycles Highlight any cycles in the graph with colored edges. + This helps when diagnosing cycle errors. + + -type=plan Type of graph to output. Can be: plan, plan-refresh-only, + plan-destroy, or apply. By default Terraform chooses + "plan", or "apply" if you also set the -plan=... option. 
+ + -module-depth=n (deprecated) In prior versions of Terraform, specified the + depth of modules to show in the output. +` + return strings.TrimSpace(helpText) +} + +func (c *GraphCommand) Synopsis() string { + return "Generate a Graphviz graph of the steps in an operation" +} diff --git a/command/graph_test.go b/command/graph_test.go new file mode 100644 index 000000000000..fc21fc611f28 --- /dev/null +++ b/command/graph_test.go @@ -0,0 +1,159 @@ +package command + +import ( + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" +) + +func TestGraph(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("graph"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} + +func TestGraph_multipleArgs(t *testing.T) { + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{ + "bad", + "bad", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + } +} + +func TestGraph_noArgs(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("graph"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", 
ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} + +func TestGraph_noConfig(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + // Running the graph command without a config should not panic, + // but this may be an error at some point in the future. + args := []string{"-type", "apply"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestGraph_plan(t *testing.T) { + testCwd(t) + + plan := &plans.Plan{ + Changes: plans.NewChanges(), + } + plan.Changes.Resources = append(plan.Changes.Resources, &plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: plans.DynamicValue(`{}`), + After: plans.DynamicValue(`null`), + }, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }) + emptyConfig, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) + if err != nil { + t.Fatal(err) + } + plan.Backend = plans.Backend{ + // Doesn't actually matter since we aren't going to activate the backend + // for this command anyway, but we need something here for the plan + // file writer to succeed. 
+ Type: "placeholder", + Config: emptyConfig, + } + _, configSnap := testModuleWithSnapshot(t, "graph") + + planPath := testPlanFile(t, configSnap, states.NewState(), plan) + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{ + "-plan", planPath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} diff --git a/command/helper.go b/command/helper.go new file mode 100644 index 000000000000..1c91d4b1be95 --- /dev/null +++ b/command/helper.go @@ -0,0 +1,28 @@ +package command + +import ( + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" +) + +const failedToLoadSchemasMessage = ` +Warning: Failed to update data for external integrations + +Terraform was unable to generate a description of the updated +state for use with external integrations in Terraform Cloud. +Any integrations configured for this workspace which depend on +information from the state may not work correctly when using the +result of this action. + +This problem occurs when Terraform cannot read the schema for +one or more of the providers used in the state. 
The next successful +apply will correct the problem by re-generating the JSON description +of the state: + terraform apply +` + +func isCloudMode(b backend.Enhanced) bool { + _, ok := b.(*cloud.Cloud) + + return ok +} diff --git a/internal/command/hook_module_install.go b/command/hook_module_install.go similarity index 94% rename from internal/command/hook_module_install.go rename to command/hook_module_install.go index 4afa7072c30d..d1fa27510b2f 100644 --- a/internal/command/hook_module_install.go +++ b/command/hook_module_install.go @@ -4,7 +4,7 @@ import ( "fmt" version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/initwd" + "github.com/hashicorp/terraform/initwd" "github.com/mitchellh/cli" ) diff --git a/internal/command/import.go b/command/import.go similarity index 96% rename from internal/command/import.go rename to command/import.go index 5042cc82e7d6..b79f4cb7b32c 100644 --- a/internal/command/import.go +++ b/command/import.go @@ -10,13 +10,13 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // ImportCommand is a cli.Command implementation that imports resources diff --git a/internal/command/import_test.go b/command/import_test.go similarity index 99% rename from internal/command/import_test.go rename to 
command/import_test.go index 6f1ad71b0ca1..3397e82faf84 100644 --- a/internal/command/import_test.go +++ b/command/import_test.go @@ -11,10 +11,10 @@ import ( "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" ) func TestImport(t *testing.T) { diff --git a/command/init.go b/command/init.go new file mode 100644 index 000000000000..54eef83aca9d --- /dev/null +++ b/command/init.go @@ -0,0 +1,1230 @@ +package command + +import ( + "context" + "fmt" + "log" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/posener/complete" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + backendInit "github.com/hashicorp/terraform/backend/init" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/providercache" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" +) + +// InitCommand is a Command implementation that takes a Terraform +// module and clones it to the working directory. 
+type InitCommand struct { + Meta +} + +func (c *InitCommand) Run(args []string) int { + var flagFromModule, flagLockfile string + var flagBackend, flagCloud, flagGet, flagUpgrade bool + var flagPluginPath FlagStringSlice + flagConfigExtra := newRawFlags("-backend-config") + + args = c.Meta.process(args) + cmdFlags := c.Meta.extendedFlagSet("init") + cmdFlags.BoolVar(&flagBackend, "backend", true, "") + cmdFlags.BoolVar(&flagCloud, "cloud", true, "") + cmdFlags.Var(flagConfigExtra, "backend-config", "") + cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") + cmdFlags.BoolVar(&flagGet, "get", true, "") + cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") + cmdFlags.BoolVar(&c.migrateState, "migrate-state", false, "migrate state") + cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") + cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") + cmdFlags.StringVar(&flagLockfile, "lockfile", "", "Set a dependency lockfile mode") + cmdFlags.BoolVar(&c.Meta.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local Terraform versions are incompatible") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + backendFlagSet := arguments.FlagIsSet(cmdFlags, "backend") + cloudFlagSet := arguments.FlagIsSet(cmdFlags, "cloud") + + switch { + case backendFlagSet && cloudFlagSet: + c.Ui.Error("The -backend and -cloud options are aliases of one another and mutually-exclusive in their use") + return 1 + case backendFlagSet: + flagCloud = flagBackend + case cloudFlagSet: + flagBackend = flagCloud + } + + if c.migrateState && c.reconfigure { + 
c.Ui.Error("The -migrate-state and -reconfigure options are mutually-exclusive") + return 1 + } + + // Copying the state only happens during backend migration, so setting + // -force-copy implies -migrate-state + if c.forceInitCopy { + c.migrateState = true + } + + var diags tfdiags.Diagnostics + + if len(flagPluginPath) > 0 { + c.pluginPath = flagPluginPath + } + + // Validate the arg count and get the working directory + args = cmdFlags.Args() + path, err := ModulePath(args) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + if err := c.storePluginPath(c.pluginPath); err != nil { + c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err)) + return 1 + } + + // This will track whether we outputted anything so that we know whether + // to output a newline before the success message + var header bool + + if flagFromModule != "" { + src := flagFromModule + + empty, err := configs.IsEmptyDir(path) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err)) + return 1 + } + if !empty { + c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty)) + return 1 + } + + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset][bold]Copying configuration[reset] from %q...", src, + ))) + header = true + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: false, // since they are in a weird location for init + } + + initDirFromModuleAbort, initDirFromModuleDiags := c.initDirFromModule(path, src, hooks) + diags = diags.Append(initDirFromModuleDiags) + if initDirFromModuleAbort || initDirFromModuleDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + c.Ui.Output("") + } + + // If our directory is empty, then we're done. We can't get or set up + // the backend with an empty directory. 
+ empty, err := configs.IsEmptyDir(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err)) + c.showDiagnostics(diags) + return 1 + } + if empty { + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) + return 0 + } + + // Load just the root module to begin backend and module initialization + rootModEarly, earlyConfDiags := c.loadSingleModule(path) + + // There may be parsing errors in config loading but these will be shown later _after_ + // checking for core version requirement errors. Not meeting the version requirement should + // be the first error displayed if that is an issue, but other operations are required + // before being able to check core version requirements. + if rootModEarly == nil { + c.Ui.Error(c.Colorize().Color(strings.TrimSpace(errInitConfigError))) + diags = diags.Append(earlyConfDiags) + c.showDiagnostics(diags) + + return 1 + } + + var back backend.Backend + + // There may be config errors or backend init errors but these will be shown later _after_ + // checking for core version requirement errors. + var backDiags tfdiags.Diagnostics + var backendOutput bool + + switch { + case flagCloud && rootModEarly.CloudConfig != nil: + back, backendOutput, backDiags = c.initCloud(rootModEarly, flagConfigExtra) + case flagBackend: + back, backendOutput, backDiags = c.initBackend(rootModEarly, flagConfigExtra) + default: + // load the previously-stored backend config + back, backDiags = c.Meta.backendFromState() + } + if backendOutput { + header = true + } + + var state *states.State + + // If we have a functional backend (either just initialized or initialized + // on a previous run) we'll use the current state as a potential source + // of provider dependencies. 
+ if back != nil { + c.ignoreRemoteVersionConflict(back) + workspace, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + sMgr, err := back.StateMgr(workspace) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error loading state: %s", err)) + return 1 + } + + if err := sMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err)) + return 1 + } + + state = sMgr.State() + } + + if flagGet { + modsOutput, modsAbort, modsDiags := c.getModules(path, rootModEarly, flagUpgrade) + diags = diags.Append(modsDiags) + if modsAbort || modsDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if modsOutput { + header = true + } + } + + // With all of the modules (hopefully) installed, we can now try to load the + // whole configuration tree. + config, confDiags := c.loadConfig(path) + // configDiags will be handled after the version constraint check, since an + // incorrect version of terraform may be producing errors for configuration + // constructs added in later versions. + + // Before we go further, we'll check to make sure none of the modules in + // the configuration declare that they don't support this Terraform + // version, so we can produce a version-related error message rather than + // potentially-confusing downstream errors. + versionDiags := terraform.CheckCoreVersionRequirements(config) + if versionDiags.HasErrors() { + c.showDiagnostics(versionDiags) + return 1 + } + + // If we pass the core version check, we want to show any errors from initializing the backend next, + // which will include syntax errors from loading the configuration. However, there's a special case + // where we are unable to load the backend from configuration or state _and_ the configuration has + // errors. In that case, we want to show a slightly friendlier error message for newcomers. 
+ showBackendDiags := back != nil || rootModEarly.Backend != nil || rootModEarly.CloudConfig != nil + if showBackendDiags { + diags = diags.Append(backDiags) + if backDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } else { + diags = diags.Append(earlyConfDiags) + if earlyConfDiags.HasErrors() { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(diags) + return 1 + } + } + + // If everything is ok with the core version check and backend initialization, + // show other errors from loading the full configuration tree. + diags = diags.Append(confDiags) + if confDiags.HasErrors() { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(diags) + return 1 + } + + // Now that we have loaded all modules, check the module tree for missing providers. + providersOutput, providersAbort, providerDiags := c.getProviders(config, state, flagUpgrade, flagPluginPath, flagLockfile) + diags = diags.Append(providerDiags) + if providersAbort || providerDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if providersOutput { + header = true + } + + // If we outputted information, then we need to output a newline + // so that our success message is nicely spaced out from prior text. + if header { + c.Ui.Output("") + } + + // If we accumulated any warnings along the way that weren't accompanied + // by errors then we'll output them here so that the success message is + // still the final thing shown. + c.showDiagnostics(diags) + _, cloud := back.(*cloud.Cloud) + output := outputInitSuccess + if cloud { + output = outputInitSuccessCloud + } + + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) + + if !c.RunningInAutomation { + // If we're not running in an automation wrapper, give the user + // some more detailed next steps that are appropriate for interactive + // shell usage. 
+ output = outputInitSuccessCLI + if cloud { + output = outputInitSuccessCLICloud + } + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) + } + return 0 +} + +func (c *InitCommand) getModules(path string, earlyRoot *configs.Module, upgrade bool) (output bool, abort bool, diags tfdiags.Diagnostics) { + if len(earlyRoot.ModuleCalls) == 0 { + // Nothing to do + return false, false, nil + } + + if upgrade { + c.Ui.Output(c.Colorize().Color("[reset][bold]Upgrading modules...")) + } else { + c.Ui.Output(c.Colorize().Color("[reset][bold]Initializing modules...")) + } + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: true, + } + + installAbort, installDiags := c.installModules(path, upgrade, hooks) + diags = diags.Append(installDiags) + + // At this point, installModules may have generated error diags or been + // aborted by SIGINT. In any case we continue and the manifest as best + // we can. + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if c.configLoader != nil { + if err := c.configLoader.RefreshModules(); err != nil { + // Should never happen + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read module manifest", + fmt.Sprintf("After installing modules, Terraform could not re-read the manifest of installed modules. This is a bug in Terraform. %s.", err), + )) + } + } + + return true, installAbort, diags +} + +func (c *InitCommand) initCloud(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { + c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing Terraform Cloud...")) + + if len(extraConfig.AllItems()) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid command-line option", + "The -backend-config=... 
command line option is only for state backends, and is not applicable to Terraform Cloud-based configurations.\n\nTo change the set of workspaces associated with this configuration, edit the Cloud configuration block in the root module.", + )) + return nil, true, diags + } + + backendConfig := root.CloudConfig.ToBackendConfig() + + opts := &BackendOpts{ + Config: &backendConfig, + Init: true, + } + + back, backDiags := c.Backend(opts) + diags = diags.Append(backDiags) + return back, true, diags +} + +func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { + c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing the backend...")) + + var backendConfig *configs.Backend + var backendConfigOverride hcl.Body + if root.Backend != nil { + backendType := root.Backend.Type + if backendType == "cloud" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported backend type", + Detail: fmt.Sprintf("There is no explicit backend type named %q. 
To configure Terraform Cloud, declare a 'cloud' block instead.", backendType), + Subject: &root.Backend.TypeRange, + }) + return nil, true, diags + } + + bf := backendInit.Backend(backendType) + if bf == nil { + detail := fmt.Sprintf("There is no backend type named %q.", backendType) + if msg, removed := backendInit.RemovedBackends[backendType]; removed { + detail = msg + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported backend type", + Detail: detail, + Subject: &root.Backend.TypeRange, + }) + return nil, true, diags + } + + b := bf() + backendSchema := b.ConfigSchema() + backendConfig = root.Backend + + var overrideDiags tfdiags.Diagnostics + backendConfigOverride, overrideDiags = c.backendConfigOverrideBody(extraConfig, backendSchema) + diags = diags.Append(overrideDiags) + if overrideDiags.HasErrors() { + return nil, true, diags + } + } else { + // If the user supplied a -backend-config on the CLI but no backend + // block was found in the configuration, it's likely - but not + // necessarily - a mistake. Return a warning. + if !extraConfig.Empty() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Missing backend configuration", + `-backend-config was used without a "backend" block in the configuration. + +If you intended to override the default local backend configuration, +no action is required, but you may add an explicit backend block to your +configuration to clear this warning: + +terraform { + backend "local" {} +} + +However, if you intended to override a defined backend, please verify that +the backend configuration is present and valid. +`, + )) + } + } + + opts := &BackendOpts{ + Config: backendConfig, + ConfigOverride: backendConfigOverride, + Init: true, + } + + back, backDiags := c.Backend(opts) + diags = diags.Append(backDiags) + return back, true, diags +} + +// Load the complete module tree, and fetch any missing providers. +// This method outputs its own Ui. 
+func (c *InitCommand) getProviders(config *configs.Config, state *states.State, upgrade bool, pluginDirs []string, flagLockfile string) (output, abort bool, diags tfdiags.Diagnostics) { + // Dev overrides cause the result of "terraform init" to be irrelevant for + // any overridden providers, so we'll warn about it to avoid later + // confusion when Terraform ends up using a different provider than the + // lock file called for. + diags = diags.Append(c.providerDevOverrideInitWarnings()) + + // First we'll collect all the provider dependencies we can see in the + // configuration and the state. + reqs, hclDiags := config.ProviderRequirements() + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return false, true, diags + } + if state != nil { + stateReqs := state.ProviderRequirements() + reqs = reqs.Merge(stateReqs) + } + + for providerAddr := range reqs { + if providerAddr.IsLegacy() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid legacy provider address", + fmt.Sprintf( + "This configuration or its associated state refers to the unqualified provider %q.\n\nYou must complete the Terraform 0.13 upgrade process before upgrading to later versions.", + providerAddr.Type, + ), + )) + } + } + + previousLocks, moreDiags := c.lockedDependencies() + diags = diags.Append(moreDiags) + + if diags.HasErrors() { + return false, true, diags + } + + var inst *providercache.Installer + if len(pluginDirs) == 0 { + // By default we use a source that looks for providers in all of the + // standard locations, possibly customized by the user in CLI config. + inst = c.providerInstaller() + } else { + // If the user passes at least one -plugin-dir then that circumvents + // the usual sources and forces Terraform to consult only the given + // directories. Anything not available in one of those directories + // is not available for installation. 
+ source := c.providerCustomLocalDirectorySource(pluginDirs) + inst = c.providerInstallerCustomSource(source) + + // The default (or configured) search paths are logged earlier, in provider_source.go + // Log that those are being overridden by the `-plugin-dir` command line options + log.Println("[DEBUG] init: overriding provider plugin search paths") + log.Printf("[DEBUG] will search for provider plugins in %s", pluginDirs) + } + + // Installation can be aborted by interruption signals + ctx, done := c.InterruptibleContext() + defer done() + + // We want to print out a nice warning if we don't manage to pull + // checksums for all our providers. This is tracked via callbacks + // and incomplete providers are stored here for later analysis. + var incompleteProviders []string + + // Because we're currently just streaming a series of events sequentially + // into the terminal, we're showing only a subset of the events to keep + // things relatively concise. Later it'd be nice to have a progress UI + // where statuses update in-place, but we can't do that as long as we + // are shimming our vt100 output to the legacy console API on Windows. 
+ evts := &providercache.InstallerEvents{ + PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { + c.Ui.Output(c.Colorize().Color( + "\n[reset][bold]Initializing provider plugins...", + )) + }, + ProviderAlreadyInstalled: func(provider addrs.Provider, selectedVersion getproviders.Version) { + c.Ui.Info(fmt.Sprintf("- Using previously-installed %s v%s", provider.ForDisplay(), selectedVersion)) + }, + BuiltInProviderAvailable: func(provider addrs.Provider) { + c.Ui.Info(fmt.Sprintf("- %s is built in to Terraform", provider.ForDisplay())) + }, + BuiltInProviderFailure: func(provider addrs.Provider, err error) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid dependency on built-in provider", + fmt.Sprintf("Cannot use %s: %s.", provider.ForDisplay(), err), + )) + }, + QueryPackagesBegin: func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool) { + if locked { + c.Ui.Info(fmt.Sprintf("- Reusing previous version of %s from the dependency lock file", provider.ForDisplay())) + } else { + if len(versionConstraints) > 0 { + c.Ui.Info(fmt.Sprintf("- Finding %s versions matching %q...", provider.ForDisplay(), getproviders.VersionConstraintsString(versionConstraints))) + } else { + c.Ui.Info(fmt.Sprintf("- Finding latest version of %s...", provider.ForDisplay())) + } + } + }, + LinkFromCacheBegin: func(provider addrs.Provider, version getproviders.Version, cacheRoot string) { + c.Ui.Info(fmt.Sprintf("- Using %s v%s from the shared cache directory", provider.ForDisplay(), version)) + }, + FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation) { + c.Ui.Info(fmt.Sprintf("- Installing %s v%s...", provider.ForDisplay(), version)) + }, + QueryPackagesFailure: func(provider addrs.Provider, err error) { + switch errorTy := err.(type) { + case getproviders.ErrProviderNotFound: + sources := errorTy.Sources + displaySources := 
make([]string, len(sources)) + for i, source := range sources { + displaySources[i] = fmt.Sprintf(" - %s", source) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to query available provider packages", + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s\n\n%s", + provider.ForDisplay(), err, strings.Join(displaySources, "\n"), + ), + )) + case getproviders.ErrRegistryProviderNotKnown: + // We might be able to suggest an alternative provider to use + // instead of this one. + suggestion := fmt.Sprintf("\n\nAll modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on %s, run the following command:\n terraform providers", provider.ForDisplay()) + alternative := getproviders.MissingProviderSuggestion(ctx, provider, inst.ProviderSource(), reqs) + if alternative != provider { + suggestion = fmt.Sprintf( + "\n\nDid you intend to use %s? If so, you must specify that source address in each module which requires that provider. To see which modules are currently depending on %s, run the following command:\n terraform providers", + alternative.ForDisplay(), provider.ForDisplay(), + ) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to query available provider packages", + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s%s", + provider.ForDisplay(), err, suggestion, + ), + )) + case getproviders.ErrHostNoProviders: + switch { + case errorTy.Hostname == svchost.Hostname("github.com") && !errorTy.HasOtherVersion: + // If a user copies the URL of a GitHub repository into + // the source argument and removes the schema to make it + // provider-address-shaped then that's one way we can end up + // here. We'll use a specialized error message in anticipation + // of that mistake. 
We only do this if github.com isn't a + // provider registry, to allow for the (admittedly currently + // rather unlikely) possibility that github.com starts being + // a real Terraform provider registry in the future. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The given source address %q specifies a GitHub repository rather than a Terraform provider. Refer to the documentation of the provider to find the correct source address to use.", + provider.String(), + ), + )) + + case errorTy.HasOtherVersion: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The host %q given in in provider source address %q does not offer a Terraform provider registry that is compatible with this Terraform version, but it may be compatible with a different Terraform version.", + errorTy.Hostname, provider.String(), + ), + )) + + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The host %q given in in provider source address %q does not offer a Terraform provider registry.", + errorTy.Hostname, provider.String(), + ), + )) + } + + case getproviders.ErrRequestCanceled: + // We don't attribute cancellation to any particular operation, + // but rather just emit a single general message about it at + // the end, by checking ctx.Err(). 
+ + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to query available provider packages", + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s", + provider.ForDisplay(), err, + ), + )) + } + + }, + QueryPackagesWarning: func(provider addrs.Provider, warnings []string) { + displayWarnings := make([]string, len(warnings)) + for i, warning := range warnings { + displayWarnings[i] = fmt.Sprintf("- %s", warning) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Additional provider information from registry", + fmt.Sprintf("The remote registry returned warnings for %s:\n%s", + provider.String(), + strings.Join(displayWarnings, "\n"), + ), + )) + }, + LinkFromCacheFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to install provider from shared cache", + fmt.Sprintf("Error while importing %s v%s from the shared cache directory: %s.", provider.ForDisplay(), version, err), + )) + }, + FetchPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + const summaryIncompatible = "Incompatible provider version" + switch err := err.(type) { + case getproviders.ErrProtocolNotSupported: + closestAvailable := err.Suggestion + switch { + case closestAvailable == getproviders.UnspecifiedVersion: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(errProviderVersionIncompatible, provider.String()), + )) + case version.GreaterThan(closestAvailable): + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(providerProtocolTooNew, provider.ForDisplay(), + version, tfversion.String(), closestAvailable, closestAvailable, + getproviders.VersionConstraintsString(reqs[provider]), + ), + )) + default: // version is less than closestAvailable + diags = diags.Append(tfdiags.Sourceless( + 
tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(providerProtocolTooOld, provider.ForDisplay(), + version, tfversion.String(), closestAvailable, closestAvailable, + getproviders.VersionConstraintsString(reqs[provider]), + ), + )) + } + case getproviders.ErrPlatformNotSupported: + switch { + case err.MirrorURL != nil: + // If we're installing from a mirror then it may just be + // the mirror lacking the package, rather than it being + // unavailable from upstream. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf( + "Your chosen provider mirror at %s does not have a %s v%s package available for your current platform, %s.\n\nProvider releases are separate from Terraform CLI releases, so this provider might not support your current platform. Alternatively, the mirror itself might have only a subset of the plugin packages available in the origin registry, at %s.", + err.MirrorURL, err.Provider, err.Version, err.Platform, + err.Provider.Hostname, + ), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf( + "Provider %s v%s does not have a package available for your current platform, %s.\n\nProvider releases are separate from Terraform CLI releases, so not all providers are available for all platforms. Other versions of this provider may have different platforms supported.", + err.Provider, err.Version, err.Platform, + ), + )) + } + + case getproviders.ErrRequestCanceled: + // We don't attribute cancellation to any particular operation, + // but rather just emit a single general message about it at + // the end, by checking ctx.Err(). + + default: + // We can potentially end up in here under cancellation too, + // in spite of our getproviders.ErrRequestCanceled case above, + // because not all of the outgoing requests we do under the + // "fetch package" banner are source metadata requests. 
+ // In that case we will emit a redundant error here about + // the request being cancelled, but we'll still detect it + // as a cancellation after the installer returns and do the + // normal cancellation handling. + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to install provider", + fmt.Sprintf("Error while installing %s v%s: %s", provider.ForDisplay(), version, err), + )) + } + }, + FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult) { + var keyID string + if authResult != nil && authResult.ThirdPartySigned() { + keyID = authResult.KeyID + } + if keyID != "" { + keyID = c.Colorize().Color(fmt.Sprintf(", key ID [reset][bold]%s[reset]", keyID)) + } + + c.Ui.Info(fmt.Sprintf("- Installed %s v%s (%s%s)", provider.ForDisplay(), version, authResult, keyID)) + }, + ProvidersLockUpdated: func(provider addrs.Provider, version getproviders.Version, localHashes []getproviders.Hash, signedHashes []getproviders.Hash, priorHashes []getproviders.Hash) { + // We're going to use this opportunity to track if we have any + // "incomplete" installs of providers. An incomplete install is + // when we are only going to write the local hashes into our lock + // file which means a `terraform init` command will fail in future + // when used on machines of a different architecture. + // + // We want to print a warning about this. + + if len(signedHashes) > 0 { + // If we have any signedHashes hashes then we don't worry - as + // we know we retrieved all available hashes for this version + // anyway. + return + } + + // If local hashes and prior hashes are exactly the same then + // it means we didn't record any signed hashes previously, and + // we know we're not adding any extra in now (because we already + // checked the signedHashes), so that's a problem. 
+ // + // In the actual check here, if we have any priorHashes and those + // hashes are not the same as the local hashes then we're going to + // accept that this provider has been configured correctly. + if len(priorHashes) > 0 && !reflect.DeepEqual(localHashes, priorHashes) { + return + } + + // Now, either signedHashes is empty, or priorHashes is exactly the + // same as our localHashes which means we never retrieved the + // signedHashes previously. + // + // Either way, this is bad. Let's complain/warn. + incompleteProviders = append(incompleteProviders, provider.ForDisplay()) + }, + ProvidersFetched: func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult) { + thirdPartySigned := false + for _, authResult := range authResults { + if authResult.ThirdPartySigned() { + thirdPartySigned = true + break + } + } + if thirdPartySigned { + c.Ui.Info(fmt.Sprintf("\nPartner and community providers are signed by their developers.\n" + + "If you'd like to know more about provider signing, you can read about it here:\n" + + "https://www.terraform.io/docs/cli/plugins/signing.html")) + } + }, + } + ctx = evts.OnContext(ctx) + + mode := providercache.InstallNewProvidersOnly + if upgrade { + if flagLockfile == "readonly" { + c.Ui.Error("The -upgrade flag conflicts with -lockfile=readonly.") + return true, true, diags + } + + mode = providercache.InstallUpgrades + } + newLocks, err := inst.EnsureProviderVersions(ctx, previousLocks, reqs, mode) + if ctx.Err() == context.Canceled { + c.showDiagnostics(diags) + c.Ui.Error("Provider installation was canceled by an interrupt signal.") + return true, true, diags + } + if err != nil { + // The errors captured in "err" should be redundant with what we + // received via the InstallerEvents callbacks above, so we'll + // just return those as long as we have some. 
+ if !diags.HasErrors() { + diags = diags.Append(err) + } + + return true, true, diags + } + + // If the provider dependencies have changed since the last run then we'll + // say a little about that in case the reader wasn't expecting a change. + // (When we later integrate module dependencies into the lock file we'll + // probably want to refactor this so that we produce one lock-file related + // message for all changes together, but this is here for now just because + // it's the smallest change relative to what came before it, which was + // a hidden JSON file specifically for tracking providers.) + if !newLocks.Equal(previousLocks) { + // if readonly mode + if flagLockfile == "readonly" { + // check if required provider dependences change + if !newLocks.EqualProviderAddress(previousLocks) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + `Provider dependency changes detected`, + `Changes to the required provider dependencies were detected, but the lock file is read-only. To use and record these requirements, run "terraform init" without the "-lockfile=readonly" flag.`, + )) + return true, true, diags + } + + // suppress updating the file to record any new information it learned, + // such as a hash using a new scheme. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + `Provider lock file not updated`, + `Changes to the provider selections were detected, but not saved in the .terraform.lock.hcl file. To record these selections, run "terraform init" without the "-lockfile=readonly" flag.`, + )) + return true, false, diags + } + + // Jump in here and add a warning if any of the providers are incomplete. + if len(incompleteProviders) > 0 { + // We don't really care about the order here, we just want the + // output to be deterministic. 
+ sort.Slice(incompleteProviders, func(i, j int) bool { + return incompleteProviders[i] < incompleteProviders[j] + }) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + incompleteLockFileInformationHeader, + fmt.Sprintf( + incompleteLockFileInformationBody, + strings.Join(incompleteProviders, "\n - "), + getproviders.CurrentPlatform.String()))) + } + + if previousLocks.Empty() { + // A change from empty to non-empty is special because it suggests + // we're running "terraform init" for the first time against a + // new configuration. In that case we'll take the opportunity to + // say a little about what the dependency lock file is, for new + // users or those who are upgrading from a previous Terraform + // version that didn't have dependency lock files. + c.Ui.Output(c.Colorize().Color(` +Terraform has created a lock file [bold].terraform.lock.hcl[reset] to record the provider +selections it made above. Include this file in your version control repository +so that Terraform can guarantee to make the same selections by default when +you run "terraform init" in the future.`)) + } else { + c.Ui.Output(c.Colorize().Color(` +Terraform has made some changes to the provider dependency selections recorded +in the .terraform.lock.hcl file. Review those changes and commit them to your +version control system if they represent changes you intended to make.`)) + } + + moreDiags = c.replaceLockedDependencies(newLocks) + diags = diags.Append(moreDiags) + } + + return true, false, diags +} + +// backendConfigOverrideBody interprets the raw values of -backend-config +// arguments into a hcl Body that should override the backend settings given +// in the configuration. +// +// If the result is nil then no override needs to be provided. +// +// If the returned diagnostics contains errors then the returned body may be +// incomplete or invalid. 
+func (c *InitCommand) backendConfigOverrideBody(flags rawFlags, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + items := flags.AllItems() + if len(items) == 0 { + return nil, nil + } + + var ret hcl.Body + var diags tfdiags.Diagnostics + synthVals := make(map[string]cty.Value) + + mergeBody := func(newBody hcl.Body) { + if ret == nil { + ret = newBody + } else { + ret = configs.MergeBodies(ret, newBody) + } + } + flushVals := func() { + if len(synthVals) == 0 { + return + } + newBody := configs.SynthBody("-backend-config=...", synthVals) + mergeBody(newBody) + synthVals = make(map[string]cty.Value) + } + + if len(items) == 1 && items[0].Value == "" { + // Explicitly remove all -backend-config options. + // We do this by setting an empty but non-nil ConfigOverrides. + return configs.SynthBody("-backend-config=''", synthVals), diags + } + + for _, item := range items { + eq := strings.Index(item.Value, "=") + + if eq == -1 { + // The value is interpreted as a filename. + newBody, fileDiags := c.loadHCLFile(item.Value) + diags = diags.Append(fileDiags) + if fileDiags.HasErrors() { + continue + } + // Generate an HCL body schema for the backend block. + var bodySchema hcl.BodySchema + for name := range schema.Attributes { + // We intentionally ignore the `Required` attribute here + // because backend config override files can be partial. The + // goal is to make sure we're not loading a file with + // extraneous attributes or blocks. + bodySchema.Attributes = append(bodySchema.Attributes, hcl.AttributeSchema{ + Name: name, + }) + } + for name, block := range schema.BlockTypes { + var labelNames []string + if block.Nesting == configschema.NestingMap { + labelNames = append(labelNames, "key") + } + bodySchema.Blocks = append(bodySchema.Blocks, hcl.BlockHeaderSchema{ + Type: name, + LabelNames: labelNames, + }) + } + // Verify that the file body matches the expected backend schema. 
+ _, schemaDiags := newBody.Content(&bodySchema) + diags = diags.Append(schemaDiags) + if schemaDiags.HasErrors() { + continue + } + flushVals() // deal with any accumulated individual values first + mergeBody(newBody) + } else { + name := item.Value[:eq] + rawValue := item.Value[eq+1:] + attrS := schema.Attributes[name] + if attrS == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend configuration argument", + fmt.Sprintf("The backend configuration argument %q given on the command line is not expected for the selected backend type.", name), + )) + continue + } + value, valueDiags := configValueFromCLI(item.String(), rawValue, attrS.Type) + diags = diags.Append(valueDiags) + if valueDiags.HasErrors() { + continue + } + synthVals[name] = value + } + } + + flushVals() + + return ret, diags +} + +func (c *InitCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *InitCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-backend": completePredictBoolean, + "-cloud": completePredictBoolean, + "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that + "-force-copy": complete.PredictNothing, + "-from-module": completePredictModuleSource, + "-get": completePredictBoolean, + "-input": completePredictBoolean, + "-lock": completePredictBoolean, + "-lock-timeout": complete.PredictAnything, + "-no-color": complete.PredictNothing, + "-plugin-dir": complete.PredictDirs(""), + "-reconfigure": complete.PredictNothing, + "-migrate-state": complete.PredictNothing, + "-upgrade": completePredictBoolean, + } +} + +func (c *InitCommand) Help() string { + helpText := ` +Usage: terraform [global options] init [options] + + Initialize a new or existing Terraform working directory by creating + initial files, loading any remote state, downloading modules, etc. 
+ + This is the first command that should be run for any new or existing + Terraform configuration per machine. This sets up all the local data + necessary to run Terraform that is typically not committed to version + control. + + This command is always safe to run multiple times. Though subsequent runs + may give errors, this command will never delete your configuration or + state. Even so, if you have important information, please back it up prior + to running this command, just in case. + +Options: + + -backend=false Disable backend or Terraform Cloud initialization + for this configuration and use what was previously + initialized instead. + + aliases: -cloud=false + + -backend-config=path Configuration to be merged with what is in the + configuration file's 'backend' block. This can be + either a path to an HCL file with key/value + assignments (same format as terraform.tfvars) or a + 'key=value' format, and can be specified multiple + times. The backend type must be in the configuration + itself. + + -force-copy Suppress prompts about copying state data when + initializating a new state backend. This is + equivalent to providing a "yes" to all confirmation + prompts. + + -from-module=SOURCE Copy the contents of the given module into the target + directory before initialization. + + -get=false Disable downloading modules for this configuration. + + -input=false Disable interactive prompts. Note that some actions may + require interactive prompts and will error if input is + disabled. + + -lock=false Don't hold a state lock during backend migration. + This is dangerous if others might concurrently run + commands against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -plugin-dir Directory containing plugin binaries. This overrides all + default search paths for plugins, and prevents the + automatic installation of plugins. This flag can be used + multiple times. 
+ + -reconfigure Reconfigure a backend, ignoring any saved + configuration. + + -migrate-state Reconfigure a backend, and attempt to migrate any + existing state. + + -upgrade Install the latest module and provider versions + allowed within configured constraints, overriding the + default behavior of selecting exactly the version + recorded in the dependency lockfile. + + -lockfile=MODE Set a dependency lockfile mode. + Currently only "readonly" is valid. + + -ignore-remote-version A rare option used for Terraform Cloud and the remote backend + only. Set this to ignore checking that the local and remote + Terraform versions use compatible state representations, making + an operation proceed even when there is a potential mismatch. + See the documentation on configuring Terraform with + Terraform Cloud for more information. + +` + return strings.TrimSpace(helpText) +} + +func (c *InitCommand) Synopsis() string { + return "Prepare your working directory for other commands" +} + +const errInitConfigError = ` +[reset]There are some problems with the configuration, described below. + +The Terraform configuration must be valid before initialization so that +Terraform can determine which modules and providers need to be installed. +` + +const errInitCopyNotEmpty = ` +The working directory already contains files. The -from-module option requires +an empty directory into which a copy of the referenced module will be placed. + +To initialize the configuration already in this working directory, omit the +-from-module option. +` + +const outputInitEmpty = ` +[reset][bold]Terraform initialized in an empty directory![reset] + +The directory has no Terraform configuration files. You may begin working +with Terraform immediately by creating Terraform configuration files. 
+` + +const outputInitSuccess = ` +[reset][bold][green]Terraform has been successfully initialized![reset][green] +` + +const outputInitSuccessCloud = ` +[reset][bold][green]Terraform Cloud has been successfully initialized![reset][green] +` + +const outputInitSuccessCLI = `[reset][green] +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. +` + +const outputInitSuccessCLICloud = `[reset][green] +You may now begin working with Terraform Cloud. Try running "terraform plan" to +see any changes that are required for your infrastructure. + +If you ever set or change modules or Terraform Settings, run "terraform init" +again to reinitialize your working directory. +` + +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of terraform, +// but a newer version of the provider is compatible. +const providerProtocolTooOld = `Provider %q v%s is not compatible with Terraform %s. +Provider version %s is the latest compatible version. Select it with the following version constraint: + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on compatibility between provider and Terraform versions. +` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of terraform, +// and the user could either upgrade terraform or choose an older version of the +// provider. +const providerProtocolTooNew = `Provider %q v%s is not compatible with Terraform %s. 
+You need to downgrade to v%s or earlier. Select it with the following constraint: + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on compatibility between provider and Terraform versions. +Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. +` + +// No version of the provider is compatible. +const errProviderVersionIncompatible = `No compatible versions of provider %s were found.` + +// incompleteLockFileInformationHeader is the summary displayed to users when +// the lock file has only recorded local hashes. +const incompleteLockFileInformationHeader = `Incomplete lock file information for providers` + +// incompleteLockFileInformationBody is the body of text displayed to users when +// the lock file has only recorded local hashes. +const incompleteLockFileInformationBody = `Due to your customized provider installation methods, Terraform was forced to calculate lock file checksums locally for the following providers: + - %s + +The current .terraform.lock.hcl file only includes checksums for %s, so Terraform running on another platform will fail to install these providers. 
+ +To calculate additional checksums for another platform, run: + terraform providers lock -platform=linux_amd64 +(where linux_amd64 is the platform to generate)` diff --git a/command/init_test.go b/command/init_test.go new file mode 100644 index 000000000000..0cbc4de33731 --- /dev/null +++ b/command/init_test.go @@ -0,0 +1,2852 @@ +package command + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/providercache" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +func TestInit_empty(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_multipleArgs(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "bad", + "bad", + } + if code := c.Run(args); code != 1 { + 
t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + } +} + +func TestInit_fromModule_cwdDest(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, os.ModePerm) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-from-module=" + testFixturePath("init"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(td, "hello.tf")); err != nil { + t.Fatalf("err: %s", err) + } +} + +// https://github.com/hashicorp/terraform/issues/518 +func TestInit_fromModule_dstInSrc(t *testing.T) { + dir := t.TempDir() + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // Change to the temporary directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(dir); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + if err := os.Mkdir("foo", os.ModePerm); err != nil { + t.Fatal(err) + } + + if _, err := os.Create("issue518.tf"); err != nil { + t.Fatalf("err: %s", err) + } + + if err := os.Chdir("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-from-module=./..", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(dir, "foo", "issue518.tf")); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestInit_get(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + 
view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Check output + output := ui.OutputWriter.String() + if !strings.Contains(output, "foo in foo") { + t.Fatalf("doesn't look like we installed module 'foo': %s", output) + } +} + +func TestInit_getUpgradeModules(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-get=true", + "-upgrade", + } + if code := c.Run(args); code != 0 { + t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) + } + + // Check output + output := ui.OutputWriter.String() + if !strings.Contains(output, "Upgrading modules...") { + t.Fatalf("doesn't look like get upgrade: %s", output) + } +} + +func TestInit_backend(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestInit_backendUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer 
testChdir(t, td)() + + { + log.Printf("[TRACE] TestInit_backendUnset: beginning first init") + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // Init + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + log.Printf("[TRACE] TestInit_backendUnset: first init complete") + t.Logf("First run output:\n%s", ui.OutputWriter.String()) + t.Logf("First run errors:\n%s", ui.ErrorWriter.String()) + + if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + log.Printf("[TRACE] TestInit_backendUnset: beginning second init") + + // Unset + if err := ioutil.WriteFile("main.tf", []byte(""), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-force-copy"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + log.Printf("[TRACE] TestInit_backendUnset: second init complete") + t.Logf("Second run output:\n%s", ui.OutputWriter.String()) + t.Logf("Second run errors:\n%s", ui.ErrorWriter.String()) + + s := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if !s.Backend.Empty() { + t.Fatal("should not have backend config") + } + } +} + +func TestInit_backendConfigFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file"), td) + defer testChdir(t, td)() + + t.Run("good-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: 
view, + }, + } + args := []string{"-backend-config", "input.config"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + }) + + // the backend config file must not be a full terraform block + t.Run("full-backend-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "backend.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Unsupported block type") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // the backend config file must match the schema for the backend + t.Run("invalid-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "invalid.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Unsupported argument") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // missing file is an error + t.Run("missing-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "missing.config"} + if code := c.Run(args); code != 1 { 
+ t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Failed to read file") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // blank filename clears the backend config + t.Run("blank-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config=", "-migrate-state"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify the backend config is empty + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":null,"workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + }) + + // simulate the local backend having a required field which is not + // specified in the override file + t.Run("required-argument", func(t *testing.T) { + c := &InitCommand{} + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "path": { + Type: cty.String, + Optional: true, + }, + "workspace_dir": { + Type: cty.String, + Required: true, + }, + }, + } + flagConfigExtra := newRawFlags("-backend-config") + flagConfigExtra.Set("input.config") + _, diags := c.backendConfigOverrideBody(flagConfigExtra, schema) + if len(diags) != 0 { + t.Errorf("expected no diags, got: %s", diags.Err()) + } + }) +} + +func TestInit_backendConfigFilePowershellConfusion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + 
+ // SUBTLE: when using -flag=value with Powershell, unquoted values are + // broken into separate arguments. This results in the init command + // interpreting the flags as an empty backend-config setting (which is + // semantically valid!) followed by a custom configuration path. + // + // Adding the "=" here forces this codepath to be checked, and it should + // result in an early exit with a diagnostic that the provided + // configuration file is not a diretory. + args := []string{"-backend-config=", "./input.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + output := ui.ErrorWriter.String() + if got, want := output, `Too many command line arguments`; !strings.Contains(got, want) { + t.Fatalf("wrong output\ngot:\n%s\n\nwant: message containing %q", got, want) + } +} + +func TestInit_backendReconfigure(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "hashicorp/test": {"1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + ProviderSource: providerSource, + Ui: ui, + View: view, + }, + } + + // create some state, so the backend has something to migrate. + f, err := os.Create("foo") // this is the path" in the backend config + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(testState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // now run init again, changing the path. 
+ // The -reconfigure flag prevents init from migrating + // Without -reconfigure, the test fails since the backend asks for input on migrating state + args = []string{"-reconfigure", "-backend-config", "path=changed"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_backendConfigFileChange(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file-change"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "input.config", "-migrate-state"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } +} + +func TestInit_backendMigrateWhileLocked(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-migrate-while-locked"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "hashicorp/test": {"1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + ProviderSource: providerSource, + Ui: ui, + View: view, + }, + } + + // Create some state, so the backend has something to migrate from + f, err := os.Create("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + err = 
writeStateForTesting(testState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Lock the source state + unlock, err := testLockState(t, testDataDir, "local-state.tfstate") + if err != nil { + t.Fatal(err) + } + defer unlock() + + // Attempt to migrate + args := []string{"-backend-config", "input.config", "-migrate-state", "-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("expected nonzero exit code: %s", ui.OutputWriter.String()) + } + + // Disabling locking should work + args = []string{"-backend-config", "input.config", "-migrate-state", "-force-copy", "-lock=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("expected zero exit code, got %d: %s", code, ui.ErrorWriter.String()) + } +} + +func TestInit_backendConfigFileChangeWithExistingState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file-change-migrate-existing"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + oldState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + + // we deliberately do not provide the answer for backend-migrate-copy-to-empty to trigger error + args := []string{"-migrate-state", "-backend-config", "input.config", "-input=true"} + if code := c.Run(args); code == 0 { + t.Fatal("expected error") + } + + // Read our backend config and verify new settings are not saved + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"local-state.tfstate"}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + // without changing config, hash should not change + if oldState.Backend.Hash != state.Backend.Hash { + t.Errorf("backend hash should not have changed\ngot: %d\nwant: %d", 
state.Backend.Hash, oldState.Backend.Hash) + } +} + +func TestInit_backendConfigKV(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-kv"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=hello"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } +} + +func TestInit_backendConfigKVReInit(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-kv"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=test"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // a second init should require no changes, nor should it change the backend. 
+ args = []string{"-input=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg := map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != "test" { + t.Fatalf(`expected backend path="test", got path="%v"`, cfg["path"]) + } + + // override the -backend-config options by settings + args = []string{"-input=false", "-backend-config", "", "-migrate-state"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg = map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != nil { + t.Fatalf(`expected backend path="", got path="%v"`, cfg["path"]) + } +} + +func TestInit_backendConfigKVReInitWithConfigDiff(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // a second init with identical config should require no changes, nor + // should it change the backend. 
+ args = []string{"-input=false", "-backend-config", "path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg := map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != "foo" { + t.Fatalf(`expected backend path="foo", got path="%v"`, cfg["foo"]) + } +} + +func TestInit_backendCli_no_config_block(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=test"} + if code := c.Run(args); code != 0 { + t.Fatalf("got exit status %d; want 0\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "Warning: Missing backend configuration") { + t.Fatal("expected missing backend block warning, got", errMsg) + } +} + +func TestInit_backendReinitWithExtra(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-empty"), td) + defer testChdir(t, td)() + + m := testMetaBackend(t, nil) + opts := &BackendOpts{ + ConfigOverride: configs.SynthBody("synth", map[string]cty.Value{ + "path": cty.StringVal("hello"), + }), + Init: true, + } + + _, cHash, err := m.backendConfig(opts) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=hello"} + if 
code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + if state.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } + + // init again and make sure nothing changes + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + if state.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } +} + +// move option from config to -backend-config args +func TestInit_backendReinitConfigToExtra(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + if code := c.Run([]string{"-input=false"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + backendHash := state.Backend.Hash + + // init again but remove the path option from the config + cfg := "terraform {\n backend 
\"local\" {}\n}\n" + if err := ioutil.WriteFile("main.tf", []byte(cfg), 0644); err != nil { + t.Fatal(err) + } + + // We need a fresh InitCommand here because the old one now has our configuration + // file cached inside it, so it won't re-read the modification we just made. + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false", "-backend-config=path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { + t.Errorf("wrong config after moving to arg\ngot: %s\nwant: %s", got, want) + } + + if state.Backend.Hash == backendHash { + t.Fatal("state.Backend.Hash was not updated") + } +} + +func TestInit_backendCloudInvalidOptions(t *testing.T) { + // There are various "terraform init" options that are only for + // traditional backends and not applicable to Terraform Cloud mode. + // For those, we want to return an explicit error rather than + // just silently ignoring them, so that users will be aware that + // Cloud mode has more of an expected "happy path" than the + // less-vertically-integrated backends do, and to avoid these + // unapplicable options becoming compatibility constraints for + // future evolution of Cloud mode. + + // We use the same starting fixture for all of these tests, but some + // of them will customize it a bit as part of their work. + setupTempDir := func(t *testing.T) func() { + t.Helper() + td := t.TempDir() + testCopyDir(t, testFixturePath("init-cloud-simple"), td) + unChdir := testChdir(t, td) + return unChdir + } + + // Some of the tests need a non-empty placeholder state file to work + // with. 
+ fakeState := states.BuildState(func(cb *states.SyncState) { + // Having a root module output value should be enough for this + // state file to be considered "non-empty" and thus a candidate + // for migration. + cb.SetOutputValue( + addrs.OutputValue{Name: "a"}.Absolute(addrs.RootModuleInstance), + cty.True, + false, + ) + }) + fakeStateFile := &statefile.File{ + Lineage: "boop", + Serial: 4, + TerraformVersion: version.Must(version.NewVersion("1.0.0")), + State: fakeState, + } + var fakeStateBuf bytes.Buffer + err := statefile.WriteForTest(fakeStateFile, &fakeStateBuf) + if err != nil { + t.Error(err) + } + fakeStateBytes := fakeStateBuf.Bytes() + + t.Run("-backend-config", func(t *testing.T) { + defer setupTempDir(t)() + + // We have -backend-config as a pragmatic way to dynamically set + // certain settings of backends that tend to vary depending on + // where Terraform is running, such as AWS authentication profiles + // that are naturally local only to the machine where Terraform is + // running. Those needs don't apply to Terraform Cloud, because + // the remote workspace encapsulates all of the details of how + // operations and state work in that case, and so the Cloud + // configuration is only about which workspaces we'll be working + // with. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config=anything"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -backend-config=... command line option is only for state backends, and +is not applicable to Terraform Cloud-based configurations. + +To change the set of workspaces associated with this configuration, edit the +Cloud configuration block in the root module. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-reconfigure", func(t *testing.T) { + defer setupTempDir(t)() + + // The -reconfigure option was originally imagined as a way to force + // skipping state migration when migrating between backends, but it + // has a historical flaw that it doesn't work properly when the + // initial situation is the implicit local backend with a state file + // present. The Terraform Cloud migration path has some additional + // steps to take care of more details automatically, and so + // -reconfigure doesn't really make sense in that context, particularly + // with its design bug with the handling of the implicit local backend. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-reconfigure"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -reconfigure option is for in-place reconfiguration of state backends +only, and is not needed when changing Terraform Cloud settings. + +When using Terraform Cloud, initialization automatically activates any new +Cloud configuration settings. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-reconfigure when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. 
+ + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-reconfigure"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -reconfigure option is unsupported when migrating to Terraform Cloud, +because activating Terraform Cloud involves some additional steps. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-migrate-state", func(t *testing.T) { + defer setupTempDir(t)() + + // In Cloud mode, migrating in or out always proposes migrating state + // and changing configuration while staying in cloud mode never migrates + // state, so this special option isn't relevant. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -migrate-state option is for migration between state backends only, and +is not applicable when using Terraform Cloud. + +State storage is handled automatically by Terraform Cloud and so the state +storage location is not configurable. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-migrate-state when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. 
+ + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -migrate-state option is for migration between state backends only, and +is not applicable when using Terraform Cloud. + +Terraform Cloud migration has additional steps, configured by interactive +prompts. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-force-copy", func(t *testing.T) { + defer setupTempDir(t)() + + // In Cloud mode, migrating in or out always proposes migrating state + // and changing configuration while staying in cloud mode never migrates + // state, so this special option isn't relevant. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -force-copy option is for migration between state backends only, and is +not applicable when using Terraform Cloud. + +State storage is handled automatically by Terraform Cloud and so the state +storage location is not configurable. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-force-copy when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. + + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -force-copy option is for migration between state backends only, and is +not applicable when using Terraform Cloud. + +Terraform Cloud migration has additional steps, configured by interactive +prompts. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + +} + +// make sure inputFalse stops execution on migrate +func TestInit_inputFalse(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false", "-backend-config=path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + // write different states for foo and bar + fooState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("foo"), + false, // not sensitive + ) + }) + if err := statemgr.NewFilesystem("foo").WriteState(fooState); err != nil { + t.Fatal(err) + } + barState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, // not sensitive + ) + }) + if err := statemgr.NewFilesystem("bar").WriteState(barState); err != nil { + t.Fatal(err) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args = []string{"-input=false", "-backend-config=path=bar", "-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatal("init should have failed", ui.OutputWriter) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "interactive input is disabled") { + t.Fatal("expected input disabled error, got", errMsg) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // A missing input=false should abort rather 
than loop infinitely + args = []string{"-backend-config=path=baz"} + if code := c.Run(args); code == 0 { + t.Fatal("init should have failed", ui.OutputWriter) + } +} + +func TestInit_getProvider(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "exact": {"1.2.3"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // check that we got the providers for our config + exactPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/exact/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(exactPath); os.IsNotExist(err) { + t.Fatal("provider 'exact' not downloaded") + } + greaterThanPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/greater-than/2.3.4/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { + t.Fatal("provider 'greater-than' not downloaded") + } + betweenPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/between/2.3.4/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(betweenPath); os.IsNotExist(err) { + t.Fatal("provider 'between' not downloaded") + } + + t.Run("future-state", func(t *testing.T) { + // getting providers should fail if a state from 
a newer version of + // terraform exists, since InitCommand.getProviders needs to inspect that + // state. + + f, err := os.Create(DefaultStateFilename) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + // Construct a mock state file from the far future + type FutureState struct { + Version uint `json:"version"` + Lineage string `json:"lineage"` + TerraformVersion string `json:"terraform_version"` + Outputs map[string]interface{} `json:"outputs"` + Resources []map[string]interface{} `json:"resources"` + } + fs := &FutureState{ + Version: 999, + Lineage: "123-456-789", + TerraformVersion: "999.0.0", + Outputs: make(map[string]interface{}), + Resources: make([]map[string]interface{}, 0), + } + src, err := json.MarshalIndent(fs, "", " ") + if err != nil { + t.Fatalf("failed to marshal future state: %s", err) + } + src = append(src, '\n') + _, err = f.Write(src) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + m.Ui = ui + m.View = view + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatal("expected error, got:", ui.OutputWriter) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "Unsupported state file format") { + t.Fatal("unexpected error:", errMsg) + } + }) +} + +func TestInit_getProviderSource(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-source"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "acme/alpha": {"1.2.3"}, + // config doesn't specify versions for other providers + "registry.example.com/acme/beta": {"1.0.0"}, + "gamma": {"2.0.0"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: 
providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // check that we got the providers for our config + exactPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/acme/alpha/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(exactPath); os.IsNotExist(err) { + t.Error("provider 'alpha' not downloaded") + } + greaterThanPath := fmt.Sprintf(".terraform/providers/registry.example.com/acme/beta/1.0.0/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { + t.Error("provider 'beta' not downloaded") + } + betweenPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/gamma/2.0.0/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(betweenPath); os.IsNotExist(err) { + t.Error("provider 'gamma' not downloaded") + } +} + +func TestInit_getProviderLegacyFromState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-legacy-from-state"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + "acme/alpha": {"1.2.3"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // Expect this diagnostic output + wants := []string{ + "Invalid legacy provider address", + "You must complete the Terraform 0.13 upgrade process", + } + got := ui.ErrorWriter.String() + for _, 
want := range wants { + if !strings.Contains(got, want) { + t.Fatalf("expected output to contain %q, got:\n\n%s", want, got) + } + } +} + +func TestInit_getProviderInvalidPackage(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-invalid-package"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + + // create a provider source which allows installing an invalid package + addr := addrs.MustParseProviderSourceString("invalid/package") + version := getproviders.MustParseVersion("1.0.0") + meta, close, err := getproviders.FakeInstallablePackageMeta( + addr, + version, + getproviders.VersionList{getproviders.MustParseVersion("5.0")}, + getproviders.CurrentPlatform, + "terraform-package", // should be "terraform-provider-package" + ) + defer close() + if err != nil { + t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), version, err) + } + providerSource := getproviders.NewMockSource([]getproviders.PackageMeta{meta}, nil) + + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // invalid provider should be installed + packagePath := fmt.Sprintf(".terraform/providers/registry.terraform.io/invalid/package/1.0.0/%s/terraform-package", getproviders.CurrentPlatform) + if _, err := os.Stat(packagePath); os.IsNotExist(err) { + t.Fatal("provider 'invalid/package' not downloaded") + } + + wantErrors := []string{ + "Failed to install provider", + "could not find executable file starting with 
terraform-provider-package", + } + got := ui.ErrorWriter.String() + for _, wantError := range wantErrors { + if !strings.Contains(got, wantError) { + t.Fatalf("missing error:\nwant: %q\ngot:\n%s", wantError, got) + } + } +} + +func TestInit_getProviderDetectedLegacy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-detected-legacy"), td) + defer testChdir(t, td)() + + // We need to construct a multisource with a mock source and a registry + // source: the mock source will return ErrRegistryProviderNotKnown for an + // unknown provider, and the registry source will allow us to look up the + // appropriate namespace if possible. + providerSource, psClose := newMockProviderSource(t, map[string][]string{ + "hashicorp/foo": {"1.2.3"}, + "terraform-providers/baz": {"2.3.4"}, // this will not be installed + }) + defer psClose() + registrySource, rsClose := testRegistrySource(t) + defer rsClose() + multiSource := getproviders.MultiSource{ + {Source: providerSource}, + {Source: registrySource}, + } + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + ProviderSource: multiSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + // foo should be installed + fooPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/foo/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(fooPath); os.IsNotExist(err) { + t.Error("provider 'foo' not installed") + } + // baz should not be installed + bazPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/terraform-providers/baz/2.3.4/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(bazPath); !os.IsNotExist(err) { + t.Error("provider 'baz' installed, 
but should not be") + } + + // error output is the main focus of this test + errOutput := ui.ErrorWriter.String() + errors := []string{ + "Failed to query available provider packages", + "Could not retrieve the list of available versions", + "registry.terraform.io/hashicorp/baz", + "registry.terraform.io/hashicorp/frob", + } + for _, want := range errors { + if !strings.Contains(errOutput, want) { + t.Fatalf("expected error %q: %s", want, errOutput) + } + } +} + +func TestInit_providerSource(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-required-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3", "1.2.4"}, + "test-beta": {"1.2.4"}, + "source": {"1.2.2", "1.2.3", "1.2.1"}, + }) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + if strings.Contains(ui.OutputWriter.String(), "Terraform has initialized, but configuration upgrades may be needed") { + t.Fatalf("unexpected \"configuration upgrade\" warning in output") + } + + cacheDir := m.providerLocalCacheDir() + gotPackages := cacheDir.AllAvailablePackages() + wantPackages := map[addrs.Provider][]providercache.CachedProvider{ + addrs.NewDefaultProvider("test"): { + { + Provider: addrs.NewDefaultProvider("test"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("test", "1.2.3", false), + }, + }, + addrs.NewDefaultProvider("test-beta"): { + { + Provider: addrs.NewDefaultProvider("test-beta"), + Version: getproviders.MustParseVersion("1.2.4"), + PackageDir: expectedPackageInstallPath("test-beta", "1.2.4", 
false), + }, + }, + addrs.NewDefaultProvider("source"): { + { + Provider: addrs.NewDefaultProvider("source"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("source", "1.2.3", false), + }, + }, + } + if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { + t.Errorf("wrong cache directory contents after upgrade\n%s", diff) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("test-beta"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("test-beta"), + getproviders.MustParseVersion("1.2.4"), + getproviders.MustParseVersionConstraints("= 1.2.4"), + []getproviders.Hash{ + getproviders.HashScheme1.New("see6W06w09Ea+AobFJ+mbvPTie6ASqZAAdlFZbs8BSM="), + }, + ), + addrs.NewDefaultProvider("test"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("test"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno="), + }, + ), + addrs.NewDefaultProvider("source"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("source"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("myS3qb3px3tRBq1ZWRYJeUH+kySWpBc0Yy8rw6W7/p4="), + }, + ), + } + + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } + + if got, want := ui.OutputWriter.String(), "Installed hashicorp/test v1.2.3 (verified checksum)"; !strings.Contains(got, want) { + t.Fatalf("unexpected output: %s\nexpected to include %q", got, want) + } + if got, want := ui.ErrorWriter.String(), "\n - hashicorp/source\n - 
hashicorp/test\n - hashicorp/test-beta"; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_cancelModules(t *testing.T) { + // This test runs `terraform init` as if SIGINT (or similar on other + // platforms) were sent to it, testing that it is interruptible. + + td := t.TempDir() + testCopyDir(t, testFixturePath("init-registry-module"), td) + defer testChdir(t, td)() + + // Our shutdown channel is pre-closed so init will exit as soon as it + // starts a cancelable portion of the process. + shutdownCh := make(chan struct{}) + close(shutdownCh) + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ShutdownCh: shutdownCh, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code == 0 { + t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) + } + + if got, want := ui.ErrorWriter.String(), `Module installation was canceled by an interrupt signal`; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_cancelProviders(t *testing.T) { + // This test runs `terraform init` as if SIGINT (or similar on other + // platforms) were sent to it, testing that it is interruptible. + + td := t.TempDir() + testCopyDir(t, testFixturePath("init-required-providers"), td) + defer testChdir(t, td)() + + // Use a provider source implementation which is designed to hang indefinitely, + // to avoid a race between the closed shutdown channel and the provider source + // operations. + providerSource := &getproviders.HangingSource{} + + // Our shutdown channel is pre-closed so init will exit as soon as it + // starts a cancelable portion of the process. 
+ shutdownCh := make(chan struct{}) + close(shutdownCh) + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + ShutdownCh: shutdownCh, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code == 0 { + t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) + } + // Currently the first operation that is cancelable is provider + // installation, so our error message comes from there. If we + // make the earlier steps cancelable in future then it'd be + // expected for this particular message to change. + if got, want := ui.ErrorWriter.String(), `Provider installation was canceled by an interrupt signal`; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_getUpgradePlugins(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "exact": {"1.2.3"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies > 1.0.0 , < 3.0.0 + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + installFakeProviderPackages(t, &m, map[string][]string{ + "exact": {"0.0.1"}, + "greater-than": {"2.3.3"}, + }) + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-upgrade=true", + } + if code := c.Run(args); code != 0 { + t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) + } + + cacheDir := m.providerLocalCacheDir() + gotPackages := 
cacheDir.AllAvailablePackages() + wantPackages := map[addrs.Provider][]providercache.CachedProvider{ + // "between" wasn't previously installed at all, so we installed + // the newest available version that matched the version constraints. + addrs.NewDefaultProvider("between"): { + { + Provider: addrs.NewDefaultProvider("between"), + Version: getproviders.MustParseVersion("2.3.4"), + PackageDir: expectedPackageInstallPath("between", "2.3.4", false), + }, + }, + // The existing version of "exact" did not match the version constraints, + // so we installed what the configuration selected as well. + addrs.NewDefaultProvider("exact"): { + { + Provider: addrs.NewDefaultProvider("exact"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("exact", "1.2.3", false), + }, + // Previous version is still there, but not selected + { + Provider: addrs.NewDefaultProvider("exact"), + Version: getproviders.MustParseVersion("0.0.1"), + PackageDir: expectedPackageInstallPath("exact", "0.0.1", false), + }, + }, + // The existing version of "greater-than" _did_ match the constraints, + // but a newer version was available and the user specified + // -upgrade and so we upgraded it anyway. 
+ addrs.NewDefaultProvider("greater-than"): { + { + Provider: addrs.NewDefaultProvider("greater-than"), + Version: getproviders.MustParseVersion("2.3.4"), + PackageDir: expectedPackageInstallPath("greater-than", "2.3.4", false), + }, + // Previous version is still there, but not selected + { + Provider: addrs.NewDefaultProvider("greater-than"), + Version: getproviders.MustParseVersion("2.3.3"), + PackageDir: expectedPackageInstallPath("greater-than", "2.3.3", false), + }, + }, + } + if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { + t.Errorf("wrong cache directory contents after upgrade\n%s", diff) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("between"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), + []getproviders.Hash{ + getproviders.HashScheme1.New("JVqAvZz88A+hS2wHVtTWQkHaxoA/LrUAz0H3jPBWPIA="), + }, + ), + addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("exact"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("H1TxWF8LyhBb6B4iUdKhLc/S9sC/jdcrCykpkbGcfbg="), + }, + ), + addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("greater-than"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints(">= 2.3.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("SJPpXx/yoFE/W+7eCipjJ+G21xbdnTBD7lWodZ8hWkU="), + }, + ), + } + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } +} + +func 
TestInit_getProviderMissing(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for exact version 1.2.3 + "exact": {"1.2.4"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + if code := c.Run(args); code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), "no available releases match") { + t.Fatalf("unexpected error output: %s", ui.ErrorWriter) + } +} + +func TestInit_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +// Verify that init will error 
out with an invalid version constraint, even if +// there are other invalid configuration constructs. +func TestInit_checkRequiredVersionFirst(t *testing.T) { + t.Run("root_module", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version-first"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `Unsupported Terraform Core version`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + }) + t.Run("sub_module", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version-first-module"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `Unsupported Terraform Core version`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + }) +} + +func TestInit_providerLockFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-provider-lock-file"), td) + // The temporary directory does not have write permission (dr-xr-xr-x) after the copy + defer os.Chmod(td, os.ModePerm) + defer testChdir(t, td)() + + 
providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + lockFile := ".terraform.lock.hcl" + buf, err := ioutil.ReadFile(lockFile) + if err != nil { + t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) + } + buf = bytes.TrimSpace(buf) + // The hash in here is for the fake package that newMockProviderSource produces + // (so it'll change if newMockProviderSource starts producing different contents) + wantLockFile := strings.TrimSpace(` +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "h1:wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno=", + ] +} +`) + if diff := cmp.Diff(wantLockFile, string(buf)); diff != "" { + t.Errorf("wrong dependency lock file contents\n%s", diff) + } + + // Make the local directory read-only, and verify that rerunning init + // succeeds, to ensure that we don't try to rewrite an unchanged lock file + os.Chmod(".", 0555) + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_providerLockFileReadonly(t *testing.T) { + // The hash in here is for the fake package that newMockProviderSource produces + // (so it'll change if newMockProviderSource starts producing different contents) + inputLockFile := strings.TrimSpace(` +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "zh:e919b507a91e23a00da5c2c4d0b64bcc7900b68d43b3951ac0f6e5d80387fbdc", + ] +} +`) + + badLockFile := strings.TrimSpace(` +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "zh:0000000000000000000000000000000000000000000000000000000000000000", + ] +} +`) + + updatedLockFile := strings.TrimSpace(` +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "h1:wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno=", + "zh:e919b507a91e23a00da5c2c4d0b64bcc7900b68d43b3951ac0f6e5d80387fbdc", + ] +} +`) + + emptyUpdatedLockFile := strings.TrimSpace(` +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+`) + + cases := []struct { + desc string + fixture string + providers map[string][]string + input string + args []string + ok bool + want string + }{ + { + desc: "default", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{}, + ok: true, + want: updatedLockFile, + }, + { + desc: "unused provider", + fixture: "init-provider-now-unused", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{}, + ok: true, + want: emptyUpdatedLockFile, + }, + { + desc: "readonly", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: true, + want: inputLockFile, + }, + { + desc: "unused provider readonly", + fixture: "init-provider-now-unused", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: inputLockFile, + }, + { + desc: "conflict", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly", "-upgrade"}, + ok: false, + want: inputLockFile, + }, + { + desc: "checksum mismatch", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: badLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: badLockFile, + }, + { + desc: "reject to change required provider dependences", + fixture: "init-provider-lock-file-readonly-add", + providers: map[string][]string{ + "test": {"1.2.3"}, + "foo": {"1.0.0"}, + }, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: inputLockFile, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath(tc.fixture), td) + defer testChdir(t, 
td)() + + providerSource, close := newMockProviderSource(t, tc.providers) + defer close() + + ui := new(cli.MockUi) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + // write input lockfile + lockFile := ".terraform.lock.hcl" + if err := ioutil.WriteFile(lockFile, []byte(tc.input), 0644); err != nil { + t.Fatalf("failed to write input lockfile: %s", err) + } + + code := c.Run(tc.args) + if tc.ok && code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + if !tc.ok && code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + buf, err := ioutil.ReadFile(lockFile) + if err != nil { + t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) + } + buf = bytes.TrimSpace(buf) + if diff := cmp.Diff(tc.want, string(buf)); diff != "" { + t.Errorf("wrong dependency lock file contents\n%s", diff) + } + }) + } +} + +func TestInit_pluginDirReset(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + }, + } + + // make our vendor paths + pluginPath := []string{"a", "b", "c"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // run once and save the -plugin-dir + args := []string{"-plugin-dir", "a"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + pluginDirs, err := c.loadPluginPath() + if err != nil { + t.Fatal(err) + } + + if len(pluginDirs) != 1 || pluginDirs[0] != "a" { + t.Fatalf(`expected plugin dir ["a"], got %q`, pluginDirs) + } + + ui = new(cli.MockUi) + c = 
&InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, // still empty + }, + } + + // make sure we remove the plugin-dir record + args = []string{"-plugin-dir="} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + pluginDirs, err = c.loadPluginPath() + if err != nil { + t.Fatal(err) + } + + if len(pluginDirs) != 0 { + t.Fatalf("expected no plugin dirs got %q", pluginDirs) + } +} + +// Test user-supplied -plugin-dir +func TestInit_pluginDirProviders(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + // make our vendor paths + pluginPath := []string{"a", "b", "c"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // We'll put some providers in our plugin dirs. To do this, we'll pretend + // for a moment that they are provider cache directories just because that + // allows us to lean on our existing test helper functions to do this. 
+ for i, def := range [][]string{ + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, + {"between", "2.3.4"}, + } { + name, version := def[0], def[1] + dir := providercache.NewDir(pluginPath[i]) + installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ + name: {version}, + }) + } + + args := []string{ + "-plugin-dir", "a", + "-plugin-dir", "b", + "-plugin-dir", "c", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("between"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), + []getproviders.Hash{ + getproviders.HashScheme1.New("JVqAvZz88A+hS2wHVtTWQkHaxoA/LrUAz0H3jPBWPIA="), + }, + ), + addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("exact"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("H1TxWF8LyhBb6B4iUdKhLc/S9sC/jdcrCykpkbGcfbg="), + }, + ), + addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("greater-than"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints(">= 2.3.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("SJPpXx/yoFE/W+7eCipjJ+G21xbdnTBD7lWodZ8hWkU="), + }, + ), + } + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } + + // -plugin-dir overrides the normal provider source, so it should not have + // seen any calls at all. 
+ if calls := providerSource.CallLog(); len(calls) > 0 { + t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) + } +} + +// Test user-supplied -plugin-dir doesn't allow auto-install +func TestInit_pluginDirProvidersDoesNotGet(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + // Our provider source has a suitable package for "between" available, + // but we should ignore it because -plugin-dir is set and thus this + // source is temporarily overridden during install. + providerSource, close := newMockProviderSource(t, map[string][]string{ + "between": {"2.3.4"}, + }) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + // make our vendor paths + pluginPath := []string{"a", "b"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // We'll put some providers in our plugin dirs. To do this, we'll pretend + // for a moment that they are provider cache directories just because that + // allows us to lean on our existing test helper functions to do this. 
+ for i, def := range [][]string{ + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, + } { + name, version := def[0], def[1] + dir := providercache.NewDir(pluginPath[i]) + installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ + name: {version}, + }) + } + + args := []string{ + "-plugin-dir", "a", + "-plugin-dir", "b", + } + if code := c.Run(args); code == 0 { + // should have been an error + t.Fatalf("succeeded; want error\nstdout:\n%s\nstderr\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + // The error output should mention the "between" provider but should not + // mention either the "exact" or "greater-than" provider, because the + // latter two are available via the -plugin-dir directories. + errStr := ui.ErrorWriter.String() + if subStr := "hashicorp/between"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the 'between' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "hashicorp/exact"; strings.Contains(errStr, subStr) { + t.Errorf("error output should not mention the 'exact' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "hashicorp/greater-than"; strings.Contains(errStr, subStr) { + t.Errorf("error output should not mention the 'greater-than' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) + } + + if calls := providerSource.CallLog(); len(calls) > 0 { + t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) + } +} + +// Verify that plugin-dir doesn't prevent discovery of internal providers +func TestInit_pluginDirWithBuiltIn(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-internal"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + 
} + + c := &InitCommand{ + Meta: m, + } + + args := []string{"-plugin-dir", "./"} + if code := c.Run(args); code != 0 { + t.Fatalf("error: %s", ui.ErrorWriter) + } + + outputStr := ui.OutputWriter.String() + if subStr := "terraform.io/builtin/terraform is built in to Terraform"; !strings.Contains(outputStr, subStr) { + t.Errorf("output should mention the terraform provider\nwant substr: %s\ngot:\n%s", subStr, outputStr) + } +} + +func TestInit_invalidBuiltInProviders(t *testing.T) { + // This test fixture includes two invalid provider dependencies: + // - an implied dependency on terraform.io/builtin/terraform with an + // explicit version number, which is not allowed because it's builtin. + // - an explicit dependency on terraform.io/builtin/nonexist, which does + // not exist at all. + td := t.TempDir() + testCopyDir(t, testFixturePath("init-internal-invalid"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "Cannot use terraform.io/builtin/terraform: built-in"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the terraform provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Cannot use terraform.io/builtin/nonexist: this Terraform release"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the 'nonexist' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxNoBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, 
testFixturePath("init-syntax-invalid-no-backend"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "There are some problems with the configuration, described below"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxWithBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-syntax-invalid-with-backend"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "There are some problems with the configuration, described below"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxInvalidBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-syntax-invalid-backend-invalid"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + 
+ c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "There are some problems with the configuration, described below"; strings.Contains(errStr, subStr) { + t.Errorf("Error output should not include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; strings.Contains(errStr, subStr) { + t.Errorf("Error output should not mention syntax errors\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported backend type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the invalid backend\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +// newMockProviderSource is a helper to succinctly construct a mock provider +// source that contains a set of packages matching the given provider versions +// that are available for installation (from temporary local files). +// +// The caller must call the returned close callback once the source is no +// longer needed, at which point it will clean up all of the temporary files +// and the packages in the source will no longer be available for installation. +// +// Provider addresses must be valid source strings, and passing only the +// provider name will be interpreted as a "default" provider under +// registry.terraform.io/hashicorp. If you need more control over the +// provider addresses, pass a full provider source string. +// +// This function also registers providers as belonging to the current platform, +// to ensure that they will be available to a provider installer operating in +// its default configuration. +// +// In case of any errors while constructing the source, this function will +// abort the current test using the given testing.T. 
Therefore a caller can +// assume that if this function returns then the result is valid and ready +// to use. +func newMockProviderSource(t *testing.T, availableProviderVersions map[string][]string) (source *getproviders.MockSource, close func()) { + t.Helper() + var packages []getproviders.PackageMeta + var closes []func() + close = func() { + for _, f := range closes { + f() + } + } + for source, versions := range availableProviderVersions { + addr := addrs.MustParseProviderSourceString(source) + for _, versionStr := range versions { + version, err := getproviders.ParseVersion(versionStr) + if err != nil { + close() + t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, addr.ForDisplay(), err) + } + meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") + if err != nil { + close() + t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), versionStr, err) + } + closes = append(closes, close) + packages = append(packages, meta) + } + } + + return getproviders.NewMockSource(packages, nil), close +} + +// installFakeProviderPackages installs a fake package for the given provider +// names (interpreted as a "default" provider address) and versions into the +// local plugin cache for the given "meta". +// +// Any test using this must be using testChdir or some similar mechanism to +// make sure that it isn't writing directly into a test fixture or source +// directory within the codebase. +// +// If a requested package cannot be installed for some reason, this function +// will abort the test using the given testing.T. Therefore if this function +// returns the caller can assume that the requested providers have been +// installed. 
+func installFakeProviderPackages(t *testing.T, meta *Meta, providerVersions map[string][]string) { + t.Helper() + + cacheDir := meta.providerLocalCacheDir() + installFakeProviderPackagesElsewhere(t, cacheDir, providerVersions) +} + +// installFakeProviderPackagesElsewhere is a variant of installFakeProviderPackages +// that will install packages into the given provider cache directory, rather +// than forcing the use of the local cache of the current "Meta". +func installFakeProviderPackagesElsewhere(t *testing.T, cacheDir *providercache.Dir, providerVersions map[string][]string) { + t.Helper() + + // It can be hard to spot the mistake of forgetting to run testChdir before + // modifying the working directory, so we'll use a simple heuristic here + // to try to detect that mistake and make a noisy error about it instead. + wd, err := os.Getwd() + if err == nil { + wd = filepath.Clean(wd) + // If the directory we're in is named "command" or if we're under a + // directory named "testdata" then we'll assume a mistake and generate + // an error. This will cause the test to fail but won't block it from + // running. + if filepath.Base(wd) == "command" || filepath.Base(wd) == "testdata" || strings.Contains(filepath.ToSlash(wd), "/testdata/") { + t.Errorf("installFakeProviderPackage may be used only by tests that switch to a temporary working directory, e.g. using testChdir") + } + } + + for name, versions := range providerVersions { + addr := addrs.NewDefaultProvider(name) + for _, versionStr := range versions { + version, err := getproviders.ParseVersion(versionStr) + if err != nil { + t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, name, err) + } + meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") + // We're going to install all these fake packages before we return, + // so we don't need to preserve them afterwards. 
+ defer close() + if err != nil { + t.Fatalf("failed to prepare fake package for %s %s: %s", name, versionStr, err) + } + _, err = cacheDir.InstallPackage(context.Background(), meta, nil) + if err != nil { + t.Fatalf("failed to install fake package for %s %s: %s", name, versionStr, err) + } + } + } +} + +// expectedPackageInstallPath is a companion to installFakeProviderPackages +// that returns the path where the provider with the given name and version +// would be installed and, relatedly, where the installer will expect to +// find an already-installed version. +// +// Just as with installFakeProviderPackages, this function is a shortcut helper +// for "default-namespaced" providers as we commonly use in tests. If you need +// more control over the provider addresses, use functions of the underlying +// getproviders and providercache packages instead. +// +// The result always uses forward slashes, even on Windows, for consistency +// with how the getproviders and providercache packages build paths. 
+func expectedPackageInstallPath(name, version string, exe bool) string { + platform := getproviders.CurrentPlatform + baseDir := ".terraform/providers" + if exe { + p := fmt.Sprintf("registry.terraform.io/hashicorp/%s/%s/%s/terraform-provider-%s_%s", name, version, platform, name, version) + if platform.OS == "windows" { + p += ".exe" + } + return filepath.ToSlash(filepath.Join(baseDir, p)) + } + return filepath.ToSlash(filepath.Join( + baseDir, fmt.Sprintf("registry.terraform.io/hashicorp/%s/%s/%s", name, version, platform), + )) +} diff --git a/command/jsonchecks/checks.go b/command/jsonchecks/checks.go new file mode 100644 index 000000000000..f82141876229 --- /dev/null +++ b/command/jsonchecks/checks.go @@ -0,0 +1,124 @@ +package jsonchecks + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/hashicorp/terraform/states" +) + +// MarshalCheckStates is the main entry-point for this package, which takes +// the top-level model object for checks in state and plan, and returns a +// JSON representation of it suitable for use in public integration points. 
+func MarshalCheckStates(results *states.CheckResults) []byte { + jsonResults := make([]checkResultStatic, 0, results.ConfigResults.Len()) + + for _, elem := range results.ConfigResults.Elems { + staticAddr := elem.Key + aggrResult := elem.Value + + objects := make([]checkResultDynamic, 0, aggrResult.ObjectResults.Len()) + for _, elem := range aggrResult.ObjectResults.Elems { + dynamicAddr := elem.Key + result := elem.Value + + problems := make([]checkProblem, 0, len(result.FailureMessages)) + for _, msg := range result.FailureMessages { + problems = append(problems, checkProblem{ + Message: msg, + }) + } + sort.Slice(problems, func(i, j int) bool { + return problems[i].Message < problems[j].Message + }) + + objects = append(objects, checkResultDynamic{ + Address: makeDynamicObjectAddr(dynamicAddr), + Status: checkStatusForJSON(result.Status), + Problems: problems, + }) + } + + sort.Slice(objects, func(i, j int) bool { + return objects[i].Address["to_display"].(string) < objects[j].Address["to_display"].(string) + }) + + jsonResults = append(jsonResults, checkResultStatic{ + Address: makeStaticObjectAddr(staticAddr), + Status: checkStatusForJSON(aggrResult.Status), + Instances: objects, + }) + } + + sort.Slice(jsonResults, func(i, j int) bool { + return jsonResults[i].Address["to_display"].(string) < jsonResults[j].Address["to_display"].(string) + }) + + ret, err := json.Marshal(jsonResults) + if err != nil { + // We totally control the input to json.Marshal, so any error here + // is a bug in the code above. 
+ panic(fmt.Sprintf("invalid input to json.Marshal: %s", err)) + } + return ret +} + +// checkResultStatic is the container for the static, configuration-driven +// idea of "checkable object" -- a resource block with conditions, for example -- +// which ensures that we can always say _something_ about each checkable +// object in the configuration even if Terraform Core encountered an error +// before being able to determine the dynamic instances of the checkable object. +type checkResultStatic struct { + ExperimentalNote experimentalNote `json:"//"` + + // Address is the address of the checkable object this result relates to. + Address staticObjectAddr `json:"address"` + + // Status is the aggregate status for all of the dynamic objects belonging + // to this static object. + Status checkStatus `json:"status"` + + // Instances contains the results for each individual dynamic object that + // belongs to this static object. + Instances []checkResultDynamic `json:"instances,omitempty"` +} + +// checkResultDynamic describes the check result for a dynamic object, which +// results from Terraform Core evaluating the "expansion" (e.g. count or for_each) +// of the containing object or its own containing module(s). +type checkResultDynamic struct { + // Address augments the Address of the containing checkResultStatic with + // instance-specific extra properties or overridden properties. + Address dynamicObjectAddr `json:"address"` + + // Status is the status for this specific dynamic object. + Status checkStatus `json:"status"` + + // Problems describes some optional details associated with a failure + // status, describing what fails. + // + // This does not include the errors for status "error", because Terraform + // Core emits those separately as normal diagnostics. However, if a + // particular object has a mixture of conditions that failed and conditions + // that were invalid then status can be "error" while simultaneously + // returning problems in this property. 
+ Problems []checkProblem `json:"problems,omitempty"` +} + +// checkProblem describes one of potentially several problems that led to +// a check being classified as status "fail". +type checkProblem struct { + // Message is the condition error message provided by the author. + Message string `json:"message"` + + // We don't currently have any other problem-related data, but this is + // intentionally an object to allow us to add other data over time, such + // as the source location where the failing condition was defined. +} + +type experimentalNote struct{} + +func (n experimentalNote) MarshalJSON() ([]byte, error) { + return []byte(`"EXPERIMENTAL: see docs for details"`), nil +} diff --git a/internal/command/jsonchecks/checks_test.go b/command/jsonchecks/checks_test.go similarity index 97% rename from internal/command/jsonchecks/checks_test.go rename to command/jsonchecks/checks_test.go index 6e0f52da4faa..336c2eee9274 100644 --- a/internal/command/jsonchecks/checks_test.go +++ b/command/jsonchecks/checks_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" + "github.com/hashicorp/terraform/states" ) func TestMarshalCheckStates(t *testing.T) { diff --git a/internal/command/jsonchecks/doc.go b/command/jsonchecks/doc.go similarity index 100% rename from internal/command/jsonchecks/doc.go rename to command/jsonchecks/doc.go diff --git a/internal/command/jsonchecks/objects.go b/command/jsonchecks/objects.go similarity index 97% rename from internal/command/jsonchecks/objects.go rename to command/jsonchecks/objects.go index d7a5014fee13..3135ca1c1a3d 100644 --- a/internal/command/jsonchecks/objects.go +++ b/command/jsonchecks/objects.go @@ -3,7 +3,7 @@ package jsonchecks import ( "fmt" - 
"github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) type staticObjectAddr map[string]interface{} diff --git a/internal/command/jsonchecks/status.go b/command/jsonchecks/status.go similarity index 91% rename from internal/command/jsonchecks/status.go rename to command/jsonchecks/status.go index f55194aeb039..55bb9f6019e7 100644 --- a/internal/command/jsonchecks/status.go +++ b/command/jsonchecks/status.go @@ -3,7 +3,7 @@ package jsonchecks import ( "fmt" - "github.com/hashicorp/terraform/internal/checks" + "github.com/hashicorp/terraform/checks" ) type checkStatus []byte diff --git a/command/jsonconfig/config.go b/command/jsonconfig/config.go new file mode 100644 index 000000000000..7bd91bba77c0 --- /dev/null +++ b/command/jsonconfig/config.go @@ -0,0 +1,565 @@ +package jsonconfig + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/terraform" +) + +// Config represents the complete configuration source +type config struct { + ProviderConfigs map[string]providerConfig `json:"provider_config,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +// ProviderConfig describes all of the provider configurations throughout the +// configuration tree, flattened into a single map for convenience since +// provider configurations are the one concept in Terraform that can span across +// module boundaries. 
+type providerConfig struct { + Name string `json:"name,omitempty"` + FullName string `json:"full_name,omitempty"` + Alias string `json:"alias,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + ModuleAddress string `json:"module_address,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` + parentKey string +} + +type module struct { + Outputs map[string]output `json:"outputs,omitempty"` + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []resource `json:"resources,omitempty"` + ModuleCalls map[string]moduleCall `json:"module_calls,omitempty"` + Variables variables `json:"variables,omitempty"` +} + +type moduleCall struct { + Source string `json:"source,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + Module module `json:"module,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` +} + +// variables is the JSON representation of the variables provided to the current +// plan. +type variables map[string]*variable + +type variable struct { + Default json.RawMessage `json:"default,omitempty"` + Description string `json:"description,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +// Resource is the representation of a resource in the config +type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // ProviderConfigKey is the key into "provider_configs" (shown above) for + // the provider configuration that this resource is associated with. 
+ // + // NOTE: If a given resource is in a ModuleCall, and the provider was + // configured outside of the module (in a higher level configuration file), + // the ProviderConfigKey will not match a key in the ProviderConfigs map. + ProviderConfigKey string `json:"provider_config_key,omitempty"` + + // Provisioners is an optional field which describes any provisioners. + // Connection info will not be included here. + Provisioners []provisioner `json:"provisioners,omitempty"` + + // Expressions describes the resource-type-specific content of the + // configuration block. + Expressions map[string]interface{} `json:"expressions,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // CountExpression and ForEachExpression describe the expressions given for + // the corresponding meta-arguments in the resource configuration block. + // These are omitted if the corresponding argument isn't set. + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + + DependsOn []string `json:"depends_on,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive,omitempty"` + Expression expression `json:"expression,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` + Description string `json:"description,omitempty"` +} + +type provisioner struct { + Type string `json:"type,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` +} + +// Marshal returns the json encoding of terraform configuration. 
+func Marshal(c *configs.Config, schemas *terraform.Schemas) ([]byte, error) { + var output config + + pcs := make(map[string]providerConfig) + marshalProviderConfigs(c, schemas, pcs) + + rootModule, err := marshalModule(c, schemas, "") + if err != nil { + return nil, err + } + output.RootModule = rootModule + + normalizeModuleProviderKeys(&rootModule, pcs) + + for name, pc := range pcs { + if pc.parentKey != "" { + delete(pcs, name) + } + } + output.ProviderConfigs = pcs + + ret, err := json.Marshal(output) + return ret, err +} + +func marshalProviderConfigs( + c *configs.Config, + schemas *terraform.Schemas, + m map[string]providerConfig, +) { + if c == nil { + return + } + + // We want to determine only the provider requirements from this module, + // ignoring any descendants. Disregard any diagnostics when determining + // requirements because we want this marshalling to succeed even if there + // are invalid constraints. + reqs, _ := c.ProviderRequirementsShallow() + + // Add an entry for each provider configuration block in the module. + for k, pc := range c.Module.ProviderConfigs { + providerFqn := c.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: pc.Name}) + schema := schemas.ProviderConfig(providerFqn) + + p := providerConfig{ + Name: pc.Name, + FullName: providerFqn.String(), + Alias: pc.Alias, + ModuleAddress: c.Path.String(), + Expressions: marshalExpressions(pc.Config, schema), + } + + // Store the fully resolved provider version constraint, rather than + // using the version argument in the configuration block. This is both + // future proof (for when we finish the deprecation of the provider config + // version argument) and more accurate (as it reflects the full set of + // constraints, in case there are multiple). 
+ if vc, ok := reqs[providerFqn]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + key := opaqueProviderKey(k, c.Path.String()) + + m[key] = p + } + + // Ensure that any required providers with no associated configuration + // block are included in the set. + for k, pr := range c.Module.ProviderRequirements.RequiredProviders { + // If a provider has aliases defined, process those first. + for _, alias := range pr.Aliases { + // If there exists a value for this provider, we have nothing to add + // to it, so skip. + key := opaqueProviderKey(alias.StringCompact(), c.Path.String()) + if _, exists := m[key]; exists { + continue + } + // Given no provider configuration block exists, the only fields we can + // fill here are the local name, FQN, module address, and version + // constraints. + p := providerConfig{ + Name: pr.Name, + FullName: pr.Type.String(), + ModuleAddress: c.Path.String(), + } + + if vc, ok := reqs[pr.Type]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + m[key] = p + } + + // If there exists a value for this provider, we have nothing to add + // to it, so skip. + key := opaqueProviderKey(k, c.Path.String()) + if _, exists := m[key]; exists { + continue + } + + // Given no provider configuration block exists, the only fields we can + // fill here are the local name, module address, and version + // constraints. + p := providerConfig{ + Name: pr.Name, + FullName: pr.Type.String(), + ModuleAddress: c.Path.String(), + } + + if vc, ok := reqs[pr.Type]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + if c.Parent != nil { + parentKey := opaqueProviderKey(pr.Name, c.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + } + + m[key] = p + } + + // Providers could be implicitly created or inherited from the parent module + // when no requirements and configuration block defined. 
+ for req := range reqs { + // Only default providers could implicitly exist, + // so the provider name must be same as the provider type. + key := opaqueProviderKey(req.Type, c.Path.String()) + if _, exists := m[key]; exists { + continue + } + + p := providerConfig{ + Name: req.Type, + FullName: req.String(), + ModuleAddress: c.Path.String(), + } + + // In child modules, providers defined in the parent module can be implicitly used. + if c.Parent != nil { + parentKey := opaqueProviderKey(req.Type, c.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + } + + m[key] = p + } + + // Must also visit our child modules, recursively. + for name, mc := range c.Module.ModuleCalls { + // Keys in c.Children are guaranteed to match those in c.Module.ModuleCalls + cc := c.Children[name] + + // Add provider config map entries for passed provider configs, + // pointing at the passed configuration + for _, ppc := range mc.Providers { + // These provider names include aliases, if set + moduleProviderName := ppc.InChild.String() + parentProviderName := ppc.InParent.String() + + // Look up the provider FQN from the module context, using the non-aliased local name + providerFqn := cc.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: ppc.InChild.Name}) + + // The presence of passed provider configs means that we cannot have + // any configuration expressions or version constraints here + p := providerConfig{ + Name: moduleProviderName, + FullName: providerFqn.String(), + ModuleAddress: cc.Path.String(), + } + + key := opaqueProviderKey(moduleProviderName, cc.Path.String()) + parentKey := opaqueProviderKey(parentProviderName, cc.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + + m[key] = p + } + + // Finally, marshal any other provider configs within the called module. + // It is safe to do this last because it is invalid to configure a + // provider which has passed provider configs in the module call. 
+ marshalProviderConfigs(cc, schemas, m) + } +} + +func marshalModule(c *configs.Config, schemas *terraform.Schemas, addr string) (module, error) { + var module module + var rs []resource + + managedResources, err := marshalResources(c.Module.ManagedResources, schemas, addr) + if err != nil { + return module, err + } + dataResources, err := marshalResources(c.Module.DataResources, schemas, addr) + if err != nil { + return module, err + } + + rs = append(managedResources, dataResources...) + module.Resources = rs + + outputs := make(map[string]output) + for _, v := range c.Module.Outputs { + o := output{ + Sensitive: v.Sensitive, + Expression: marshalExpression(v.Expr), + } + if v.Description != "" { + o.Description = v.Description + } + if len(v.DependsOn) > 0 { + dependencies := make([]string, len(v.DependsOn)) + for i, d := range v.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `terraform validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. 
+ if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + o.DependsOn = dependencies + } + + outputs[v.Name] = o + } + module.Outputs = outputs + + module.ModuleCalls = marshalModuleCalls(c, schemas) + + if len(c.Module.Variables) > 0 { + vars := make(variables, len(c.Module.Variables)) + for k, v := range c.Module.Variables { + var defaultValJSON []byte + if v.Default == cty.NilVal { + defaultValJSON = nil + } else { + defaultValJSON, err = ctyjson.Marshal(v.Default, v.Default.Type()) + if err != nil { + return module, err + } + } + vars[k] = &variable{ + Default: defaultValJSON, + Description: v.Description, + Sensitive: v.Sensitive, + } + } + module.Variables = vars + } + + return module, nil +} + +func marshalModuleCalls(c *configs.Config, schemas *terraform.Schemas) map[string]moduleCall { + ret := make(map[string]moduleCall) + + for name, mc := range c.Module.ModuleCalls { + mcConfig := c.Children[name] + ret[name] = marshalModuleCall(mcConfig, mc, schemas) + } + + return ret +} + +func marshalModuleCall(c *configs.Config, mc *configs.ModuleCall, schemas *terraform.Schemas) moduleCall { + // It is possible to have a module call with a nil config. + if c == nil { + return moduleCall{} + } + + ret := moduleCall{ + // We're intentionally echoing back exactly what the user entered + // here, rather than the normalized version in SourceAddr, because + // historically we only _had_ the raw address and thus it would be + // a (admittedly minor) breaking change to start normalizing them + // now, in case consumers of this data are expecting a particular + // non-normalized syntax. 
+ Source: mc.SourceAddrRaw, + VersionConstraint: mc.Version.Required.String(), + } + cExp := marshalExpression(mc.Count) + if !cExp.Empty() { + ret.CountExpression = &cExp + } else { + fExp := marshalExpression(mc.ForEach) + if !fExp.Empty() { + ret.ForEachExpression = &fExp + } + } + + schema := &configschema.Block{} + schema.Attributes = make(map[string]*configschema.Attribute) + for _, variable := range c.Module.Variables { + schema.Attributes[variable.Name] = &configschema.Attribute{ + Required: variable.Default == cty.NilVal, + } + } + + ret.Expressions = marshalExpressions(mc.Config, schema) + + module, _ := marshalModule(c, schemas, c.Path.String()) + + ret.Module = module + + if len(mc.DependsOn) > 0 { + dependencies := make([]string, len(mc.DependsOn)) + for i, d := range mc.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `terraform validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. + if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + ret.DependsOn = dependencies + } + + return ret +} + +func marshalResources(resources map[string]*configs.Resource, schemas *terraform.Schemas, moduleAddr string) ([]resource, error) { + var rs []resource + for _, v := range resources { + providerConfigKey := opaqueProviderKey(v.ProviderConfigAddr().StringCompact(), moduleAddr) + r := resource{ + Address: v.Addr().String(), + Type: v.Type, + Name: v.Name, + ProviderConfigKey: providerConfigKey, + } + + switch v.Mode { + case addrs.ManagedResourceMode: + r.Mode = "managed" + case addrs.DataResourceMode: + r.Mode = "data" + default: + return rs, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, v.Mode.String()) + } + + cExp := marshalExpression(v.Count) + if !cExp.Empty() { + r.CountExpression = &cExp + } else { + fExp := marshalExpression(v.ForEach) + if !fExp.Empty() { + r.ForEachExpression = &fExp + } + } + + schema, schemaVer := 
schemas.ResourceTypeConfig( + v.Provider, + v.Mode, + v.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s (in provider %s)", v.Addr().String(), v.Provider) + } + r.SchemaVersion = schemaVer + + r.Expressions = marshalExpressions(v.Config, schema) + + // Managed is populated only for Mode = addrs.ManagedResourceMode + if v.Managed != nil && len(v.Managed.Provisioners) > 0 { + var provisioners []provisioner + for _, p := range v.Managed.Provisioners { + schema := schemas.ProvisionerConfig(p.Type) + prov := provisioner{ + Type: p.Type, + Expressions: marshalExpressions(p.Config, schema), + } + provisioners = append(provisioners, prov) + } + r.Provisioners = provisioners + } + + if len(v.DependsOn) > 0 { + dependencies := make([]string, len(v.DependsOn)) + for i, d := range v.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `terraform validate` + // would have complained well before this point, but if we do we'll + // silently skip it. + if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + r.DependsOn = dependencies + } + + rs = append(rs, r) + } + sort.Slice(rs, func(i, j int) bool { + return rs[i].Address < rs[j].Address + }) + return rs, nil +} + +// Flatten all resource provider keys in a module and its descendants, such +// that any resources from providers using a configuration passed through the +// module call have a direct reference to that provider configuration. 
+func normalizeModuleProviderKeys(m *module, pcs map[string]providerConfig) { + for i, r := range m.Resources { + if pc, exists := pcs[r.ProviderConfigKey]; exists { + if _, hasParent := pcs[pc.parentKey]; hasParent { + m.Resources[i].ProviderConfigKey = pc.parentKey + } + } + } + + for _, mc := range m.ModuleCalls { + normalizeModuleProviderKeys(&mc.Module, pcs) + } +} + +// opaqueProviderKey generates a unique absProviderConfig-like string from the module +// address and provider +func opaqueProviderKey(provider string, addr string) (key string) { + key = provider + if addr != "" { + key = fmt.Sprintf("%s:%s", addr, provider) + } + return key +} + +// Traverse up the module call tree until we find the provider +// configuration which has no linked parent config. This is then +// the source of the configuration used in this module call, so +// we link to it directly +func findSourceProviderKey(startKey string, fullName string, m map[string]providerConfig) string { + var parentKey string + + key := startKey + for key != "" { + parent, exists := m[key] + if !exists || parent.FullName != fullName { + break + } + + parentKey = key + key = parent.parentKey + } + + return parentKey +} diff --git a/internal/command/jsonconfig/config_test.go b/command/jsonconfig/config_test.go similarity index 100% rename from internal/command/jsonconfig/config_test.go rename to command/jsonconfig/config_test.go diff --git a/internal/command/jsonconfig/doc.go b/command/jsonconfig/doc.go similarity index 100% rename from internal/command/jsonconfig/doc.go rename to command/jsonconfig/doc.go diff --git a/internal/command/jsonconfig/expression.go b/command/jsonconfig/expression.go similarity index 96% rename from internal/command/jsonconfig/expression.go rename to command/jsonconfig/expression.go index fa443fc3ea50..98c499541560 100644 --- a/internal/command/jsonconfig/expression.go +++ b/command/jsonconfig/expression.go @@ -7,10 +7,10 @@ import ( "github.com/hashicorp/hcl/v2" 
"github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/blocktoattr" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/lang/blocktoattr" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" ) diff --git a/internal/command/jsonconfig/expression_test.go b/command/jsonconfig/expression_test.go similarity index 97% rename from internal/command/jsonconfig/expression_test.go rename to command/jsonconfig/expression_test.go index 58af11dda53e..c81447e467fc 100644 --- a/internal/command/jsonconfig/expression_test.go +++ b/command/jsonconfig/expression_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) func TestMarshalExpressions(t *testing.T) { diff --git a/internal/command/jsonformat/README.md b/command/jsonformat/README.md similarity index 100% rename from internal/command/jsonformat/README.md rename to command/jsonformat/README.md diff --git a/command/jsonformat/collections/action.go b/command/jsonformat/collections/action.go new file mode 100644 index 000000000000..edacf6c110ae --- /dev/null +++ b/command/jsonformat/collections/action.go @@ -0,0 +1,16 @@ +package collections + +import "github.com/hashicorp/terraform/plans" + +// CompareActions will compare current and next, and return plans.Update if they +// are different, and current if they are the same. 
+func CompareActions(current, next plans.Action) plans.Action { + if next == plans.NoOp { + return current + } + + if current != next { + return plans.Update + } + return current +} diff --git a/command/jsonformat/collections/map.go b/command/jsonformat/collections/map.go new file mode 100644 index 000000000000..e6603dc41022 --- /dev/null +++ b/command/jsonformat/collections/map.go @@ -0,0 +1,26 @@ +package collections + +import ( + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +type ProcessKey func(key string) computed.Diff + +func TransformMap[Input any](before, after map[string]Input, keys []string, process ProcessKey) (map[string]computed.Diff, plans.Action) { + current := plans.NoOp + if before != nil && after == nil { + current = plans.Delete + } + if before == nil && after != nil { + current = plans.Create + } + + elements := make(map[string]computed.Diff) + for _, key := range keys { + elements[key] = process(key) + current = CompareActions(current, elements[key].Action) + } + + return elements, current +} diff --git a/command/jsonformat/collections/slice.go b/command/jsonformat/collections/slice.go new file mode 100644 index 000000000000..1e87baa80fc2 --- /dev/null +++ b/command/jsonformat/collections/slice.go @@ -0,0 +1,72 @@ +package collections + +import ( + "reflect" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" +) + +type TransformIndices func(before, after int) computed.Diff +type ProcessIndices func(before, after int) +type IsObjType[Input any] func(input Input) bool + +func TransformSlice[Input any](before, after []Input, process TransformIndices, isObjType IsObjType[Input]) ([]computed.Diff, plans.Action) { + current := plans.NoOp + if before != nil && after == nil { + current = plans.Delete + } + if before == nil && after != nil { + current = plans.Create + } + + var elements 
[]computed.Diff + ProcessSlice(before, after, func(before, after int) { + element := process(before, after) + elements = append(elements, element) + current = CompareActions(current, element.Action) + }, isObjType) + return elements, current +} + +func ProcessSlice[Input any](before, after []Input, process ProcessIndices, isObjType IsObjType[Input]) { + lcs := objchange.LongestCommonSubsequence(before, after, func(before, after Input) bool { + return reflect.DeepEqual(before, after) + }) + + var beforeIx, afterIx, lcsIx int + for beforeIx < len(before) || afterIx < len(after) || lcsIx < len(lcs) { + // Step through all the before values until we hit the next item in the + // longest common subsequence. We are going to just say that all of + // these have been deleted. + for beforeIx < len(before) && (lcsIx >= len(lcs) || !reflect.DeepEqual(before[beforeIx], lcs[lcsIx])) { + isObjectDiff := isObjType(before[beforeIx]) && afterIx < len(after) && isObjType(after[afterIx]) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) + if isObjectDiff { + process(beforeIx, afterIx) + beforeIx++ + afterIx++ + continue + } + + process(beforeIx, len(after)) + beforeIx++ + } + + // Now, step through all the after values until we hit the next item in the + // LCS. We are going to say that all of these have been created. + for afterIx < len(after) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) { + process(len(before), afterIx) + afterIx++ + } + + // Finally, add the item in common as unchanged. 
+ if lcsIx < len(lcs) { + process(beforeIx, afterIx) + beforeIx++ + afterIx++ + lcsIx++ + } + } +} diff --git a/command/jsonformat/computed/diff.go b/command/jsonformat/computed/diff.go new file mode 100644 index 000000000000..e1878317c375 --- /dev/null +++ b/command/jsonformat/computed/diff.go @@ -0,0 +1,120 @@ +package computed + +import ( + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/plans" +) + +// Diff captures the computed diff for a single block, element or attribute. +// +// It essentially merges common functionality across all types of changes, +// namely the replace logic and the action / change type. Any remaining +// behaviour can be offloaded to the renderer which will be unique for the +// various change types (eg. maps, objects, lists, blocks, primitives, etc.). +type Diff struct { + // Renderer captures the uncommon functionality across the different kinds + // of changes. Each type of change (lists, blocks, sets, etc.) will have a + // unique renderer. + Renderer DiffRenderer + + // Action is the action described by this change (such as create, delete, + // update, etc.). + Action plans.Action + + // Replace tells the Change that it should add the `# forces replacement` + // suffix. + // + // Every single change could potentially add this suffix, so we embed it in + // the change as common functionality instead of in the specific renderers. + Replace bool +} + +// NewDiff creates a new Diff object with the provided renderer, action and +// replace context. +func NewDiff(renderer DiffRenderer, action plans.Action, replace bool) Diff { + return Diff{ + Renderer: renderer, + Action: action, + Replace: replace, + } +} + +// RenderHuman prints the Change into a human-readable string referencing the +// specified RenderOpts. +// +// If the returned string is a single line, then indent should be ignored. 
+// +// If the return string is multiple lines, then indent should be used to offset +// the beginning of all lines but the first by the specified amount. +func (diff Diff) RenderHuman(indent int, opts RenderHumanOpts) string { + return diff.Renderer.RenderHuman(diff, indent, opts) +} + +// WarningsHuman returns a list of strings that should be rendered as warnings +// before a given change is rendered. +// +// As with the RenderHuman function, the indent should only be applied on +// multiline warnings and on the second and following lines. +func (diff Diff) WarningsHuman(indent int, opts RenderHumanOpts) []string { + return diff.Renderer.WarningsHuman(diff, indent, opts) +} + +type DiffRenderer interface { + RenderHuman(diff Diff, indent int, opts RenderHumanOpts) string + WarningsHuman(diff Diff, indent int, opts RenderHumanOpts) []string +} + +// RenderHumanOpts contains options that can control how the human render +// function of the DiffRenderer will function. +type RenderHumanOpts struct { + Colorize *colorstring.Colorize + + // OverrideNullSuffix tells the Renderer not to display the `-> null` suffix + // that is normally displayed when an element, attribute, or block is + // deleted. + OverrideNullSuffix bool + + // OverrideForcesReplacement tells the Renderer to display the + // `# forces replacement` suffix, even if a diff doesn't have the Replace + // field set. + // + // Some renderers (like the Set renderer) don't display the suffix + // themselves but force their child diffs to display it instead. + OverrideForcesReplacement bool + + // ShowUnchangedChildren instructs the Renderer to render all children of a + // given complex change, instead of hiding unchanged items and compressing + // them into a single line. + ShowUnchangedChildren bool + + // HideDiffActionSymbols tells the renderer not to show the '+'/'-' symbols + // and to skip the places where the symbols would result in an offset. 
+ HideDiffActionSymbols bool +} + +// NewRenderHumanOpts creates a new RenderHumanOpts struct with the required +// fields set. +func NewRenderHumanOpts(colorize *colorstring.Colorize) RenderHumanOpts { + return RenderHumanOpts{ + Colorize: colorize, + } +} + +// Clone returns a new RenderHumanOpts object, that matches the original but can be +// edited without changing the original. +func (opts RenderHumanOpts) Clone() RenderHumanOpts { + return RenderHumanOpts{ + Colorize: opts.Colorize, + + OverrideNullSuffix: opts.OverrideNullSuffix, + ShowUnchangedChildren: opts.ShowUnchangedChildren, + HideDiffActionSymbols: opts.HideDiffActionSymbols, + + // OverrideForcesReplacement is a special case in that it doesn't + // cascade. So each diff should decide independently whether its direct + // children should override their internal Replace logic, instead of + // an ancestor making the switch and affecting the entire tree. + OverrideForcesReplacement: false, + } +} diff --git a/internal/command/jsonformat/computed/doc.go b/command/jsonformat/computed/doc.go similarity index 100% rename from internal/command/jsonformat/computed/doc.go rename to command/jsonformat/computed/doc.go diff --git a/command/jsonformat/computed/renderers/block.go b/command/jsonformat/computed/renderers/block.go new file mode 100644 index 000000000000..cdcc2cf5b794 --- /dev/null +++ b/command/jsonformat/computed/renderers/block.go @@ -0,0 +1,181 @@ +package renderers + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + + "github.com/hashicorp/terraform/plans" +) + +var ( + _ computed.DiffRenderer = (*blockRenderer)(nil) + + importantAttributes = []string{ + "id", + "name", + "tags", + } +) + +func importantAttribute(attr string) bool { + for _, attribute := range importantAttributes { + if attribute == attr { + return true + } + } + return false +} + +func Block(attributes map[string]computed.Diff, blocks Blocks) computed.DiffRenderer { + return 
&blockRenderer{ + attributes: attributes, + blocks: blocks, + } +} + +type blockRenderer struct { + NoWarningsRenderer + + attributes map[string]computed.Diff + blocks Blocks +} + +func (renderer blockRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if len(renderer.attributes) == 0 && len(renderer.blocks.GetAllKeys()) == 0 { + return fmt.Sprintf("{}%s", forcesReplacement(diff.Replace, opts)) + } + + unchangedAttributes := 0 + unchangedBlocks := 0 + + maximumAttributeKeyLen := 0 + var attributeKeys []string + escapedAttributeKeys := make(map[string]string) + for key := range renderer.attributes { + attributeKeys = append(attributeKeys, key) + escapedKey := EnsureValidAttributeName(key) + escapedAttributeKeys[key] = escapedKey + if maximumAttributeKeyLen < len(escapedKey) { + maximumAttributeKeyLen = len(escapedKey) + } + } + sort.Strings(attributeKeys) + + importantAttributeOpts := opts.Clone() + importantAttributeOpts.ShowUnchangedChildren = true + + attributeOpts := opts.Clone() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts))) + for _, key := range attributeKeys { + attribute := renderer.attributes[key] + if importantAttribute(key) { + + // Always display the important attributes. 
+ for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumAttributeKeyLen, key, attribute.RenderHuman(indent+1, importantAttributeOpts))) + continue + } + if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedAttributes++ + continue + } + + for _, warning := range attribute.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumAttributeKeyLen, escapedAttributeKeys[key], attribute.RenderHuman(indent+1, attributeOpts))) + } + + if unchangedAttributes > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts))) + } + + blockKeys := renderer.blocks.GetAllKeys() + for _, key := range blockKeys { + + foundChangedBlock := false + renderBlock := func(diff computed.Diff, mapKey string, opts computed.RenderHumanOpts) { + + creatingSensitiveValue := diff.Action == plans.Create && renderer.blocks.AfterSensitiveBlocks[key] + deletingSensitiveValue := diff.Action == plans.Delete && renderer.blocks.BeforeSensitiveBlocks[key] + modifyingSensitiveValue := (diff.Action == plans.Update || diff.Action == plans.NoOp) && (renderer.blocks.AfterSensitiveBlocks[key] || renderer.blocks.BeforeSensitiveBlocks[key]) + + if creatingSensitiveValue || deletingSensitiveValue || modifyingSensitiveValue { + // Intercept the renderer here if the sensitive data was set + // across all the blocks instead of individually. 
+ action := diff.Action + if diff.Action == plans.NoOp && renderer.blocks.BeforeSensitiveBlocks[key] != renderer.blocks.AfterSensitiveBlocks[key] { + action = plans.Update + } + + diff = computed.NewDiff(SensitiveBlock(diff, renderer.blocks.BeforeSensitiveBlocks[key], renderer.blocks.AfterSensitiveBlocks[key]), action, diff.Replace) + } + + if diff.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedBlocks++ + return + } + + if !foundChangedBlock && len(renderer.attributes) > 0 { + // We always want to put an extra new line between the + // attributes and blocks, and between groups of blocks. + buf.WriteString("\n") + foundChangedBlock = true + } + + // If the force replacement metadata was set for every entry in the + // block we need to override that here. Our child blocks will only + // know about the replace function if it was set on them + // specifically, and not if it was set for all the blocks. + blockOpts := opts.Clone() + blockOpts.OverrideForcesReplacement = renderer.blocks.ReplaceBlocks[key] + + for _, warning := range diff.WarningsHuman(indent+1, blockOpts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s%s %s\n", formatIndent(indent+1), writeDiffActionSymbol(diff.Action, blockOpts), EnsureValidAttributeName(key), mapKey, diff.RenderHuman(indent+1, blockOpts))) + + } + + switch { + case renderer.blocks.IsSingleBlock(key): + renderBlock(renderer.blocks.SingleBlocks[key], "", opts) + case renderer.blocks.IsMapBlock(key): + var keys []string + for key := range renderer.blocks.MapBlocks[key] { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, innerKey := range keys { + renderBlock(renderer.blocks.MapBlocks[key][innerKey], fmt.Sprintf(" %q", innerKey), opts) + } + case renderer.blocks.IsSetBlock(key): + + setOpts := opts.Clone() + setOpts.OverrideForcesReplacement = diff.Replace + + for _, block := range renderer.blocks.SetBlocks[key] { + renderBlock(block, "", 
opts) + } + case renderer.blocks.IsListBlock(key): + for _, block := range renderer.blocks.ListBlocks[key] { + renderBlock(block, "", opts) + } + } + } + + if unchangedBlocks > 0 { + buf.WriteString(fmt.Sprintf("\n%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("block", unchangedBlocks, opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s}", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts))) + return buf.String() +} diff --git a/internal/command/jsonformat/computed/renderers/blocks.go b/command/jsonformat/computed/renderers/blocks.go similarity index 97% rename from internal/command/jsonformat/computed/renderers/blocks.go rename to command/jsonformat/computed/renderers/blocks.go index 62f2f7912943..fda59db328c2 100644 --- a/internal/command/jsonformat/computed/renderers/blocks.go +++ b/command/jsonformat/computed/renderers/blocks.go @@ -3,7 +3,7 @@ package renderers import ( "sort" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed" ) // Blocks is a helper struct for collating the different kinds of blocks in a diff --git a/internal/command/jsonformat/computed/renderers/json.go b/command/jsonformat/computed/renderers/json.go similarity index 88% rename from internal/command/jsonformat/computed/renderers/json.go rename to command/jsonformat/computed/renderers/json.go index 61ab7d5a4f99..2d3c7cc88e8d 100644 --- a/internal/command/jsonformat/computed/renderers/json.go +++ b/command/jsonformat/computed/renderers/json.go @@ -3,9 +3,9 @@ package renderers import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/jsondiff" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/jsondiff" + 
"github.com/hashicorp/terraform/plans" ) // RendererJsonOpts creates a jsondiff.JsonOpts object that returns the correct diff --git a/command/jsonformat/computed/renderers/list.go b/command/jsonformat/computed/renderers/list.go new file mode 100644 index 000000000000..8ab626a0024d --- /dev/null +++ b/command/jsonformat/computed/renderers/list.go @@ -0,0 +1,124 @@ +package renderers + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*listRenderer)(nil) + +func List(elements []computed.Diff) computed.DiffRenderer { + return &listRenderer{ + displayContext: true, + elements: elements, + } +} + +func NestedList(elements []computed.Diff) computed.DiffRenderer { + return &listRenderer{ + elements: elements, + } +} + +type listRenderer struct { + NoWarningsRenderer + + displayContext bool + elements []computed.Diff +} + +func (renderer listRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if len(renderer.elements) == 0 { + return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) + } + + elementOpts := opts.Clone() + elementOpts.OverrideNullSuffix = true + + unchangedElementOpts := opts.Clone() + unchangedElementOpts.ShowUnchangedChildren = true + + var unchangedElements []computed.Diff + + // renderNext tells the renderer to print out the next element in the list + // whatever state it is in. So, even if a change is a NoOp we will still + // print it out if the last change we processed wants us to. 
+ renderNext := false + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(diff.Replace, opts))) + for _, element := range renderer.elements { + if element.Action == plans.NoOp && !renderNext && !opts.ShowUnchangedChildren { + unchangedElements = append(unchangedElements, element) + continue + } + renderNext = false + + opts := elementOpts + + // If we want to display the context around this change, we want to + // render the change immediately before this change in the list, and the + // change immediately after in the list, even if both these changes are + // NoOps. This will give the user reading the diff some context as to + // where in the list these changes are being made, as order matters. + if renderer.displayContext { + // If our list of unchanged elements contains more than one entry + // we'll print out a count of the number of unchanged elements that + // we skipped. Note, this is the length of the unchanged elements + // minus 1 as the most recent unchanged element will be printed out + // in full. + if len(unchangedElements) > 1 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements)-1, opts))) + } + // If our list of unchanged elements contains at least one entry, + // we're going to print out the most recent change in full. That's + // what happens here. + if len(unchangedElements) > 0 { + lastElement := unchangedElements[len(unchangedElements)-1] + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(lastElement.Action, unchangedElementOpts), lastElement.RenderHuman(indent+1, unchangedElementOpts))) + } + // We now reset the unchanged elements list, we've printed out a + // count of all the elements we skipped so we start counting from + // scratch again. 
This means that if we process a run of changed + // elements, they won't all start printing out summaries of every + // change that happened previously. + unchangedElements = nil + + if element.Action == plans.NoOp { + // If this is a NoOp action then we're going to render it below + // so we need to just override the opts we're going to use to + // make sure we use the unchanged opts. + opts = unchangedElementOpts + } else { + // As we also want to render the element immediately after any + // changes, we make a note here to say we should render the next + // change whatever it is. But, we only want to render the next + // change if the current change isn't a NoOp. If the current change + // is a NoOp then it was told to print by the last change and we + // don't want to cascade and print all changes from now on. + renderNext = true + } + } + + for _, warning := range element.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, opts), element.RenderHuman(indent+1, opts))) + } + + // If we were not displaying any context alongside our changes then the + // unchangedElements list will contain every unchanged element, and we'll + // print that out as we do with every other collection. + // + // If we were displaying context, then this will contain any unchanged + // elements since our last change, so we should also print it out. 
+ if len(unchangedElements) > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements), opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/command/jsonformat/computed/renderers/map.go b/command/jsonformat/computed/renderers/map.go new file mode 100644 index 000000000000..6aa4d68f21c6 --- /dev/null +++ b/command/jsonformat/computed/renderers/map.go @@ -0,0 +1,107 @@ +package renderers + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*mapRenderer)(nil) + +func Map(elements map[string]computed.Diff) computed.DiffRenderer { + return &mapRenderer{ + elements: elements, + alignKeys: true, + } +} + +func NestedMap(elements map[string]computed.Diff) computed.DiffRenderer { + return &mapRenderer{ + elements: elements, + overrideNullSuffix: true, + overrideForcesReplacement: true, + } +} + +type mapRenderer struct { + NoWarningsRenderer + + elements map[string]computed.Diff + + overrideNullSuffix bool + overrideForcesReplacement bool + alignKeys bool +} + +func (renderer mapRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + forcesReplacementSelf := diff.Replace && !renderer.overrideForcesReplacement + forcesReplacementChildren := diff.Replace && renderer.overrideForcesReplacement + + if len(renderer.elements) == 0 { + return fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(forcesReplacementSelf, opts)) + } + + // Sort the map elements by key, so we have a deterministic ordering in + // the output. + var keys []string + + // We need to make sure the keys are capable of rendering properly. 
+ escapedKeys := make(map[string]string) + + maximumKeyLen := 0 + for key := range renderer.elements { + keys = append(keys, key) + + escapedKey := hclEscapeString(key) + escapedKeys[key] = escapedKey + if maximumKeyLen < len(escapedKey) { + maximumKeyLen = len(escapedKey) + } + } + sort.Strings(keys) + + unchangedElements := 0 + + elementOpts := opts.Clone() + elementOpts.OverrideNullSuffix = diff.Action == plans.Delete || renderer.overrideNullSuffix + elementOpts.OverrideForcesReplacement = forcesReplacementChildren + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(forcesReplacementSelf, opts))) + for _, key := range keys { + element := renderer.elements[key] + + if element.Action == plans.NoOp && !opts.ShowUnchangedChildren { + // Don't render NoOp operations when we are compact display. + unchangedElements++ + continue + } + + for _, warning := range element.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + // Only show commas between elements for objects. 
+ comma := "" + if _, ok := element.Renderer.(*objectRenderer); ok { + comma = "," + } + + if renderer.alignKeys { + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), maximumKeyLen, escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma)) + } else { + buf.WriteString(fmt.Sprintf("%s%s%s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma)) + } + + } + + if unchangedElements > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/command/jsonformat/computed/renderers/object.go b/command/jsonformat/computed/renderers/object.go new file mode 100644 index 000000000000..66a8a16b464b --- /dev/null +++ b/command/jsonformat/computed/renderers/object.go @@ -0,0 +1,95 @@ +package renderers + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*objectRenderer)(nil) + +func Object(attributes map[string]computed.Diff) computed.DiffRenderer { + return &objectRenderer{ + attributes: attributes, + overrideNullSuffix: true, + } +} + +func NestedObject(attributes map[string]computed.Diff) computed.DiffRenderer { + return &objectRenderer{ + attributes: attributes, + overrideNullSuffix: false, + } +} + +type objectRenderer struct { + NoWarningsRenderer + + attributes map[string]computed.Diff + overrideNullSuffix bool +} + +func (renderer objectRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if len(renderer.attributes) == 0 { + return 
fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) + } + + attributeOpts := opts.Clone() + attributeOpts.OverrideNullSuffix = renderer.overrideNullSuffix + + // We need to keep track of our keys in two ways. The first is the order in + // which we will display them. The second is a mapping to their safely + // escaped equivalent. + + maximumKeyLen := 0 + var keys []string + escapedKeys := make(map[string]string) + for key := range renderer.attributes { + keys = append(keys, key) + escapedKey := EnsureValidAttributeName(key) + escapedKeys[key] = escapedKey + if maximumKeyLen < len(escapedKey) { + maximumKeyLen = len(escapedKey) + } + } + sort.Strings(keys) + + unchangedAttributes := 0 + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts))) + for _, key := range keys { + attribute := renderer.attributes[key] + + if importantAttribute(key) { + importantAttributeOpts := attributeOpts.Clone() + importantAttributeOpts.ShowUnchangedChildren = true + + for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, importantAttributeOpts))) + continue + } + + if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren { + // Don't render NoOp operations when we are compact display. 
+ unchangedAttributes++ + continue + } + + for _, warning := range attribute.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, attributeOpts))) + } + + if unchangedAttributes > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/command/jsonformat/computed/renderers/primitive.go b/command/jsonformat/computed/renderers/primitive.go new file mode 100644 index 000000000000..06c53baf09bb --- /dev/null +++ b/command/jsonformat/computed/renderers/primitive.go @@ -0,0 +1,239 @@ +package renderers + +import ( + "fmt" + "math/big" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*primitiveRenderer)(nil) + +func Primitive(before, after interface{}, ctype cty.Type) computed.DiffRenderer { + return &primitiveRenderer{ + before: before, + after: after, + ctype: ctype, + } +} + +type primitiveRenderer struct { + NoWarningsRenderer + + before interface{} + after interface{} + ctype cty.Type +} + +func (renderer primitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if renderer.ctype == cty.String { + return renderer.renderStringDiff(diff, indent, 
opts) + } + + beforeValue := renderPrimitiveValue(renderer.before, renderer.ctype, opts) + afterValue := renderPrimitiveValue(renderer.after, renderer.ctype, opts) + + switch diff.Action { + case plans.Create: + return fmt.Sprintf("%s%s", afterValue, forcesReplacement(diff.Replace, opts)) + case plans.Delete: + return fmt.Sprintf("%s%s%s", beforeValue, nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) + case plans.NoOp: + return fmt.Sprintf("%s%s", beforeValue, forcesReplacement(diff.Replace, opts)) + default: + return fmt.Sprintf("%s %s %s%s", beforeValue, opts.Colorize.Color("[yellow]->[reset]"), afterValue, forcesReplacement(diff.Replace, opts)) + } +} + +func renderPrimitiveValue(value interface{}, t cty.Type, opts computed.RenderHumanOpts) string { + if value == nil { + return opts.Colorize.Color("[dark_gray]null[reset]") + } + + switch { + case t == cty.Bool: + if value.(bool) { + return "true" + } + return "false" + case t == cty.Number: + bf := big.NewFloat(value.(float64)) + return bf.Text('f', -1) + default: + panic("unrecognized primitive type: " + t.FriendlyName()) + } +} + +func (renderer primitiveRenderer) renderStringDiff(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + + // We process multiline strings at the end of the switch statement. + var lines []string + + switch diff.Action { + case plans.Create, plans.NoOp: + str := evaluatePrimitiveString(renderer.after, opts) + + if str.Json != nil { + if diff.Action == plans.NoOp { + return renderer.renderStringDiffAsJson(diff, indent, opts, str, str) + } else { + return renderer.renderStringDiffAsJson(diff, indent, opts, evaluatedString{}, str) + } + } + + if !str.IsMultiline { + return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts)) + } + + // We are creating a single multiline string, so let's split by the new + // line character. 
While we are doing this, we are going to insert our + // indents and make sure each line is formatted correctly. + lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n") + + // We now just need to do the same for the first entry in lines, because + // we split on the new line characters which won't have been at the + // beginning of the first line. + lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0]) + case plans.Delete: + str := evaluatePrimitiveString(renderer.before, opts) + if str.IsNull { + // We don't put the null suffix (-> null) here because the final + // render or null -> null would look silly. + return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts)) + } + + if str.Json != nil { + return renderer.renderStringDiffAsJson(diff, indent, opts, str, evaluatedString{}) + } + + if !str.IsMultiline { + return fmt.Sprintf("%s%s%s", str.RenderSimple(), nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) + } + + // We are creating a single multiline string, so let's split by the new + // line character. While we are doing this, we are going to insert our + // indents and make sure each line is formatted correctly. + lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n") + + // We now just need to do the same for the first entry in lines, because + // we split on the new line characters which won't have been at the + // beginning of the first line. 
+ lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0]) + default: + beforeString := evaluatePrimitiveString(renderer.before, opts) + afterString := evaluatePrimitiveString(renderer.after, opts) + + if beforeString.Json != nil && afterString.Json != nil { + return renderer.renderStringDiffAsJson(diff, indent, opts, beforeString, afterString) + } + + if beforeString.Json != nil || afterString.Json != nil { + // This means one of the strings is JSON and one isn't. We're going + // to be a little inefficient here, but we can just reuse another + // renderer for this so let's keep it simple. + return computed.NewDiff( + TypeChange( + computed.NewDiff(Primitive(renderer.before, nil, cty.String), plans.Delete, false), + computed.NewDiff(Primitive(nil, renderer.after, cty.String), plans.Create, false)), + diff.Action, + diff.Replace).RenderHuman(indent, opts) + } + + if !beforeString.IsMultiline && !afterString.IsMultiline { + return fmt.Sprintf("%s %s %s%s", beforeString.RenderSimple(), opts.Colorize.Color("[yellow]->[reset]"), afterString.RenderSimple(), forcesReplacement(diff.Replace, opts)) + } + + beforeLines := strings.Split(beforeString.String, "\n") + afterLines := strings.Split(afterString.String, "\n") + + processIndices := func(beforeIx, afterIx int) { + if beforeIx < 0 || beforeIx >= len(beforeLines) { + lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Create, opts), afterLines[afterIx])) + return + } + + if afterIx < 0 || afterIx >= len(afterLines) { + lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Delete, opts), beforeLines[beforeIx])) + return + } + + lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), beforeLines[beforeIx])) + } + isObjType := func(_ string) bool { + return false + } + + collections.ProcessSlice(beforeLines, afterLines, processIndices, 
isObjType) + } + + // We return early if we find non-multiline strings or JSON strings, so we + // know here that we just render the lines slice properly. + return fmt.Sprintf("<<-EOT%s\n%s\n%s%sEOT%s", + forcesReplacement(diff.Replace, opts), + strings.Join(lines, "\n"), + formatIndent(indent), + writeDiffActionSymbol(plans.NoOp, opts), + nullSuffix(diff.Action, opts)) +} + +func (renderer primitiveRenderer) renderStringDiffAsJson(diff computed.Diff, indent int, opts computed.RenderHumanOpts, before evaluatedString, after evaluatedString) string { + jsonDiff := RendererJsonOpts().Transform(structured.Change{ + BeforeExplicit: diff.Action != plans.Create, + AfterExplicit: diff.Action != plans.Delete, + Before: before.Json, + After: after.Json, + Unknown: false, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }) + + action := diff.Action + + jsonOpts := opts.Clone() + jsonOpts.OverrideNullSuffix = true + + var whitespace, replace string + if jsonDiff.Action == plans.NoOp && diff.Action == plans.Update { + // Then this means we are rendering a whitespace only change. The JSON + // differ will have ignored the whitespace changes so that makes the + // diff we are about to print out very confusing without extra + // explanation. + if diff.Replace { + whitespace = " # whitespace changes force replacement" + } else { + whitespace = " # whitespace changes" + } + + // Because we'd be showing no changes otherwise: + jsonOpts.ShowUnchangedChildren = true + + // Whitespace changes should not appear as if edited. + action = plans.NoOp + } else { + // We only show the replace suffix if we didn't print something out + // about whitespace changes. 
+ replace = forcesReplacement(diff.Replace, opts) + } + + renderedJsonDiff := jsonDiff.RenderHuman(indent+1, jsonOpts) + + if diff.Action == plans.Create || diff.Action == plans.Delete { + // We don't display the '+' or '-' symbols on the JSON diffs, we should + // still display the '~' for an update action though. + action = plans.NoOp + } + + if strings.Contains(renderedJsonDiff, "\n") { + return fmt.Sprintf("jsonencode(%s\n%s%s%s%s\n%s%s)%s", whitespace, formatIndent(indent+1), writeDiffActionSymbol(action, opts), renderedJsonDiff, replace, formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts)) + } + return fmt.Sprintf("jsonencode(%s)%s%s", renderedJsonDiff, whitespace, replace) +} diff --git a/internal/command/jsonformat/computed/renderers/renderer_test.go b/command/jsonformat/computed/renderers/renderer_test.go similarity index 99% rename from internal/command/jsonformat/computed/renderers/renderer_test.go rename to command/jsonformat/computed/renderers/renderer_test.go index ae49a0974f38..da2957cc248d 100644 --- a/internal/command/jsonformat/computed/renderers/renderer_test.go +++ b/command/jsonformat/computed/renderers/renderer_test.go @@ -4,13 +4,13 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed" "github.com/google/go-cmp/cmp" "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/plans" ) func TestRenderers_Human(t *testing.T) { diff --git a/command/jsonformat/computed/renderers/sensitive.go b/command/jsonformat/computed/renderers/sensitive.go new file mode 100644 index 000000000000..8034274e2cd9 --- /dev/null +++ b/command/jsonformat/computed/renderers/sensitive.go @@ -0,0 +1,50 @@ +package renderers + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + 
"github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*sensitiveRenderer)(nil) + +func Sensitive(change computed.Diff, beforeSensitive, afterSensitive bool) computed.DiffRenderer { + return &sensitiveRenderer{ + inner: change, + beforeSensitive: beforeSensitive, + afterSensitive: afterSensitive, + } +} + +type sensitiveRenderer struct { + inner computed.Diff + + beforeSensitive bool + afterSensitive bool +} + +func (renderer sensitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + return fmt.Sprintf("(sensitive value)%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) +} + +func (renderer sensitiveRenderer) WarningsHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) []string { + if (renderer.beforeSensitive == renderer.afterSensitive) || renderer.inner.Action == plans.Create || renderer.inner.Action == plans.Delete { + // Only display warnings for sensitive values if they are changing from + // being sensitive or to being sensitive and if they are not being + // destroyed or created. 
+ return []string{} + } + + var warning string + if renderer.beforeSensitive { + warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will no longer be marked as sensitive\n%s # after applying this change.", formatIndent(indent))) + } else { + warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will be marked as sensitive and will not\n%s # display in UI output after applying this change.", formatIndent(indent))) + } + + if renderer.inner.Action == plans.NoOp { + return []string{fmt.Sprintf("%s The value is unchanged.", warning)} + } + return []string{warning} +} diff --git a/internal/command/jsonformat/computed/renderers/sensitive_block.go b/command/jsonformat/computed/renderers/sensitive_block.go similarity index 93% rename from internal/command/jsonformat/computed/renderers/sensitive_block.go rename to command/jsonformat/computed/renderers/sensitive_block.go index e5c129862f22..8e3d3f79f470 100644 --- a/internal/command/jsonformat/computed/renderers/sensitive_block.go +++ b/command/jsonformat/computed/renderers/sensitive_block.go @@ -3,8 +3,8 @@ package renderers import ( "fmt" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" ) func SensitiveBlock(diff computed.Diff, beforeSensitive, afterSensitive bool) computed.DiffRenderer { diff --git a/command/jsonformat/computed/renderers/set.go b/command/jsonformat/computed/renderers/set.go new file mode 100644 index 000000000000..1e459f4b2292 --- /dev/null +++ b/command/jsonformat/computed/renderers/set.go @@ -0,0 +1,72 @@ +package renderers + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*setRenderer)(nil) + +func Set(elements []computed.Diff) 
computed.DiffRenderer { + return &setRenderer{ + elements: elements, + } +} + +func NestedSet(elements []computed.Diff) computed.DiffRenderer { + return &setRenderer{ + elements: elements, + overrideForcesReplacement: true, + } +} + +type setRenderer struct { + NoWarningsRenderer + + elements []computed.Diff + + overrideForcesReplacement bool +} + +func (renderer setRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + // Sets are a bit finicky, nested sets don't render the forces replacement + // suffix themselves, but push it onto their children. So if we are + // overriding the forces replacement setting, we set it to true for children + // and false for ourselves. + displayForcesReplacementInSelf := diff.Replace && !renderer.overrideForcesReplacement + displayForcesReplacementInChildren := diff.Replace && renderer.overrideForcesReplacement + + if len(renderer.elements) == 0 { + return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(displayForcesReplacementInSelf, opts)) + } + + elementOpts := opts.Clone() + elementOpts.OverrideNullSuffix = true + elementOpts.OverrideForcesReplacement = displayForcesReplacementInChildren + + unchangedElements := 0 + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(displayForcesReplacementInSelf, opts))) + for _, element := range renderer.elements { + if element.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedElements++ + continue + } + + for _, warning := range element.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), element.RenderHuman(indent+1, elementOpts))) + } + + if unchangedElements > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, 
opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/internal/command/jsonformat/computed/renderers/string.go b/command/jsonformat/computed/renderers/string.go similarity index 92% rename from internal/command/jsonformat/computed/renderers/string.go rename to command/jsonformat/computed/renderers/string.go index 7777a3eb4239..cec5fd9e9c5c 100644 --- a/internal/command/jsonformat/computed/renderers/string.go +++ b/command/jsonformat/computed/renderers/string.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed" ) type evaluatedString struct { diff --git a/command/jsonformat/computed/renderers/testing.go b/command/jsonformat/computed/renderers/testing.go new file mode 100644 index 000000000000..2c791f5971f6 --- /dev/null +++ b/command/jsonformat/computed/renderers/testing.go @@ -0,0 +1,318 @@ +package renderers + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +type ValidateDiffFunction func(t *testing.T, diff computed.Diff) + +func validateDiff(t *testing.T, diff computed.Diff, expectedAction plans.Action, expectedReplace bool) { + if diff.Replace != expectedReplace || diff.Action != expectedAction { + t.Errorf("\nreplace:\n\texpected:%t\n\tactual:%t\naction:\n\texpected:%s\n\tactual:%s", expectedReplace, diff.Replace, expectedAction, diff.Action) + } +} + +func ValidatePrimitive(before, after interface{}, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + primitive, ok := diff.Renderer.(*primitiveRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } 
+ + beforeDiff := cmp.Diff(primitive.before, before) + afterDiff := cmp.Diff(primitive.after, after) + + if len(beforeDiff) > 0 || len(afterDiff) > 0 { + t.Errorf("before diff: (%s), after diff: (%s)", beforeDiff, afterDiff) + } + } +} + +func ValidateObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + object, ok := diff.Renderer.(*objectRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if !object.overrideNullSuffix { + t.Errorf("created the wrong type of object renderer") + } + + validateMapType(t, object.attributes, attributes) + } +} + +func ValidateNestedObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + object, ok := diff.Renderer.(*objectRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if object.overrideNullSuffix { + t.Errorf("created the wrong type of object renderer") + } + + validateMapType(t, object.attributes, attributes) + } +} + +func ValidateMap(elements map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + m, ok := diff.Renderer.(*mapRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateMapType(t, m.elements, elements) + } +} + +func validateMapType(t *testing.T, actual map[string]computed.Diff, expected map[string]ValidateDiffFunction) { + validateKeys(t, actual, expected) + + for key, expected := range expected { + if actual, ok := actual[key]; ok { + expected(t, actual) + } + } +} + +func validateKeys[C, V any](t *testing.T, actual map[string]C, expected map[string]V) { + if len(actual) 
!= len(expected) { + + var actualAttributes []string + var expectedAttributes []string + + for key := range actual { + actualAttributes = append(actualAttributes, key) + } + for key := range expected { + expectedAttributes = append(expectedAttributes, key) + } + + sort.Strings(actualAttributes) + sort.Strings(expectedAttributes) + + if diff := cmp.Diff(actualAttributes, expectedAttributes); len(diff) > 0 { + t.Errorf("actual and expected attributes did not match: %s", diff) + } + } +} + +func ValidateList(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + list, ok := diff.Renderer.(*listRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if !list.displayContext { + t.Errorf("created the wrong type of list renderer") + } + + validateSliceType(t, list.elements, elements) + } +} + +func ValidateNestedList(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + list, ok := diff.Renderer.(*listRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if list.displayContext { + t.Errorf("created the wrong type of list renderer") + } + + validateSliceType(t, list.elements, elements) + } +} + +func ValidateSet(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + set, ok := diff.Renderer.(*setRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateSliceType(t, set.elements, elements) + } +} + +func validateSliceType(t *testing.T, actual []computed.Diff, expected []ValidateDiffFunction) { + if len(actual) != len(expected) { + t.Errorf("expected %d elements but found 
%d elements", len(expected), len(actual)) + return + } + + for ix := 0; ix < len(expected); ix++ { + expected[ix](t, actual[ix]) + } +} + +func ValidateBlock( + attributes map[string]ValidateDiffFunction, + singleBlocks map[string]ValidateDiffFunction, + listBlocks map[string][]ValidateDiffFunction, + mapBlocks map[string]map[string]ValidateDiffFunction, + setBlocks map[string][]ValidateDiffFunction, + action plans.Action, + replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + block, ok := diff.Renderer.(*blockRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateKeys(t, block.attributes, attributes) + validateKeys(t, block.blocks.SingleBlocks, singleBlocks) + validateKeys(t, block.blocks.ListBlocks, listBlocks) + validateKeys(t, block.blocks.MapBlocks, mapBlocks) + validateKeys(t, block.blocks.SetBlocks, setBlocks) + + for key, expected := range attributes { + if actual, ok := block.attributes[key]; ok { + expected(t, actual) + } + } + + for key, expected := range singleBlocks { + expected(t, block.blocks.SingleBlocks[key]) + } + + for key, expected := range listBlocks { + if actual, ok := block.blocks.ListBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range setBlocks { + if actual, ok := block.blocks.SetBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range setBlocks { + if actual, ok := block.blocks.SetBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, 
len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range mapBlocks { + if actual, ok := block.blocks.MapBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for dKey := range expected { + expected[dKey](t, actual[dKey]) + } + } + } + } +} + +func ValidateTypeChange(before, after ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + typeChange, ok := diff.Renderer.(*typeChangeRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + before(t, typeChange.before) + after(t, typeChange.after) + } +} + +func ValidateSensitive(inner ValidateDiffFunction, beforeSensitive, afterSensitive bool, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + sensitive, ok := diff.Renderer.(*sensitiveRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if beforeSensitive != sensitive.beforeSensitive || afterSensitive != sensitive.afterSensitive { + t.Errorf("before or after sensitive values don't match:\n\texpected; before: %t after: %t\n\tactual; before: %t, after: %t", beforeSensitive, afterSensitive, sensitive.beforeSensitive, sensitive.afterSensitive) + } + + inner(t, sensitive.inner) + } +} + +func ValidateUnknown(before ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + unknown, ok := diff.Renderer.(*unknownRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if before == nil { + if unknown.before.Renderer != nil { + t.Errorf("did not expect a before renderer, but 
found one") + } + return + } + + if unknown.before.Renderer == nil { + t.Errorf("expected a before renderer, but found none") + } + + before(t, unknown.before) + } +} diff --git a/internal/command/jsonformat/computed/renderers/type_change.go b/command/jsonformat/computed/renderers/type_change.go similarity index 90% rename from internal/command/jsonformat/computed/renderers/type_change.go rename to command/jsonformat/computed/renderers/type_change.go index 7469b51be6ac..d8d823d6f44f 100644 --- a/internal/command/jsonformat/computed/renderers/type_change.go +++ b/command/jsonformat/computed/renderers/type_change.go @@ -3,7 +3,7 @@ package renderers import ( "fmt" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed" ) var _ computed.DiffRenderer = (*typeChangeRenderer)(nil) diff --git a/command/jsonformat/computed/renderers/unknown.go b/command/jsonformat/computed/renderers/unknown.go new file mode 100644 index 000000000000..51ea7cac7db1 --- /dev/null +++ b/command/jsonformat/computed/renderers/unknown.go @@ -0,0 +1,33 @@ +package renderers + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + + "github.com/hashicorp/terraform/plans" +) + +var _ computed.DiffRenderer = (*unknownRenderer)(nil) + +func Unknown(before computed.Diff) computed.DiffRenderer { + return &unknownRenderer{ + before: before, + } +} + +type unknownRenderer struct { + NoWarningsRenderer + + before computed.Diff +} + +func (renderer unknownRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if diff.Action == plans.Create { + return fmt.Sprintf("(known after apply)%s", forcesReplacement(diff.Replace, opts)) + } + + // Never render null suffix for children of unknown changes. 
+ opts.OverrideNullSuffix = true + return fmt.Sprintf("%s -> (known after apply)%s", renderer.before.RenderHuman(indent, opts), forcesReplacement(diff.Replace, opts)) +} diff --git a/command/jsonformat/computed/renderers/util.go b/command/jsonformat/computed/renderers/util.go new file mode 100644 index 000000000000..5381d3f0b4bf --- /dev/null +++ b/command/jsonformat/computed/renderers/util.go @@ -0,0 +1,90 @@ +package renderers + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/command/format" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +// NoWarningsRenderer defines a Warnings function that returns an empty list of +// warnings. This can be used by other renderers to ensure we don't see lots of +// repeats of this empty function. +type NoWarningsRenderer struct{} + +// WarningsHuman returns an empty slice, as the name NoWarningsRenderer suggests. +func (render NoWarningsRenderer) WarningsHuman(_ computed.Diff, _ int, _ computed.RenderHumanOpts) []string { + return nil +} + +// nullSuffix returns the `-> null` suffix if the change is a delete action, and +// it has not been overridden. +func nullSuffix(action plans.Action, opts computed.RenderHumanOpts) string { + if !opts.OverrideNullSuffix && action == plans.Delete { + return opts.Colorize.Color(" [dark_gray]-> null[reset]") + } + return "" +} + +// forcesReplacement returns the `# forces replacement` suffix if this change is +// driving the entire resource to be replaced. +func forcesReplacement(replace bool, opts computed.RenderHumanOpts) string { + if replace || opts.OverrideForcesReplacement { + return opts.Colorize.Color(" [red]# forces replacement[reset]") + } + return "" +} + +// indent returns whitespace that is the required length for the specified +// indent. 
+func formatIndent(indent int) string { + return strings.Repeat(" ", indent) +} + +// unchanged prints out a description saying how many of 'keyword' have been +// hidden because they are unchanged or noop actions. +func unchanged(keyword string, count int, opts computed.RenderHumanOpts) string { + if count == 1 { + return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %s hidden)[reset]", count, keyword)) + } + return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %ss hidden)[reset]", count, keyword)) +} + +// EnsureValidAttributeName checks if `name` contains any HCL syntax and calls +// and returns hclEscapeString. +func EnsureValidAttributeName(name string) string { + if !hclsyntax.ValidIdentifier(name) { + return hclEscapeString(name) + } + return name +} + +// hclEscapeString formats the input string into a format that is safe for +// rendering within HCL. +// +// Note, this function doesn't actually do a very good job of this currently. We +// need to expose some internal functions from HCL in a future version and call +// them from here. For now, just use "%q" formatting. +func hclEscapeString(str string) string { + // TODO: Replace this with more complete HCL logic instead of the simple + // go workaround. + return fmt.Sprintf("%q", str) +} + +// writeDiffActionSymbol writes out the symbols for the associated action, and +// handles localized colorization of the symbol as well as indenting the symbol +// to be 4 spaces wide. +// +// If the opts has HideDiffActionSymbols set then this function returns an empty +// string. 
+func writeDiffActionSymbol(action plans.Action, opts computed.RenderHumanOpts) string { + if opts.HideDiffActionSymbols { + return "" + } + return fmt.Sprintf("%s ", opts.Colorize.Color(format.DiffActionSymbol(action))) +} diff --git a/command/jsonformat/diff.go b/command/jsonformat/diff.go new file mode 100644 index 000000000000..007bd98a5599 --- /dev/null +++ b/command/jsonformat/diff.go @@ -0,0 +1,99 @@ +package jsonformat + +import ( + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/differ" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/plans" +) + +func precomputeDiffs(plan Plan, mode plans.Mode) diffs { + diffs := diffs{ + outputs: make(map[string]computed.Diff), + } + + for _, drift := range plan.ResourceDrift { + + var relevantAttrs attribute_path.Matcher + if mode == plans.RefreshOnlyMode { + // For a refresh only plan, we show all the drift. + relevantAttrs = attribute_path.AlwaysMatcher() + } else { + matcher := attribute_path.Empty(true) + + // Otherwise we only want to show the drift changes that are + // relevant. + for _, attr := range plan.RelevantAttributes { + if len(attr.Resource) == 0 || attr.Resource == drift.Address { + matcher = attribute_path.AppendSingle(matcher, attr.Attr) + } + } + + if len(matcher.Paths) > 0 { + relevantAttrs = matcher + } + } + + if relevantAttrs == nil { + // If we couldn't build a relevant attribute matcher, then we are + // not going to show anything for this drift. 
+ continue + } + + schema := plan.getSchema(drift) + change := structured.FromJsonChange(drift.Change, relevantAttrs) + diffs.drift = append(diffs.drift, diff{ + change: drift, + diff: differ.ComputeDiffForBlock(change, schema.Block), + }) + } + + for _, change := range plan.ResourceChanges { + schema := plan.getSchema(change) + structuredChange := structured.FromJsonChange(change.Change, attribute_path.AlwaysMatcher()) + diffs.changes = append(diffs.changes, diff{ + change: change, + diff: differ.ComputeDiffForBlock(structuredChange, schema.Block), + }) + } + + for key, output := range plan.OutputChanges { + change := structured.FromJsonChange(output, attribute_path.AlwaysMatcher()) + diffs.outputs[key] = differ.ComputeDiffForOutput(change) + } + + return diffs +} + +type diffs struct { + drift []diff + changes []diff + outputs map[string]computed.Diff +} + +func (d diffs) Empty() bool { + for _, change := range d.changes { + if change.diff.Action != plans.NoOp || change.Moved() { + return false + } + } + + for _, output := range d.outputs { + if output.Action != plans.NoOp { + return false + } + } + + return true +} + +type diff struct { + change jsonplan.ResourceChange + diff computed.Diff +} + +func (d diff) Moved() bool { + return len(d.change.PreviousAddress) > 0 && d.change.PreviousAddress != d.change.Address +} diff --git a/command/jsonformat/differ/attribute.go b/command/jsonformat/differ/attribute.go new file mode 100644 index 000000000000..854699a328cd --- /dev/null +++ b/command/jsonformat/differ/attribute.go @@ -0,0 +1,84 @@ +package differ + +import ( + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + + "github.com/hashicorp/terraform/command/jsonprovider" +) + +func ComputeDiffForAttribute(change structured.Change, attribute *jsonprovider.Attribute) computed.Diff { + if 
attribute.AttributeNestedType != nil { + return computeDiffForNestedAttribute(change, attribute.AttributeNestedType) + } + return ComputeDiffForType(change, unmarshalAttribute(attribute)) +} + +func computeDiffForNestedAttribute(change structured.Change, nested *jsonprovider.NestedType) computed.Diff { + if sensitive, ok := checkForSensitiveNestedAttribute(change, nested); ok { + return sensitive + } + + if computed, ok := checkForUnknownNestedAttribute(change, nested); ok { + return computed + } + + switch NestingMode(nested.NestingMode) { + case nestingModeSingle, nestingModeGroup: + return computeAttributeDiffAsNestedObject(change, nested.Attributes) + case nestingModeMap: + return computeAttributeDiffAsNestedMap(change, nested.Attributes) + case nestingModeList: + return computeAttributeDiffAsNestedList(change, nested.Attributes) + case nestingModeSet: + return computeAttributeDiffAsNestedSet(change, nested.Attributes) + default: + panic("unrecognized nesting mode: " + nested.NestingMode) + } +} + +func ComputeDiffForType(change structured.Change, ctype cty.Type) computed.Diff { + if sensitive, ok := checkForSensitiveType(change, ctype); ok { + return sensitive + } + + if computed, ok := checkForUnknownType(change, ctype); ok { + return computed + } + + switch { + case ctype == cty.NilType, ctype == cty.DynamicPseudoType: + // Forward nil or dynamic types over to be processed as outputs. + // There is nothing particularly special about the way outputs are + // processed that make this unsafe, we could just as easily call this + // function computeChangeForDynamicValues(), but external callers will + // only be in this situation when processing outputs so this function + // is named for their benefit. 
+ return ComputeDiffForOutput(change) + case ctype.IsPrimitiveType(): + return computeAttributeDiffAsPrimitive(change, ctype) + case ctype.IsObjectType(): + return computeAttributeDiffAsObject(change, ctype.AttributeTypes()) + case ctype.IsMapType(): + return computeAttributeDiffAsMap(change, ctype.ElementType()) + case ctype.IsListType(): + return computeAttributeDiffAsList(change, ctype.ElementType()) + case ctype.IsTupleType(): + return computeAttributeDiffAsTuple(change, ctype.TupleElementTypes()) + case ctype.IsSetType(): + return computeAttributeDiffAsSet(change, ctype.ElementType()) + default: + panic("unrecognized type: " + ctype.FriendlyName()) + } +} + +func unmarshalAttribute(attribute *jsonprovider.Attribute) cty.Type { + ctyType, err := ctyjson.UnmarshalType(attribute.AttributeType) + if err != nil { + panic("could not unmarshal attribute type: " + err.Error()) + } + return ctyType +} diff --git a/command/jsonformat/differ/block.go b/command/jsonformat/differ/block.go new file mode 100644 index 000000000000..5aad75d03496 --- /dev/null +++ b/command/jsonformat/differ/block.go @@ -0,0 +1,118 @@ +package differ + +import ( + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +func ComputeDiffForBlock(change structured.Change, block *jsonprovider.Block) computed.Diff { + if sensitive, ok := checkForSensitiveBlock(change, block); ok { + return sensitive + } + + if unknown, ok := checkForUnknownBlock(change, block); ok { + return unknown + } + + current := change.GetDefaultActionForIteration() + + blockValue := change.AsMap() + + attributes := make(map[string]computed.Diff) + for key, attr := range block.Attributes { + childValue := 
blockValue.GetChild(key) + + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + childValue = childValue.AsNoOp() + } + + // Empty strings in blocks should be considered null for legacy reasons. + // The SDK doesn't support null strings yet, so we work around this now. + if before, ok := childValue.Before.(string); ok && len(before) == 0 { + childValue.Before = nil + } + if after, ok := childValue.After.(string); ok && len(after) == 0 { + childValue.After = nil + } + + // Always treat changes to blocks as implicit. + childValue.BeforeExplicit = false + childValue.AfterExplicit = false + + childChange := ComputeDiffForAttribute(childValue, attr) + if childChange.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values at all in blocks. + continue + } + + attributes[key] = childChange + current = collections.CompareActions(current, childChange.Action) + } + + blocks := renderers.Blocks{ + ReplaceBlocks: make(map[string]bool), + BeforeSensitiveBlocks: make(map[string]bool), + AfterSensitiveBlocks: make(map[string]bool), + SingleBlocks: make(map[string]computed.Diff), + ListBlocks: make(map[string][]computed.Diff), + SetBlocks: make(map[string][]computed.Diff), + MapBlocks: make(map[string]map[string]computed.Diff), + } + + for key, blockType := range block.BlockTypes { + childValue := blockValue.GetChild(key) + + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + childValue = childValue.AsNoOp() + } + + beforeSensitive := childValue.IsBeforeSensitive() + afterSensitive := childValue.IsAfterSensitive() + forcesReplacement := childValue.ReplacePaths.Matches() + + switch NestingMode(blockType.NestingMode) { + case nestingModeSet: + diffs, action := computeBlockDiffsAsSet(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. 
+ continue + } + blocks.AddAllSetBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeList: + diffs, action := computeBlockDiffsAsList(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. + continue + } + blocks.AddAllListBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeMap: + diffs, action := computeBlockDiffsAsMap(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. + continue + } + blocks.AddAllMapBlocks(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeSingle, nestingModeGroup: + diff := ComputeDiffForBlock(childValue, blockType.Block) + if diff.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. 
+ continue + } + blocks.AddSingleBlock(key, diff, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, diff.Action) + default: + panic("unrecognized nesting mode: " + blockType.NestingMode) + } + } + + return computed.NewDiff(renderers.Block(attributes, blocks), current, change.ReplacePaths.Matches()) +} diff --git a/command/jsonformat/differ/differ.go b/command/jsonformat/differ/differ.go new file mode 100644 index 000000000000..e5d7a6b08fe9 --- /dev/null +++ b/command/jsonformat/differ/differ.go @@ -0,0 +1,12 @@ +package differ + +import ( + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/structured" +) + +// asDiff is a helper function to abstract away some simple and common +// functionality when converting a renderer into a concrete diff. +func asDiff(change structured.Change, renderer computed.DiffRenderer) computed.Diff { + return computed.NewDiff(renderer, change.CalculateAction(), change.ReplacePaths.Matches()) +} diff --git a/internal/command/jsonformat/differ/differ_test.go b/command/jsonformat/differ/differ_test.go similarity index 99% rename from internal/command/jsonformat/differ/differ_test.go rename to command/jsonformat/differ/differ_test.go index 817336ef352d..77d9bdd4f5e2 100644 --- a/internal/command/jsonformat/differ/differ_test.go +++ b/command/jsonformat/differ/differ_test.go @@ -8,11 +8,11 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + 
"github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" ) type SetDiff struct { diff --git a/command/jsonformat/differ/list.go b/command/jsonformat/differ/list.go new file mode 100644 index 000000000000..aa8773a2eb26 --- /dev/null +++ b/command/jsonformat/differ/list.go @@ -0,0 +1,87 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +func computeAttributeDiffAsList(change structured.Change, elementType cty.Type) computed.Diff { + sliceValue := change.AsSlice() + + processIndices := func(beforeIx, afterIx int) computed.Diff { + value := sliceValue.GetChild(beforeIx, afterIx) + + // It's actually really difficult to render the diffs when some indices + // within a slice are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list or set as also + // relevant. + // + // Interestingly the terraform plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the collection is + // relevant (decided elsewhere), then every element in the collection is + // also relevant. To be clear, in practice even if we didn't do the + // following explicitly the effect would be the same. It's just nicer + // for us to be clear about the behaviour we expect. 
+ // + // What makes this difficult is the fact that the beforeIx and afterIx + // can be different, and it's quite difficult to work out which one is + // the relevant one. For nested lists, block lists, and tuples it's much + // easier because we always process the same indices in the before and + // after. + value.RelevantAttributes = attribute_path.AlwaysMatcher() + + return ComputeDiffForType(value, elementType) + } + + isObjType := func(_ interface{}) bool { + return elementType.IsObjectType() + } + + elements, current := collections.TransformSlice(sliceValue.Before, sliceValue.After, processIndices, isObjType) + return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedList(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processNestedList(change, func(value structured.Change) { + element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.NestedList(elements), current, change.ReplacePaths.Matches()) +} + +func computeBlockDiffsAsList(change structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processNestedList(change, func(value structured.Change) { + element := ComputeDiffForBlock(value, block) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return elements, current +} + +func processNestedList(change structured.Change, process func(value structured.Change)) { + sliceValue := change.AsSlice() + for ix := 0; ix < len(sliceValue.Before) || ix < len(sliceValue.After); ix++ { + value := 
sliceValue.GetChild(ix, ix) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + process(value) + } +} diff --git a/command/jsonformat/differ/map.go b/command/jsonformat/differ/map.go new file mode 100644 index 000000000000..fd2d5a9c55be --- /dev/null +++ b/command/jsonformat/differ/map.go @@ -0,0 +1,53 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +func computeAttributeDiffAsMap(change structured.Change, elementType cty.Type) computed.Diff { + mapValue := change.AsMap() + elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.AllKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + return ComputeDiffForType(value, elementType) + }) + return computed.NewDiff(renderers.Map(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedMap(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + mapValue := change.AsMap() + elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. 
+ value = value.AsNoOp() + } + return computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + }) + return computed.NewDiff(renderers.NestedMap(elements), current, change.ReplacePaths.Matches()) +} + +func computeBlockDiffsAsMap(change structured.Change, block *jsonprovider.Block) (map[string]computed.Diff, plans.Action) { + mapValue := change.AsMap() + return collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + return ComputeDiffForBlock(value, block) + }) +} diff --git a/command/jsonformat/differ/object.go b/command/jsonformat/differ/object.go new file mode 100644 index 000000000000..6b6c77899ff0 --- /dev/null +++ b/command/jsonformat/differ/object.go @@ -0,0 +1,67 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +func computeAttributeDiffAsObject(change structured.Change, attributes map[string]cty.Type) computed.Diff { + attributeDiffs, action := processObject(change, attributes, func(value structured.Change, ctype cty.Type) computed.Diff { + return ComputeDiffForType(value, ctype) + }) + return computed.NewDiff(renderers.Object(attributeDiffs), action, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedObject(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + attributeDiffs, action := processObject(change, attributes, func(value 
structured.Change, attribute *jsonprovider.Attribute) computed.Diff { + return ComputeDiffForAttribute(value, attribute) + }) + return computed.NewDiff(renderers.NestedObject(attributeDiffs), action, change.ReplacePaths.Matches()) +} + +// processObject steps through the children of value as if it is an object and +// calls out to the provided computeDiff function once it has collated the +// diffs for each child attribute. +// +// We have to make this generic as attributes and nested objects process either +// cty.Type or jsonprovider.Attribute children respectively. And we want to +// reuse as much code as possible. +// +// Also, as it generic we cannot make this function a method on Change as you +// can't create generic methods on structs. Instead, we make this a generic +// function that receives the value as an argument. +func processObject[T any](v structured.Change, attributes map[string]T, computeDiff func(structured.Change, T) computed.Diff) (map[string]computed.Diff, plans.Action) { + attributeDiffs := make(map[string]computed.Diff) + mapValue := v.AsMap() + + currentAction := v.GetDefaultActionForIteration() + for key, attribute := range attributes { + attributeValue := mapValue.GetChild(key) + + if !attributeValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + attributeValue = attributeValue.AsNoOp() + } + + // We always assume changes to object are implicit. + attributeValue.BeforeExplicit = false + attributeValue.AfterExplicit = false + + attributeDiff := computeDiff(attributeValue, attribute) + if attributeDiff.Action == plans.NoOp && attributeValue.Before == nil && attributeValue.After == nil { + // We skip attributes of objects that are null both before and + // after. We don't even count these as unchanged attributes. 
+ continue + } + attributeDiffs[key] = attributeDiff + currentAction = collections.CompareActions(currentAction, attributeDiff.Action) + } + + return attributeDiffs, currentAction +} diff --git a/command/jsonformat/differ/output.go b/command/jsonformat/differ/output.go new file mode 100644 index 000000000000..22b1441f0942 --- /dev/null +++ b/command/jsonformat/differ/output.go @@ -0,0 +1,22 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" +) + +func ComputeDiffForOutput(change structured.Change) computed.Diff { + if sensitive, ok := checkForSensitiveType(change, cty.DynamicPseudoType); ok { + return sensitive + } + + if unknown, ok := checkForUnknownType(change, cty.DynamicPseudoType); ok { + return unknown + } + + jsonOpts := renderers.RendererJsonOpts() + return jsonOpts.Transform(change) +} diff --git a/command/jsonformat/differ/primitive.go b/command/jsonformat/differ/primitive.go new file mode 100644 index 000000000000..0532bb088a78 --- /dev/null +++ b/command/jsonformat/differ/primitive.go @@ -0,0 +1,13 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" +) + +func computeAttributeDiffAsPrimitive(change structured.Change, ctype cty.Type) computed.Diff { + return asDiff(change, renderers.Primitive(change.Before, change.After, ctype)) +} diff --git a/command/jsonformat/differ/sensitive.go b/command/jsonformat/differ/sensitive.go new file mode 100644 index 000000000000..ab66ea9c9fc9 --- /dev/null +++ b/command/jsonformat/differ/sensitive.go @@ -0,0 +1,43 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + 
"github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +type CreateSensitiveRenderer func(computed.Diff, bool, bool) computed.DiffRenderer + +func checkForSensitiveType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return ComputeDiffForType(value, ctype) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} + +func checkForSensitiveNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return computeDiffForNestedAttribute(value, attribute) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} + +func checkForSensitiveBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return ComputeDiffForBlock(value, block) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.SensitiveBlock(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} diff --git a/command/jsonformat/differ/set.go b/command/jsonformat/differ/set.go new file mode 100644 index 000000000000..39ea6cbde84f --- /dev/null +++ 
b/command/jsonformat/differ/set.go @@ -0,0 +1,132 @@ +package differ + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/plans" +) + +func computeAttributeDiffAsSet(change structured.Change, elementType cty.Type) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := ComputeDiffForType(value, elementType) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.Set(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedSet(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.NestedSet(elements), current, change.ReplacePaths.Matches()) +} + +func computeBlockDiffsAsSet(change structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := ComputeDiffForBlock(value, block) + elements = append(elements, 
element) + current = collections.CompareActions(current, element.Action) + }) + return elements, current +} + +func processSet(change structured.Change, process func(value structured.Change)) { + sliceValue := change.AsSlice() + + foundInBefore := make(map[int]int) + foundInAfter := make(map[int]int) + + // O(n^2) operation here to find matching pairs in the set, so we can make + // the display look pretty. There might be a better way to do this, so look + // here for potential optimisations. + + for ix := 0; ix < len(sliceValue.Before); ix++ { + matched := false + for jx := 0; jx < len(sliceValue.After); jx++ { + if _, ok := foundInAfter[jx]; ok { + // We've already found a match for this after value. + continue + } + + child := sliceValue.GetChild(ix, jx) + if reflect.DeepEqual(child.Before, child.After) && child.IsBeforeSensitive() == child.IsAfterSensitive() && !child.IsUnknown() { + matched = true + foundInBefore[ix] = jx + foundInAfter[jx] = ix + } + } + + if !matched { + foundInBefore[ix] = -1 + } + } + + clearRelevantStatus := func(change structured.Change) structured.Change { + // It's actually really difficult to render the diffs when some indices + // within a slice are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list or set as also + // relevant. + // + // Interestingly the terraform plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the collection is + // relevant (decided elsewhere), then every element in the collection is + // also relevant. To be clear, in practice even if we didn't do the + // following explicitly the effect would be the same. It's just nicer + // for us to be clear about the behaviour we expect. + // + // What makes this difficult is the fact that the beforeIx and afterIx + // can be different, and it's quite difficult to work out which one is + // the relevant one. 
For nested lists, block lists, and tuples it's much + // easier because we always process the same indices in the before and + // after. + change.RelevantAttributes = attribute_path.AlwaysMatcher() + return change + } + + // Now everything in before should be a key in foundInBefore and a value + // in foundInAfter. If a key is mapped to -1 in foundInBefore it means it + // does not have an equivalent in foundInAfter and so has been deleted. + // Everything in foundInAfter has a matching value in foundInBefore, but + // some values in after may not be in foundInAfter. This means these values + // are newly created. + + for ix := 0; ix < len(sliceValue.Before); ix++ { + if jx := foundInBefore[ix]; jx >= 0 { + child := clearRelevantStatus(sliceValue.GetChild(ix, jx)) + process(child) + continue + } + child := clearRelevantStatus(sliceValue.GetChild(ix, len(sliceValue.After))) + process(child) + } + + for jx := 0; jx < len(sliceValue.After); jx++ { + if _, ok := foundInAfter[jx]; ok { + // Then this value was handled in the previous for loop. 
+ continue + } + child := clearRelevantStatus(sliceValue.GetChild(len(sliceValue.Before), jx)) + process(child) + } +} diff --git a/command/jsonformat/differ/tuple.go b/command/jsonformat/differ/tuple.go new file mode 100644 index 000000000000..57fb7b46b70d --- /dev/null +++ b/command/jsonformat/differ/tuple.go @@ -0,0 +1,27 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" +) + +func computeAttributeDiffAsTuple(change structured.Change, elementTypes []cty.Type) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + sliceValue := change.AsSlice() + for ix, elementType := range elementTypes { + childValue := sliceValue.GetChild(ix, ix) + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. 
+ childValue = childValue.AsNoOp() + } + element := ComputeDiffForType(childValue, elementType) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + } + return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) +} diff --git a/internal/command/jsonformat/differ/types.go b/command/jsonformat/differ/types.go similarity index 100% rename from internal/command/jsonformat/differ/types.go rename to command/jsonformat/differ/types.go diff --git a/command/jsonformat/differ/unknown.go b/command/jsonformat/differ/unknown.go new file mode 100644 index 000000000000..690b024661b0 --- /dev/null +++ b/command/jsonformat/differ/unknown.go @@ -0,0 +1,63 @@ +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" +) + +func checkForUnknownType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { + return change.CheckForUnknown( + false, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return ComputeDiffForType(value, ctype) + })) +} + +func checkForUnknownNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { + + // We want our child attributes to show up as computed instead of deleted. + // Let's populate that here. 
+ childUnknown := make(map[string]interface{}) + for key := range attribute.Attributes { + childUnknown[key] = true + } + + return change.CheckForUnknown( + childUnknown, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return computeDiffForNestedAttribute(value, attribute) + })) +} + +func checkForUnknownBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { + + // We want our child attributes to show up as computed instead of deleted. + // Let's populate that here. + childUnknown := make(map[string]interface{}) + for key := range block.Attributes { + childUnknown[key] = true + } + + return change.CheckForUnknown( + childUnknown, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return ComputeDiffForBlock(value, block) + })) +} + +func processUnknown(current structured.Change) computed.Diff { + return asDiff(current, renderers.Unknown(computed.Diff{})) +} + +func createProcessUnknownWithBefore(computeDiff func(value structured.Change) computed.Diff) structured.ProcessUnknownWithBefore { + return func(current structured.Change, before structured.Change) computed.Diff { + return asDiff(current, renderers.Unknown(computeDiff(before))) + } +} diff --git a/command/jsonformat/jsondiff/diff.go b/command/jsonformat/jsondiff/diff.go new file mode 100644 index 000000000000..0172483e6142 --- /dev/null +++ b/command/jsonformat/jsondiff/diff.go @@ -0,0 +1,148 @@ +package jsondiff + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/command/jsonformat/collections" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/plans" +) + +type TransformPrimitiveJson func(before, after interface{}, ctype cty.Type, action plans.Action) computed.Diff +type TransformObjectJson func(map[string]computed.Diff, 
plans.Action) computed.Diff +type TransformArrayJson func([]computed.Diff, plans.Action) computed.Diff +type TransformUnknownJson func(computed.Diff, plans.Action) computed.Diff +type TransformSensitiveJson func(computed.Diff, bool, bool, plans.Action) computed.Diff +type TransformTypeChangeJson func(before, after computed.Diff, action plans.Action) computed.Diff + +// JsonOpts defines the external callback functions that callers should +// implement to process the supplied diffs. +type JsonOpts struct { + Primitive TransformPrimitiveJson + Object TransformObjectJson + Array TransformArrayJson + Unknown TransformUnknownJson + Sensitive TransformSensitiveJson + TypeChange TransformTypeChangeJson +} + +// Transform accepts a generic before and after value that is assumed to be JSON +// formatted and transforms it into a computed.Diff, using the callbacks +// supplied in the JsonOpts class. +func (opts JsonOpts) Transform(change structured.Change) computed.Diff { + if sensitive, ok := opts.processSensitive(change); ok { + return sensitive + } + + if unknown, ok := opts.processUnknown(change); ok { + return unknown + } + + beforeType := GetType(change.Before) + afterType := GetType(change.After) + + deleted := afterType == Null && !change.AfterExplicit + created := beforeType == Null && !change.BeforeExplicit + + if beforeType == afterType || (created || deleted) { + targetType := beforeType + if targetType == Null { + targetType = afterType + } + return opts.processUpdate(change, targetType) + } + + b := opts.processUpdate(change.AsDelete(), beforeType) + a := opts.processUpdate(change.AsCreate(), afterType) + return opts.TypeChange(b, a, plans.Update) +} + +func (opts JsonOpts) processUpdate(change structured.Change, jtype Type) computed.Diff { + switch jtype { + case Null: + return opts.processPrimitive(change, cty.NilType) + case Bool: + return opts.processPrimitive(change, cty.Bool) + case String: + return opts.processPrimitive(change, cty.String) + case Number: + 
return opts.processPrimitive(change, cty.Number) + case Object: + return opts.processObject(change.AsMap()) + case Array: + return opts.processArray(change.AsSlice()) + default: + panic("unrecognized json type: " + jtype) + } +} + +func (opts JsonOpts) processPrimitive(change structured.Change, ctype cty.Type) computed.Diff { + beforeMissing := change.Before == nil && !change.BeforeExplicit + afterMissing := change.After == nil && !change.AfterExplicit + + var action plans.Action + switch { + case beforeMissing && !afterMissing: + action = plans.Create + case !beforeMissing && afterMissing: + action = plans.Delete + case reflect.DeepEqual(change.Before, change.After): + action = plans.NoOp + default: + action = plans.Update + } + + return opts.Primitive(change.Before, change.After, ctype, action) +} + +func (opts JsonOpts) processArray(change structured.ChangeSlice) computed.Diff { + processIndices := func(beforeIx, afterIx int) computed.Diff { + // It's actually really difficult to render the diffs when some indices + // within a list are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list as also relevant, so we + // ignore the relevant attributes field. + // + // Interestingly the terraform plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the list is relevant + // (decided elsewhere), then every element in the list is also relevant. 
+ return opts.Transform(change.GetChild(beforeIx, afterIx)) + } + + isObjType := func(value interface{}) bool { + return GetType(value) == Object + } + + return opts.Array(collections.TransformSlice(change.Before, change.After, processIndices, isObjType)) +} + +func (opts JsonOpts) processObject(change structured.ChangeMap) computed.Diff { + return opts.Object(collections.TransformMap(change.Before, change.After, change.AllKeys(), func(key string) computed.Diff { + child := change.GetChild(key) + if !child.RelevantAttributes.MatchesPartial() { + child = child.AsNoOp() + } + + return opts.Transform(child) + })) +} + +func (opts JsonOpts) processUnknown(change structured.Change) (computed.Diff, bool) { + return change.CheckForUnknown( + false, + func(current structured.Change) computed.Diff { + return opts.Unknown(computed.Diff{}, plans.Create) + }, func(current structured.Change, before structured.Change) computed.Diff { + return opts.Unknown(opts.Transform(before), plans.Update) + }, + ) +} + +func (opts JsonOpts) processSensitive(change structured.Change) (computed.Diff, bool) { + return change.CheckForSensitive(opts.Transform, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return opts.Sensitive(inner, beforeSensitive, afterSensitive, action) + }) +} diff --git a/internal/command/jsonformat/jsondiff/types.go b/command/jsonformat/jsondiff/types.go similarity index 100% rename from internal/command/jsonformat/jsondiff/types.go rename to command/jsonformat/jsondiff/types.go diff --git a/command/jsonformat/plan.go b/command/jsonformat/plan.go new file mode 100644 index 000000000000..e7499f84fc9a --- /dev/null +++ b/command/jsonformat/plan.go @@ -0,0 +1,481 @@ +package jsonformat + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/jsonformat/computed" + 
"github.com/hashicorp/terraform/command/jsonformat/computed/renderers" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/plans" +) + +type PlanRendererOpt int + +const ( + detectedDrift string = "drift" + proposedChange string = "change" + + Errored PlanRendererOpt = iota + CanNotApply +) + +type Plan struct { + PlanFormatVersion string `json:"plan_format_version"` + OutputChanges map[string]jsonplan.Change `json:"output_changes"` + ResourceChanges []jsonplan.ResourceChange `json:"resource_changes"` + ResourceDrift []jsonplan.ResourceChange `json:"resource_drift"` + RelevantAttributes []jsonplan.ResourceAttr `json:"relevant_attributes"` + + ProviderFormatVersion string `json:"provider_format_version"` + ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` +} + +func (plan Plan) getSchema(change jsonplan.ResourceChange) *jsonprovider.Schema { + switch change.Mode { + case jsonstate.ManagedResourceMode: + return plan.ProviderSchemas[change.ProviderName].ResourceSchemas[change.Type] + case jsonstate.DataResourceMode: + return plan.ProviderSchemas[change.ProviderName].DataSourceSchemas[change.Type] + default: + panic("found unrecognized resource mode: " + change.Mode) + } +} + +func (plan Plan) renderHuman(renderer Renderer, mode plans.Mode, opts ...PlanRendererOpt) { + checkOpts := func(target PlanRendererOpt) bool { + for _, opt := range opts { + if opt == target { + return true + } + } + return false + } + + diffs := precomputeDiffs(plan, mode) + haveRefreshChanges := renderHumanDiffDrift(renderer, diffs, mode) + + willPrintResourceChanges := false + counts := make(map[plans.Action]int) + var changes []diff + for _, diff := range diffs.changes { + action := jsonplan.UnmarshalActions(diff.change.Change.Actions) + if action == plans.NoOp && !diff.Moved() { + // Don't show anything for NoOp changes. 
+ continue + } + if action == plans.Delete && diff.change.Mode != jsonstate.ManagedResourceMode { + // Don't render anything for deleted data sources. + continue + } + + changes = append(changes, diff) + + // Don't count move-only changes + if action != plans.NoOp { + willPrintResourceChanges = true + counts[action]++ + } + } + + // Precompute the outputs early, so we can make a decision about whether we + // display the "there are no changes messages". + outputs := renderHumanDiffOutputs(renderer, diffs.outputs) + + if len(changes) == 0 && len(outputs) == 0 { + // If we didn't find any changes to report at all then this is a + // "No changes" plan. How we'll present this depends on whether + // the plan is "applyable" and, if so, whether it had refresh changes + // that we already would've presented above. + + if checkOpts(Errored) { + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println() + } + renderer.Streams.Print( + renderer.Colorize.Color("\n[reset][bold][red]Planning failed.[reset][bold] Terraform encountered an error while generating this plan.[reset]\n\n"), + ) + } else { + switch mode { + case plans.RefreshOnlyMode: + if haveRefreshChanges { + // We already generated a sufficient prompt about what will + // happen if applying this change above, so we don't need to + // say anything more. 
+ return + } + + renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure still matches the configuration.[reset]\n\n")) + renderer.Streams.Println(format.WordWrap( + "Terraform has checked that the real remote objects still match the result of your most recent changes, and found no differences.", + renderer.Streams.Stdout.Columns())) + case plans.DestroyMode: + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + fmt.Fprintln(renderer.Streams.Stdout.File) + } + renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] No objects need to be destroyed.[reset]\n\n")) + renderer.Streams.Println(format.WordWrap( + "Either you have not created any objects yet or the existing objects were already deleted outside of Terraform.", + renderer.Streams.Stdout.Columns())) + default: + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println("") + } + renderer.Streams.Print( + renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure matches the configuration.[reset]\n\n"), + ) + + if haveRefreshChanges { + if !checkOpts(CanNotApply) { + // In this case, applying this plan will not change any + // remote objects but _will_ update the state to match what + // we detected during refresh, so we'll reassure the user + // about that. + renderer.Streams.Println(format.WordWrap( + "Your configuration already matches the changes detected above, so applying this plan will only update the state to include the changes detected above and won't change any real infrastructure.", + renderer.Streams.Stdout.Columns(), + )) + } else { + // In this case we detected changes during refresh but this isn't + // a planning mode where we consider those to be applyable. 
The + // user must re-run in refresh-only mode in order to update the + // state to match the upstream changes. + suggestion := "." + if !renderer.RunningInAutomation { + // The normal message includes a specific command line to run. + suggestion = ":\n terraform apply -refresh-only" + } + renderer.Streams.Println(format.WordWrap( + "Your configuration already matches the changes detected above. If you'd like to update the Terraform state to match, create and apply a refresh-only plan"+suggestion, + renderer.Streams.Stdout.Columns(), + )) + } + return + } + + // If we get down here then we're just in the simple situation where + // the plan isn't applyable at all. + renderer.Streams.Println(format.WordWrap( + "Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed.", + renderer.Streams.Stdout.Columns(), + )) + } + } + } + + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println() + } + + if willPrintResourceChanges { + renderer.Streams.Println(format.WordWrap( + "\nTerraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols:", + renderer.Streams.Stdout.Columns())) + if counts[plans.Create] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Create))) + } + if counts[plans.Update] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Update))) + } + if counts[plans.Delete] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Delete))) + } + if counts[plans.DeleteThenCreate] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.DeleteThenCreate))) + } + if counts[plans.CreateThenDelete] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.CreateThenDelete))) + } + if counts[plans.Read] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Read))) + } + } + + if len(changes) > 0 { + if checkOpts(Errored) { + renderer.Streams.Printf("\nTerraform planned the following actions, but then encountered a problem:\n") + } else { + renderer.Streams.Printf("\nTerraform will perform the following actions:\n") + } + + for _, change := range changes { + diff, render := renderHumanDiff(renderer, change, proposedChange) + if render { + fmt.Fprintln(renderer.Streams.Stdout.File) + renderer.Streams.Println(diff) + } + } + + renderer.Streams.Printf( + renderer.Colorize.Color("\n[bold]Plan:[reset] %d to add, %d to change, %d to destroy.\n"), + counts[plans.Create]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete], + counts[plans.Update], + counts[plans.Delete]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete]) + } + + if len(outputs) > 0 { + renderer.Streams.Print("\nChanges to Outputs:\n") + renderer.Streams.Printf("%s\n", outputs) + + if len(counts) == 0 { + // If we have output changes but not resource changes then we + // won't have output any indication about the changes at all yet, + // so we need some extra context about what it would mean to + // apply a 
change that _only_ includes output changes. + renderer.Streams.Println(format.WordWrap( + "\nYou can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure.", + renderer.Streams.Stdout.Columns())) + } + } +} + +func renderHumanDiffOutputs(renderer Renderer, outputs map[string]computed.Diff) string { + var rendered []string + + var keys []string + escapedKeys := make(map[string]string) + var escapedKeyMaxLen int + for key := range outputs { + escapedKey := renderers.EnsureValidAttributeName(key) + keys = append(keys, key) + escapedKeys[key] = escapedKey + if len(escapedKey) > escapedKeyMaxLen { + escapedKeyMaxLen = len(escapedKey) + } + } + sort.Strings(keys) + + for _, key := range keys { + output := outputs[key] + if output.Action != plans.NoOp { + rendered = append(rendered, fmt.Sprintf("%s %-*s = %s", renderer.Colorize.Color(format.DiffActionSymbol(output.Action)), escapedKeyMaxLen, escapedKeys[key], output.RenderHuman(0, computed.NewRenderHumanOpts(renderer.Colorize)))) + } + } + return strings.Join(rendered, "\n") +} + +func renderHumanDiffDrift(renderer Renderer, diffs diffs, mode plans.Mode) bool { + var drs []diff + + // In refresh-only mode, we show all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be rendered in the planned changes, so + // we skip them here. + + if mode == plans.RefreshOnlyMode { + drs = diffs.drift + } else { + for _, dr := range diffs.drift { + if dr.diff.Action != plans.NoOp { + drs = append(drs, dr) + } + } + } + + if len(drs) == 0 { + return false + } + + // If the overall plan is empty, and it's not a refresh only plan then we + // won't show any drift changes. 
+ if diffs.Empty() && mode != plans.RefreshOnlyMode { + return false + } + + renderer.Streams.Print(renderer.Colorize.Color("\n[bold][cyan]Note:[reset][bold] Objects have changed outside of Terraform\n")) + renderer.Streams.Println() + renderer.Streams.Print(format.WordWrap( + "Terraform detected the following changes made outside of Terraform since the last \"terraform apply\" which may have affected this plan:\n", + renderer.Streams.Stdout.Columns())) + + for _, drift := range drs { + diff, render := renderHumanDiff(renderer, drift, detectedDrift) + if render { + renderer.Streams.Println() + renderer.Streams.Println(diff) + } + } + + switch mode { + case plans.RefreshOnlyMode: + renderer.Streams.Println(format.WordWrap( + "\n\nThis is a refresh-only plan, so Terraform will not take any actions to undo these. If you were expecting these changes then you can apply this plan to record the updated values in the Terraform state without changing any remote objects.", + renderer.Streams.Stdout.Columns(), + )) + default: + renderer.Streams.Println(format.WordWrap( + "\n\nUnless you have made equivalent changes to your configuration, or ignored the relevant attributes using ignore_changes, the following plan may include actions to undo or respond to these changes.", + renderer.Streams.Stdout.Columns(), + )) + } + + return true +} + +func renderHumanDiff(renderer Renderer, diff diff, cause string) (string, bool) { + + // Internally, our computed diffs can't tell the difference between a + // replace action (eg. CreateThenDestroy, DestroyThenCreate) and a simple + // update action. So, at the top most level we rely on the action provided + // by the plan itself instead of what we compute. Nested attributes and + // blocks however don't have the replace type of actions, so we can trust + // the computed actions of these. 
+ + action := jsonplan.UnmarshalActions(diff.change.Change.Actions) + if action == plans.NoOp && (len(diff.change.PreviousAddress) == 0 || diff.change.PreviousAddress == diff.change.Address) { + // Skip resource changes that have nothing interesting to say. + return "", false + } + + var buf bytes.Buffer + buf.WriteString(renderer.Colorize.Color(resourceChangeComment(diff.change, action, cause))) + buf.WriteString(fmt.Sprintf("%s %s %s", renderer.Colorize.Color(format.DiffActionSymbol(action)), resourceChangeHeader(diff.change), diff.diff.RenderHuman(0, computed.NewRenderHumanOpts(renderer.Colorize)))) + return buf.String(), true +} + +func resourceChangeComment(resource jsonplan.ResourceChange, action plans.Action, changeCause string) string { + var buf bytes.Buffer + + dispAddr := resource.Address + if len(resource.Deposed) != 0 { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, resource.Deposed) + } + + switch action { + case plans.Create: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr)) + case plans.Read: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be read during apply", dispAddr)) + switch resource.ActionReason { + case jsonplan.ResourceInstanceReadBecauseConfigUnknown: + buf.WriteString("\n # (config refers to values not yet known)") + case jsonplan.ResourceInstanceReadBecauseDependencyPending: + buf.WriteString("\n # (depends on a resource or a module with changes pending)") + } + case plans.Update: + switch changeCause { + case proposedChange: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr)) + case detectedDrift: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has changed", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] update (unknown reason %s)", dispAddr, changeCause)) + } + case plans.CreateThenDelete, plans.DeleteThenCreate: + switch resource.ActionReason { + case jsonplan.ResourceInstanceReplaceBecauseTainted: + 
buf.WriteString(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced[reset]", dispAddr)) + case jsonplan.ResourceInstanceReplaceByRequest: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset], as requested", dispAddr)) + case jsonplan.ResourceInstanceReplaceByTriggers: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset] due to changes in replace_triggered_by", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced[reset]", dispAddr)) + } + case plans.Delete: + switch changeCause { + case proposedChange: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed[reset]", dispAddr)) + case detectedDrift: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has been deleted", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] delete (unknown reason %s)", dispAddr, changeCause)) + } + // We can sometimes give some additional detail about why we're + // proposing to delete. We show this as additional notes, rather than + // as additional wording in the main action statement, in an attempt + // to make the "will be destroyed" message prominent and consistent + // in all cases, for easier scanning of this often-risky action. 
+ switch resource.ActionReason { + case jsonplan.ResourceInstanceDeleteBecauseNoResourceConfig: + buf.WriteString(fmt.Sprintf("\n # (because %s.%s is not in configuration)", resource.Type, resource.Name)) + case jsonplan.ResourceInstanceDeleteBecauseNoMoveTarget: + buf.WriteString(fmt.Sprintf("\n # (because %s was moved to %s, which is not in configuration)", resource.PreviousAddress, resource.Address)) + case jsonplan.ResourceInstanceDeleteBecauseNoModule: + // FIXME: Ideally we'd truncate addr.Module to reflect the earliest + // step that doesn't exist, so it's clearer which call this refers + // to, but we don't have enough information out here in the UI layer + // to decide that; only the "expander" in Terraform Core knows + // which module instance keys are actually declared. + buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", resource.ModuleAddress)) + case jsonplan.ResourceInstanceDeleteBecauseWrongRepetition: + var index interface{} + if resource.Index != nil { + if err := json.Unmarshal(resource.Index, &index); err != nil { + panic(err) + } + } + + // We have some different variations of this one + switch index.(type) { + case nil: + buf.WriteString("\n # (because resource uses count or for_each)") + case float64: + buf.WriteString("\n # (because resource does not use count)") + case string: + buf.WriteString("\n # (because resource does not use for_each)") + } + case jsonplan.ResourceInstanceDeleteBecauseCountIndex: + buf.WriteString(fmt.Sprintf("\n # (because index [%s] is out of range for count)", resource.Index)) + case jsonplan.ResourceInstanceDeleteBecauseEachKey: + buf.WriteString(fmt.Sprintf("\n # (because key [%s] is not in for_each map)", resource.Index)) + } + if len(resource.Deposed) != 0 { + // Some extra context about this unusual situation. 
+ buf.WriteString("\n # (left over from a partially-failed replacement of this instance)") + } + case plans.NoOp: + if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address { + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has moved to [bold]%s[reset]", resource.PreviousAddress, dispAddr)) + break + } + fallthrough + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString("\n") + + if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address && action != plans.NoOp { + buf.WriteString(fmt.Sprintf(" # [reset](moved from %s)\n", resource.PreviousAddress)) + } + + return buf.String() +} + +func resourceChangeHeader(change jsonplan.ResourceChange) string { + mode := "resource" + if change.Mode != jsonstate.ManagedResourceMode { + mode = "data" + } + return fmt.Sprintf("%s \"%s\" \"%s\"", mode, change.Type, change.Name) +} + +func actionDescription(action plans.Action) string { + switch action { + case plans.Create: + return " [green]+[reset] create" + case plans.Delete: + return " [red]-[reset] destroy" + case plans.Update: + return " [yellow]~[reset] update in-place" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset] create replacement and then destroy" + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset] destroy and then create replacement" + case plans.Read: + return " [cyan]<=[reset] read (data resources)" + default: + panic(fmt.Sprintf("unrecognized change type: %s", action.String())) + } +} diff --git a/command/jsonformat/plan_test.go b/command/jsonformat/plan_test.go new file mode 100644 index 000000000000..124aa8b66574 --- /dev/null +++ b/command/jsonformat/plan_test.go @@ -0,0 +1,6956 @@ +package jsonformat + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/colorstring" + 
"github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/jsonformat/differ" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" +) + +func TestRenderHuman_EmptyPlan(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + streams, done := terminal.StreamsForTesting(t) + + plan := Plan{} + + renderer := Renderer{Colorize: color, Streams: streams} + plan.renderHuman(renderer, plans.NormalMode) + + want := ` +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration +and found no differences, so no changes are needed. +` + + got := done(t).Stdout() + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestRenderHuman_EmptyOutputs(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + streams, done := terminal.StreamsForTesting(t) + + outputVal, _ := json.Marshal("some-text") + plan := Plan{ + OutputChanges: map[string]jsonplan.Change{ + "a_string": { + Actions: []string{"no-op"}, + Before: outputVal, + After: outputVal, + }, + }, + } + + renderer := Renderer{Colorize: color, Streams: streams} + plan.renderHuman(renderer, plans.NormalMode) + + want := ` +No changes. Your infrastructure matches the configuration. 
+ +Terraform has compared your real infrastructure against your configuration +and found no differences, so no changes are needed. +` + + got := done(t).Stdout() + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestResourceChange_primitiveTypes(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + }`, + }, + "creation (null string)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null" + }`, + }, + "creation (null string with extra whitespace)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null "), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null " + }`, + }, + "creation (object 
with quoted keys)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "object": cty.ObjectVal(map[string]cty.Value{ + "unquoted": cty.StringVal("value"), + "quoted:key": cty.StringVal("some-value"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "object": {Type: cty.Object(map[string]cty.Type{ + "unquoted": cty.String, + "quoted:key": cty.String, + }), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + object = { + + "quoted:key" = "some-value" + + unquoted = "value" + } + }`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "deletion of deposed object": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + DeposedKey: states.DeposedKey("byebye"), + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example (deposed object byebye) will be destroyed + # (left over from a partially-failed replacement of this instance) + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "deletion (empty 
string)": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "intentionally_long": cty.StringVal(""), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "intentionally_long": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "string in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + }`, + }, + "update with quoted key": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": cty.StringVal("https://example.com/saml"), + "zeta": cty.StringVal("alpha"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": cty.StringVal("https://saml.example.com"), + "zeta": cty.StringVal("alpha"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: 
true, Computed: true}, + "saml:aud": {Type: cty.String, Optional: true}, + "zeta": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ "saml:aud" = "https://example.com/saml" -> "https://saml.example.com" + # (1 unchanged attribute hidden) + }`, + }, + "string force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "string in-place update (null values)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "unchanged": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "unchanged": cty.NullVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "unchanged": 
{Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original +long +multi-line +string +field`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +extremely long +multi-line +string +field`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT + original + - long + + extremely long + multi-line + string + field + EOT + }`, + }, + "addition of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = 
"i-02ae66f368e8518a9" -> (known after apply) + + more_lines = <<-EOT + original + new line + EOT + }`, + }, + "force-new update of multi-line string field": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "more_lines"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT # forces replacement + original + + new line + EOT + }`, + }, + + // Sensitive + + "creation with sensitive field": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "password": cty.StringVal("top-secret"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # 
test_instance.example will be created + + resource "test_instance" "example" { + + conn_info = { + + password = (sensitive value) + + user = "not-secret" + } + + id = (known after apply) + + password = (sensitive value) + }`, + }, + "update with equal sensitive field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("blah"), + "str": cty.StringVal("before"), + "password": cty.StringVal("top-secret"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "str": cty.StringVal("after"), + "password": cty.StringVal("top-secret"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "str": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "blah" -> (known after apply) + ~ str = "before" -> "after" + # (1 unchanged attribute hidden) + }`, + }, + + // tainted objects + "replace tainted resource": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced +-/+ resource "test_instance" 
"example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + }`, + }, + "force replacement with empty before value": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + + forced = "example" # forces replacement + name = "name" + }`, + }, + "force replacement with empty before value legacy": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal(""), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + + forced = "example" # forces replacement + name = "name" + }`, + }, + "read during apply because of unknown configuration": { + Action: plans.Read, + ActionReason: 
plans.ResourceInstanceReadBecauseConfigUnknown, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + # (config refers to values not yet known) + <= data "test_instance" "example" { + name = "name" + }`, + }, + "read during apply because of pending changes to upstream dependency": { + Action: plans.Read, + ActionReason: plans.ResourceInstanceReadBecauseDependencyPending, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + # (depends on a resource or a module with changes pending) + <= data "test_instance" "example" { + name = "name" + }`, + }, + "read during apply for unspecified reason": { + Action: plans.Read, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + <= data "test_instance" "example" { + name = "name" + }`, + }, + "show all identifying attributes even if unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "bar": cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "bar": cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + "foo": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, + "tags": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + name = "alice" + tags = { + "name" = "bob" + } + # (2 unchanged attributes hidden) + }`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_JSON(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "str": "value", + "list":["a","b", 234, true], + "obj": {"key": "val"} + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be 
created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode( + { + + list = [ + + "a", + + "b", + + 234, + + true, + ] + + obj = { + + key = "val" + } + + str = "value" + } + ) + }`, + }, + "in-place update of object": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value","ccc": 5}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = "new_value" + - ccc = 5 + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "in-place update of object with quoted keys": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "c:c": "old_value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "b:bb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known 
after apply) + ~ json_field = jsonencode( + ~ { + + "b:bb" = "new_value" + - "c:c" = "old_value" + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "in-place update (from empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": []}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + + "value", + ] + } + ) + }`, + }, + "in-place update (to empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": []}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + - "value", + ] + } + ) + }`, + }, + "in-place update (tuple of different types)": { + Action: plans.Update, + Mode: 
addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + 42, + ~ { + ~ foo = "bar" -> "baz" + }, + "value", + ] + } + ) + }`, + }, + "force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = "new_value" + # (1 unchanged attribute hidden) + } # forces replacement + ) + }`, + }, + "in-place update (whitespace change)": { + Action: plans.Update, + 
Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( # whitespace changes + { + aaa = "value" + bbb = "another" + } + ) + }`, + }, + "force-new update (whitespace change)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( # whitespace changes force replacement + { + aaa = "value" + bbb = "another" + } + ) + }`, + }, + "creation (empty)": { + Action: plans.Create, + Mode: 
addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode({}) + }`, + }, + "JSON list item removal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second","third"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["first","second"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 unchanged element hidden) + "second", + - "third", + ] + ) + }`, + }, + "JSON list item addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["first","second","third"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: 
cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 unchanged element hidden) + "second", + + "third", + ] + ) + }`, + }, + "JSON list object addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"first":"111"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"first":"111","second":"222"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + second = "222" + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "JSON object with nested list": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{ + "Statement": ["first"] + }`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "Statement": ["first", "second"] + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + 
ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ Statement = [ + "first", + + "second", + ] + } + ) + }`, + }, + "JSON list of objects - adding item": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + ) + }`, + }, + "JSON list of objects - removing item": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" 
-> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + - { + - two = "222" + }, + { + three = "333" + }, + ] + ) + }`, + }, + "JSON object with list of objects": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + } + ) + }`, + }, + "JSON object double nested lists": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + ~ { 
+ ~ another_list = [ + "111", + + "222", + ] + }, + ] + } + ) + }`, + }, + "in-place update from object to tuple": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["aaa", 42, "something"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + - aaa = [ + - 42, + - { + - foo = "bar" + }, + - "value", + ] + } -> [ + + "aaa", + + 42, + + "something", + ] + ) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_listObject(t *testing.T) { + testCases := map[string]testCase{ + // https://github.com/hashicorp/terraform/issues/30641 + "updating non-identifying attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": cty.StringVal("staging"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + "name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.UnknownVal(cty.String), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": cty.StringVal("staging"), + "status": cty.StringVal("EXPLODED"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + "name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "accounts": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "id": cty.String, + "name": cty.String, + "status": cty.String, + })), + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ accounts = [ + { + id = "1" + name = "production" + status = "ACTIVE" + }, + ~ { + id = "2" + name = "staging" + ~ status = "ACTIVE" -> "EXPLODED" + }, + { + id = "3" + name = "disaster-recovery" + status = "ACTIVE" + }, + ] + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: 
true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": 
cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + # (1 unchanged element hidden) + "bbbb", + + "cccc", + "dddd", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "list_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ # forces 
replacement + "aaaa", + + "bbbb", + "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + "bbbb", + - "cccc", + "dddd", + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty list": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be 
created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + list_field = [] + }`, + }, + "in-place update - full to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + - "bbbb", + - "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - null to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # 
test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [] + # (1 unchanged attribute hidden) + }`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "update - two new unknown elements": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + 
cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + + (known after apply), + "cccc", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveTuple(t *testing.T) { + testCases := map[string]testCase{ + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "tuple_field": {Type: cty.Tuple([]cty.Type{cty.String, cty.String, cty.String, cty.String, cty.String}), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ tuple_field = [ 
+ # (1 unchanged element hidden) + "bbbb", + ~ "dddd" -> "cccc", + "eeee", + # (1 unchanged element hidden) + ] + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveSet(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: 
true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": 
cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "set_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ # forces replacement + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "cccc", + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), 
+ After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + set_field = [] + }`, + }, + "in-place update - full to empty set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - null to empty set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update to unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.UnknownVal(cty.Set(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] -> (known after apply) + # (1 unchanged attribute hidden) + }`, + }, + "in-place update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + 
"set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "bbbb", + + (known after apply), + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_map(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.NullVal(cty.Map(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = 
"i-02ae66f368e8518a9" -> (known after apply) + + map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "b:b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "b" = "bbbb" + + "b:b" = "bbbb" + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "map_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { # forces replacement + + "b" = "bbbb" + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + - "a" = "aaaa" -> null + - "c" = "cccc" -> null + # (1 unchanged element hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + map_field = {} + }`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + 
"id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.UnknownVal(cty.String), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + ~ "b" = "bbbb" -> (known after apply) + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": 
cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + })}), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device {} + }`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + }`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: 
cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged attribute hidden) + }, + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingList), + 
ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ # forces replacement + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + }, + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + }`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.EmptyTupleVal, + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.TupleVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.True, + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + + block { + + attr = true + } + }`, + }, + "in-place sequence update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ list { + ~ attr = "x" -> "y" + } + ~ list { + ~ attr = "y" -> "z" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": 
cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": 
cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("75GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": cty.StringVal("25GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ size = "50GB" -> "75GB" + # (1 unchanged attribute hidden) + }, + ~ { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSet(t *testing.T) { + testCases := map[string]testCase{ + "creation from null - sensitive set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "ami": cty.String, + "disks": cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.Set(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + })), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + 
volume_type = "gp2" + } + }`, + }, + "in-place update - creation - sensitive set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - marking set sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. The value is unchanged. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + }, + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + }`, + }, + "force-new update (whole block)": { + Action: 
plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { # forces replacement + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + + { # forces replacement + + mount_point = "/var/diskb" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { # forces replacement + - volume_type = "gp2" -> null + } + + root_block_device { # forces replacement + + volume_type = "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + }`, + }, + "in-place update - empty nested sets": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + RequiredReplace: 
cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [] + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - null insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + 
"disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMap(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + }`, + }, + "in-place update - change attr": { + 
Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "disk_2": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/disk2"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_2" = { + + mount_point = "/var/disk2" + + size = "50GB" + }, + # (1 unchanged element hidden) + } + id = "i-02ae66f368e8518a9" + + + root_block_device "b" { + + new_field = "new_value" + + volume_type = "gp2" + } + + # (1 unchanged block hidden) + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.StringVal("a")}, + }, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { # forces replacement + ~ size = "50GB" -> "100GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { # forces replacement + ~ volume_type = "gp2" -> "different" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": 
cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } + id = "i-02ae66f368e8518a9" + + - root_block_device "a" { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + 
}), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - insertion sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.StringVal("disk_a")}, + cty.GetAttrStep{Name: "mount_point"}, + }, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + 
"disk_a" = { + + mount_point = (sensitive value) + + size = "50GB" + }, + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + }`, + }, + "in-place update - multiple blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ 
+ "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), 
+ }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + }`, + }, + "in-place update - multiple different unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + 
}), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + }`, + }, + "in-place update - multiple different blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = 
"i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple different blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple different blocks 
changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + }`, + }, + "in-place update - mixed blocks unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), 
+ "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (4 unchanged blocks hidden) + }`, + }, + "in-place update - mixed blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (2 unchanged 
blocks hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSingle(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + 
"size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disk = { + + mount_point = "/var/diska" + + size = "50GB" + } + id = "i-02ae66f368e8518a9" + + + root_block_device {} + }`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disk"}, + cty.GetAttrStep{Name: "mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged attribute hidden) + } + id = 
"i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disk"}}, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { # forces replacement + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": 
cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disk = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + } -> null + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + }`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.NullVal(cty.Object(map[string]cty.Type{ + "attr": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> (known after apply) + ~ size = "50GB" -> (known after apply) + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("25GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + 
}), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMapSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": 
cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedListSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ 
+ "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSetSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": 
cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_actionReason(t *testing.T) { + emptySchema := &configschema.Block{} + nullVal := cty.NullVal(cty.EmptyObject) 
+ emptyVal := cty.EmptyObjectVal + + testCases := map[string]testCase{ + "delete for no particular reason": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (NoKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.NoKey, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + # (because resource uses count or for_each) + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (IntKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because resource does not use count) + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (StringKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("a"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["a"] will be destroyed + # (because resource does not use for_each) + - resource "test_instance" "example" {}`, + }, + "delete because no resource configuration": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, + 
ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo.test_instance.example will be destroyed + # (because test_instance.example is not in configuration) + - resource "test_instance" "example" {}`, + }, + "delete because no module": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo[1].test_instance.example will be destroyed + # (because module.foo[1] is not in configuration) + - resource "test_instance" "example" {}`, + }, + "delete because out of range for count": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseCountIndex, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because index [1] is out of range for count) + - resource "test_instance" "example" {}`, + }, + "delete because out of range for for_each": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseEachKey, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("boop"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["boop"] will be destroyed + # (because key ["boop"] is not in for_each map) + - resource "test_instance" "example" {}`, + }, + "replace for no particular reason (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: 
emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace for no particular reason (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {}`, + }, + "replace by request (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested +-/+ resource "test_instance" "example" {}`, + }, + "replace by request (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested ++/- resource "test_instance" "example" {}`, + }, + "replace because tainted (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace because tainted (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: 
emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so must be replaced ++/- resource "test_instance" "example" {}`, + }, + "replace because cannot update (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. + // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace because cannot update (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. 
+ // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {}`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_sensitiveVariable(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-123"), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "nested_block_list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + // Nested blocks/sets will mark the whole set/block as sensitive + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_list"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: 
cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = (sensitive value) + + id = "i-02ae66f368e8518a9" + + list_field = [ + + "hello", + + (sensitive value), + + "!", + ] + + map_key = { + + "breakfast" = 800 + + "dinner" = (sensitive value) + } + + map_whole = (sensitive value) + + + nested_block_list { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + + nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - before sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "special": cty.BoolVal(false), + "some_number": cty.NumberIntVal(2), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("."), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1900), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: 
cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this attribute value will no longer be marked as sensitive 
+ # after applying this change. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + - (sensitive value), + + ".", + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + - nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + + nested_block_set { + + an_attr = "changed" + } + }`, + }, + "in-place update - after sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(700), + "dinner": cty.NumberIntVal(2100), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("address")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_single"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_single": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - "hello", + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "breakfast" = 800 -> 700 + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ "dinner" = (sensitive value) + } + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ map_whole = (sensitive value) + + # Warning: this block will be marked as sensitive and will not + # display in UI output after applying this change. + ~ nested_block_single { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - both sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1800), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.UnknownVal(cty.String), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_map": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingMap, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - (sensitive value), + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + ~ map_whole = (sensitive value) + + ~ nested_block_map "foo" { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - value unchanged, sensitivity changes": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this 
attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ (sensitive value), + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + }), + After: cty.NullVal(cty.EmptyObject), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + 
"list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - ami = (sensitive value) -> null + - id = "i-02ae66f368e8518a9" -> null + - list_field = [ + - "hello", + - (sensitive value), + ] -> null + - map_key = { + - "breakfast" = 800 + - "dinner" = (sensitive value) + } -> null + - map_whole = (sensitive value) -> null + + - nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + }`, + }, + "update with sensitive value forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Required: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + cty.GetAttrPath("nested_block_set"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + + - nested_block_set { # forces replacement + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + nested_block_set { # forces replacement + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "update with sensitive attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "update with sensitive nested type attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("new-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + 
cty.GetAttrPath("conn_info"), + cty.GetAttrPath("password"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ conn_info = { # forces replacement + ~ password = (sensitive value) + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_moved(t *testing.T) { + prevRunAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "previous", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + testCases := map[string]testCase{ + "moved and updated": { + PrevRunAddr: prevRunAddr, + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("boop"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + # (moved from test_instance.previous) + ~ resource "test_instance" "example" { + ~ bar = "baz" -> "boop" + id = "12345" + # (1 unchanged attribute hidden) + }`, + }, + "moved without changes": { + PrevRunAddr: prevRunAddr, + Action: plans.NoOp, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + Schema: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.previous has moved to test_instance.example + resource "test_instance" "example" { + id = "12345" + # (2 unchanged attributes hidden) + }`, + }, + } + + runTestCases(t, testCases) +} + +type testCase struct { + Action plans.Action + ActionReason plans.ResourceInstanceChangeActionReason + ModuleInst addrs.ModuleInstance + Mode addrs.ResourceMode + InstanceKey addrs.InstanceKey + DeposedKey states.DeposedKey + Before cty.Value + BeforeValMarks []cty.PathValueMarks + AfterValMarks []cty.PathValueMarks + After cty.Value + Schema *configschema.Block + RequiredReplace cty.PathSet + ExpectedOutput string + PrevRunAddr addrs.AbsResourceInstance +} + +func runTestCases(t *testing.T, testCases map[string]testCase) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ty := tc.Schema.ImpliedType() + + beforeVal := tc.Before + switch { // Some fixups to make the test cases a little easier to write + case beforeVal.IsNull(): + beforeVal = cty.NullVal(ty) // allow mistyped nulls + case !beforeVal.IsKnown(): + beforeVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + afterVal := tc.After + switch { // Some fixups to make the test cases a little easier to write + case afterVal.IsNull(): + afterVal = cty.NullVal(ty) // allow mistyped nulls + case !afterVal.IsKnown(): + afterVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + addr := addrs.Resource{ + Mode: tc.Mode, + Type: "test_instance", + Name: "example", + }.Instance(tc.InstanceKey).Absolute(tc.ModuleInst) + + prevRunAddr := tc.PrevRunAddr + // If no previous run address is given, reuse the current address + // to make initialization easier + if 
prevRunAddr.Resource.Resource.Type == "" { + prevRunAddr = addr + } + + beforeDynamicValue, err := plans.NewDynamicValue(beforeVal, ty) + if err != nil { + t.Fatalf("failed to create dynamic before value: " + err.Error()) + } + + afterDynamicValue, err := plans.NewDynamicValue(afterVal, ty) + if err != nil { + t.Fatalf("failed to create dynamic after value: " + err.Error()) + } + + src := &plans.ResourceInstanceChangeSrc{ + ChangeSrc: plans.ChangeSrc{ + Action: tc.Action, + Before: beforeDynamicValue, + BeforeValMarks: tc.BeforeValMarks, + After: afterDynamicValue, + AfterValMarks: tc.AfterValMarks, + }, + + Addr: addr, + PrevRunAddr: prevRunAddr, + DeposedKey: tc.DeposedKey, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ActionReason: tc.ActionReason, + RequiredReplace: tc.RequiredReplace, + } + + tfschemas := &terraform.Schemas{ + Providers: map[addrs.Provider]*providers.Schemas{ + src.ProviderAddr.Provider: { + ResourceTypes: map[string]*configschema.Block{ + src.Addr.Resource.Resource.Type: tc.Schema, + }, + DataSources: map[string]*configschema.Block{ + src.Addr.Resource.Resource.Type: tc.Schema, + }, + }, + }, + } + jsonchanges, err := jsonplan.MarshalResourceChanges([]*plans.ResourceInstanceChangeSrc{src}, tfschemas) + if err != nil { + t.Errorf("failed to marshal resource changes: " + err.Error()) + return + } + + jsonschemas := jsonprovider.MarshalForRenderer(tfschemas) + change := structured.FromJsonChange(jsonchanges[0].Change, attribute_path.AlwaysMatcher()) + renderer := Renderer{Colorize: color} + diff := diff{ + change: jsonchanges[0], + diff: differ.ComputeDiffForBlock(change, jsonschemas[jsonchanges[0].ProviderName].ResourceSchemas[jsonchanges[0].Type].Block), + } + output, _ := renderHumanDiff(renderer, diff, proposedChange) + if diff := cmp.Diff(output, tc.ExpectedOutput); diff != "" { + t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tc.ExpectedOutput, 
output, diff) + } + }) + } +} + +func TestOutputChanges(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + testCases := map[string]struct { + changes []*plans.OutputChangeSrc + output string + }{ + "new output value": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("bar"), + false, + ), + }, + ` + foo = "bar"`, + }, + "removed output": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.NullVal(cty.DynamicPseudoType), + false, + ), + }, + ` - foo = "bar" -> null`, + }, + "single string change": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.StringVal("baz"), + false, + ), + }, + ` ~ foo = "bar" -> "baz"`, + }, + "element added to list": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("gamma"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + false, + ), + }, + ` ~ foo = [ + # (1 unchanged element hidden) + "beta", + + "gamma", + "delta", + # (1 unchanged element hidden) + ]`, + }, + "multiple outputs changed, one sensitive": { + []*plans.OutputChangeSrc{ + outputChange( + "a", + cty.NumberIntVal(1), + cty.NumberIntVal(2), + false, + ), + outputChange( + "b", + cty.StringVal("hunter2"), + cty.StringVal("correct-horse-battery-staple"), + true, + ), + outputChange( + "c", + cty.BoolVal(false), + cty.BoolVal(true), + false, + ), + }, + ` ~ a = 1 -> 2 + ~ b = (sensitive value) + ~ c = false -> true`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + changes := &plans.Changes{ + Outputs: tc.changes, + } + + outputs, err := jsonplan.MarshalOutputChanges(changes) + if err != nil { + t.Fatalf("failed to marshal 
output changes") + } + + renderer := Renderer{Colorize: color} + diffs := precomputeDiffs(Plan{ + OutputChanges: outputs, + }, plans.NormalMode) + + output := renderHumanDiffOutputs(renderer, diffs.outputs) + if output != tc.output { + t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) + } + }) + } +} + +func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc { + addr := addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{Name: name}, + } + + change := &plans.OutputChange{ + Addr: addr, Change: plans.Change{ + Before: before, + After: after, + }, + Sensitive: sensitive, + } + + changeSrc, err := change.Encode() + if err != nil { + panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err)) + } + + return changeSrc +} + +// A basic test schema using a configurable NestingMode for one (NestedType) attribute and one block +func testSchema(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// A basic test schema using a configurable NestingMode for one (NestedType) +// attribute marked sensitive. 
+func testSchemaSensitive(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + Sensitive: true, + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + } +} + +func testSchemaMultipleBlocks(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + "leaf_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// similar to testSchema with the addition of a "new_field" block +func testSchemaPlus(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: 
&configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "new_field": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} diff --git a/internal/command/jsonformat/renderer.go b/command/jsonformat/renderer.go similarity index 89% rename from internal/command/jsonformat/renderer.go rename to command/jsonformat/renderer.go index 676d6cad7d0f..286790cc418f 100644 --- a/internal/command/jsonformat/renderer.go +++ b/command/jsonformat/renderer.go @@ -5,16 +5,16 @@ import ( "github.com/mitchellh/colorstring" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/differ" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/command/jsonstate" - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/differ" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/jsonstate" + 
viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terminal" ctyjson "github.com/zclconf/go-cty/cty/json" ) diff --git a/command/jsonformat/state.go b/command/jsonformat/state.go new file mode 100644 index 000000000000..cd0164e672fc --- /dev/null +++ b/command/jsonformat/state.go @@ -0,0 +1,108 @@ +package jsonformat + +import ( + "sort" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/command/jsonformat/differ" + "github.com/hashicorp/terraform/command/jsonformat/structured" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/jsonstate" +) + +type State struct { + StateFormatVersion string `json:"state_format_version"` + RootModule jsonstate.Module `json:"root"` + RootModuleOutputs map[string]jsonstate.Output `json:"root_module_outputs"` + + ProviderFormatVersion string `json:"provider_format_version"` + ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` +} + +func (state State) Empty() bool { + return len(state.RootModuleOutputs) == 0 && len(state.RootModule.Resources) == 0 && len(state.RootModule.ChildModules) == 0 +} + +func (state State) GetSchema(resource jsonstate.Resource) *jsonprovider.Schema { + switch resource.Mode { + case jsonstate.ManagedResourceMode: + return state.ProviderSchemas[resource.ProviderName].ResourceSchemas[resource.Type] + case jsonstate.DataResourceMode: + return state.ProviderSchemas[resource.ProviderName].DataSourceSchemas[resource.Type] + default: + panic("found unrecognized resource mode: " + resource.Mode) + } +} + +func (state State) renderHumanStateModule(renderer Renderer, module jsonstate.Module, opts computed.RenderHumanOpts, first bool) { + if len(module.Resources) > 0 && !first { + renderer.Streams.Println() + } + + for _, resource := range module.Resources { + + if !first 
{ + renderer.Streams.Println() + } + + if first { + first = false + } + + if len(resource.DeposedKey) > 0 { + renderer.Streams.Printf("# %s: (deposed object %s)", resource.Address, resource.DeposedKey) + } else if resource.Tainted { + renderer.Streams.Printf("# %s: (tainted)", resource.Address) + } else { + renderer.Streams.Printf("# %s:", resource.Address) + } + + renderer.Streams.Println() + + schema := state.GetSchema(resource) + switch resource.Mode { + case jsonstate.ManagedResourceMode: + change := structured.FromJsonResource(resource) + renderer.Streams.Printf("resource %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) + case jsonstate.DataResourceMode: + change := structured.FromJsonResource(resource) + renderer.Streams.Printf("data %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) + default: + panic("found unrecognized resource mode: " + resource.Mode) + } + + renderer.Streams.Println() + } + + for _, child := range module.ChildModules { + state.renderHumanStateModule(renderer, child, opts, first) + } +} + +func (state State) renderHumanStateOutputs(renderer Renderer, opts computed.RenderHumanOpts) { + + if len(state.RootModuleOutputs) > 0 { + renderer.Streams.Printf("\n\nOutputs:\n\n") + + var keys []string + for key := range state.RootModuleOutputs { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + output := state.RootModuleOutputs[key] + change := structured.FromJsonOutput(output) + ctype, err := ctyjson.UnmarshalType(output.Type) + if err != nil { + // We can actually do this without the type, so even if we fail + // to work out the type let's just render this anyway. 
+ renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForOutput(change).RenderHuman(0, opts)) + } else { + renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForType(change, ctype).RenderHuman(0, opts)) + } + } + } +} diff --git a/command/jsonformat/state_test.go b/command/jsonformat/state_test.go new file mode 100644 index 000000000000..d5511de7465f --- /dev/null +++ b/command/jsonformat/state_test.go @@ -0,0 +1,437 @@ +package jsonformat + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terminal" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +func TestState(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + tests := []struct { + State *format.StateOpts + Want string + }{ + { + &format.StateOpts{ + State: &states.State{}, + Color: color, + Schemas: &terraform.Schemas{}, + }, + "The state file is empty. 
No resources are represented.\n", + }, + { + &format.StateOpts{ + State: basicState(t), + Color: color, + Schemas: testSchemas(), + }, + basicStateOutput, + }, + { + &format.StateOpts{ + State: nestedState(t), + Color: color, + Schemas: testSchemas(), + }, + nestedStateOutput, + }, + { + &format.StateOpts{ + State: deposedState(t), + Color: color, + Schemas: testSchemas(), + }, + deposedNestedStateOutput, + }, + { + &format.StateOpts{ + State: onlyDeposedState(t), + Color: color, + Schemas: testSchemas(), + }, + onlyDeposedOutput, + }, + { + &format.StateOpts{ + State: stateWithMoreOutputs(t), + Color: color, + Schemas: testSchemas(), + }, + stateWithMoreOutputsOutput, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + root, outputs, err := jsonstate.MarshalForRenderer(&statefile.File{ + State: tt.State.State, + }, tt.State.Schemas) + + if err != nil { + t.Errorf("found err: %v", err) + return + } + + streams, done := terminal.StreamsForTesting(t) + renderer := Renderer{ + Colorize: color, + Streams: streams, + } + + renderer.RenderHumanState(State{ + StateFormatVersion: jsonstate.FormatVersion, + RootModule: root, + RootModuleOutputs: outputs, + ProviderFormatVersion: jsonprovider.FormatVersion, + ProviderSchemas: jsonprovider.MarshalForRenderer(tt.State.Schemas), + }) + + result := done(t).All() + if diff := cmp.Diff(result, tt.Want); diff != "" { + t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tt.Want, result, diff) + } + }) + } +} + +func testProvider() *terraform.MockProvider { + p := new(terraform.MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + + p.GetProviderSchemaResponse = testProviderSchema() + + return p +} + +func testProviderSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "woozles": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } +} + +func testSchemas() *terraform.Schemas { + provider := testProvider() + return &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), + }, + } +} + +const basicStateOutput = `# data.test_data_source.data: +data "test_data_source" "data" { + compute = "sure" +} + +# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bar = "bar value" +` + +const nestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const deposedNestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const onlyDeposedOutput = `# test_resource.baz[0]: (deposed 
object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 5678) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const stateWithMoreOutputsOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bool_var = true +int_var = 42 +map_var = { + "first" = "foo" + "second" = "bar" +} +sensitive_var = (sensitive value) +string_var = "string value" +` + +func basicState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "data", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"compute":"sure"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func stateWithMoreOutputs(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetOutputValue("string_var", cty.StringVal("string value"), false) 
+ rootModule.SetOutputValue("int_var", cty.NumberIntVal(42), false) + rootModule.SetOutputValue("bool_var", cty.BoolVal(true), false) + rootModule.SetOutputValue("sensitive_var", cty.StringVal("secret!!!"), true) + rootModule.SetOutputValue("map_var", cty.MapVal(map[string]cty.Value{ + "first": cty.StringVal("foo"), + "second": cty.StringVal("bar"), + }), false) + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func nestedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func deposedState(t *testing.T) *states.State { + state := nestedState(t) + rootModule := state.RootModule() + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: 
addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +// replicate a corrupt resource where only a deposed exists +func onlyDeposedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("5678"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} diff --git a/internal/command/jsonformat/structured/attribute_path/matcher.go b/command/jsonformat/structured/attribute_path/matcher.go similarity index 100% rename from internal/command/jsonformat/structured/attribute_path/matcher.go rename to command/jsonformat/structured/attribute_path/matcher.go diff --git a/internal/command/jsonformat/structured/attribute_path/matcher_test.go b/command/jsonformat/structured/attribute_path/matcher_test.go similarity index 100% rename from internal/command/jsonformat/structured/attribute_path/matcher_test.go rename to command/jsonformat/structured/attribute_path/matcher_test.go diff --git a/command/jsonformat/structured/change.go 
b/command/jsonformat/structured/change.go new file mode 100644 index 000000000000..ed3ed026af43 --- /dev/null +++ b/command/jsonformat/structured/change.go @@ -0,0 +1,277 @@ +package structured + +import ( + "encoding/json" + "reflect" + + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonstate" + viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/plans" +) + +// Change contains the unmarshalled generic interface{} types that are output by +// the JSON functions in the various json packages (such as jsonplan and +// jsonprovider). +// +// A Change can be converted into a computed.Diff, ready for rendering, with the +// ComputeDiffForAttribute, ComputeDiffForOutput, and ComputeDiffForBlock +// functions. +// +// The Before and After fields are actually go-cty values, but we cannot convert +// them directly because of the Terraform Cloud redacted endpoint. The redacted +// endpoint turns sensitive values into strings regardless of their types. +// Because of this, we cannot just do a direct conversion using the ctyjson +// package. We would have to iterate through the schema first, find the +// sensitive values and their mapped types, update the types inside the schema +// to strings, and then go back and do the overall conversion. This isn't +// including any of the more complicated parts around what happens if something +// was sensitive before and isn't sensitive after or vice versa. This would mean +// the type would need to change between the before and after value. It is in +// fact just easier to iterate through the values as generic JSON interfaces. +type Change struct { + + // BeforeExplicit matches AfterExplicit except references the Before value. + BeforeExplicit bool + + // AfterExplicit refers to whether the After value is explicit or + // implicit. 
It is explicit if it has been specified by the user, and + // implicit if it has been set as a consequence of other changes. + // + // For example, explicitly setting a value to null in a list should result + // in After being null and AfterExplicit being true. In comparison, + // removing an element from a list should also result in After being null + // and AfterExplicit being false. Without the explicit information our + // functions would not be able to tell the difference between these two + // cases. + AfterExplicit bool + + // Before contains the value before the proposed change. + // + // The type of the value should be informed by the schema and cast + // appropriately when needed. + Before interface{} + + // After contains the value after the proposed change. + // + // The type of the value should be informed by the schema and cast + // appropriately when needed. + After interface{} + + // Unknown describes whether the After value is known or unknown at the time + // of the plan. In practice, this means the after value should be rendered + // simply as `(known after apply)`. + // + // The concrete value could be a boolean describing whether the entirety of + // the After value is unknown, or it could be a list or a map depending on + // the schema describing whether specific elements or attributes within the + // value are unknown. + Unknown interface{} + + // BeforeSensitive matches Unknown, but references whether the Before value + // is sensitive. + BeforeSensitive interface{} + + // AfterSensitive matches Unknown, but references whether the After value is + // sensitive. + AfterSensitive interface{} + + // ReplacePaths contains a set of paths that point to attributes/elements + // that are causing the overall resource to be replaced rather than simply + // updated. + ReplacePaths attribute_path.Matcher + + // RelevantAttributes contains a set of paths that point attributes/elements + // that we should display. 
Any element/attribute not matched by this Matcher + // should be skipped. + RelevantAttributes attribute_path.Matcher +} + +// FromJsonChange unmarshals the raw []byte values in the jsonplan.Change +// structs into generic interface{} types that can be reasoned about. +func FromJsonChange(change jsonplan.Change, relevantAttributes attribute_path.Matcher) Change { + return Change{ + Before: unmarshalGeneric(change.Before), + After: unmarshalGeneric(change.After), + Unknown: unmarshalGeneric(change.AfterUnknown), + BeforeSensitive: unmarshalGeneric(change.BeforeSensitive), + AfterSensitive: unmarshalGeneric(change.AfterSensitive), + ReplacePaths: attribute_path.Parse(change.ReplacePaths, false), + RelevantAttributes: relevantAttributes, + } +} + +// FromJsonResource unmarshals the raw values in the jsonstate.Resource structs +// into generic interface{} types that can be reasoned about. +func FromJsonResource(resource jsonstate.Resource) Change { + return Change{ + // We model resource formatting as NoOps. + Before: unwrapAttributeValues(resource.AttributeValues), + After: unwrapAttributeValues(resource.AttributeValues), + + // We have some sensitive values, but we don't have any unknown values. + Unknown: false, + BeforeSensitive: unmarshalGeneric(resource.SensitiveValues), + AfterSensitive: unmarshalGeneric(resource.SensitiveValues), + + // We don't display replacement data for resources, and all attributes + // are relevant. + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + } +} + +// FromJsonOutput unmarshals the raw values in the jsonstate.Output structs into +// generic interface{} types that can be reasoned about. +func FromJsonOutput(output jsonstate.Output) Change { + return Change{ + // We model resource formatting as NoOps. + Before: unmarshalGeneric(output.Value), + After: unmarshalGeneric(output.Value), + + // We have some sensitive values, but we don't have any unknown values. 
+ Unknown: false, + BeforeSensitive: output.Sensitive, + AfterSensitive: output.Sensitive, + + // We don't display replacement data for resources, and all attributes + // are relevant. + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + } +} + +// FromJsonViewsOutput unmarshals the raw values in the viewsjson.Output structs into +// generic interface{} types that can be reasoned about. +func FromJsonViewsOutput(output viewsjson.Output) Change { + return Change{ + // We model resource formatting as NoOps. + Before: unmarshalGeneric(output.Value), + After: unmarshalGeneric(output.Value), + + // We have some sensitive values, but we don't have any unknown values. + Unknown: false, + BeforeSensitive: output.Sensitive, + AfterSensitive: output.Sensitive, + + // We don't display replacement data for resources, and all attributes + // are relevant. + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + } +} + +// CalculateAction does a very simple analysis to make the best guess at the +// action this change describes. For complex types such as objects, maps, lists, +// or sets it is likely more efficient to work out the action directly instead +// of relying on this function. +func (change Change) CalculateAction() plans.Action { + if (change.Before == nil && !change.BeforeExplicit) && (change.After != nil || change.AfterExplicit) { + return plans.Create + } + if (change.After == nil && !change.AfterExplicit) && (change.Before != nil || change.BeforeExplicit) { + return plans.Delete + } + + if reflect.DeepEqual(change.Before, change.After) && change.AfterExplicit == change.BeforeExplicit && change.IsAfterSensitive() == change.IsBeforeSensitive() { + return plans.NoOp + } + + return plans.Update +} + +// GetDefaultActionForIteration is used to guess what the change could be for +// complex attributes (collections and objects) and blocks. 
+// +// You can't really tell the difference between a NoOp and an Update just by +// looking at the attribute itself as you need to inspect the children. +// +// This function returns a Delete or a Create action if the before or after +// values were null, and returns a NoOp for all other cases. It should be used +// in conjunction with compareActions to calculate the actual action based on +// the actions of the children. +func (change Change) GetDefaultActionForIteration() plans.Action { + if change.Before == nil && change.After == nil { + return plans.NoOp + } + + if change.Before == nil { + return plans.Create + } + if change.After == nil { + return plans.Delete + } + return plans.NoOp +} + +// AsNoOp returns the current change as if it is a NoOp operation. +// +// Basically it replaces all the after values with the before values. +func (change Change) AsNoOp() Change { + return Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: change.BeforeExplicit, + Before: change.Before, + After: change.Before, + Unknown: false, + BeforeSensitive: change.BeforeSensitive, + AfterSensitive: change.BeforeSensitive, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// AsDelete returns the current change as if it is a Delete operation. +// +// Basically it replaces all the after values with nil or false. +func (change Change) AsDelete() Change { + return Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: false, + Before: change.Before, + After: nil, + Unknown: nil, + BeforeSensitive: change.BeforeSensitive, + AfterSensitive: nil, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// AsCreate returns the current change as if it is a Create operation. +// +// Basically it replaces all the before values with nil or false. 
+func (change Change) AsCreate() Change { + return Change{ + BeforeExplicit: false, + AfterExplicit: change.AfterExplicit, + Before: nil, + After: change.After, + Unknown: change.Unknown, + BeforeSensitive: nil, + AfterSensitive: change.AfterSensitive, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +func unmarshalGeneric(raw json.RawMessage) interface{} { + if raw == nil { + return nil + } + + var out interface{} + if err := json.Unmarshal(raw, &out); err != nil { + panic("unrecognized json type: " + err.Error()) + } + return out +} + +func unwrapAttributeValues(values jsonstate.AttributeValues) map[string]interface{} { + out := make(map[string]interface{}) + for key, value := range values { + out[key] = unmarshalGeneric(value) + } + return out +} diff --git a/internal/command/jsonformat/structured/doc.go b/command/jsonformat/structured/doc.go similarity index 100% rename from internal/command/jsonformat/structured/doc.go rename to command/jsonformat/structured/doc.go diff --git a/command/jsonformat/structured/map.go b/command/jsonformat/structured/map.go new file mode 100644 index 000000000000..ea1ea24c3a8e --- /dev/null +++ b/command/jsonformat/structured/map.go @@ -0,0 +1,160 @@ +package structured + +import ( + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" +) + +// ChangeMap is a Change that represents a Map or an Object type, and has +// converted the relevant interfaces into maps for easier access. +type ChangeMap struct { + // Before contains the value before the proposed change. + Before map[string]interface{} + + // After contains the value after the proposed change. + After map[string]interface{} + + // Unknown contains the unknown status of any elements/attributes of this + // map/object. + Unknown map[string]interface{} + + // BeforeSensitive contains the before sensitive status of any + // elements/attributes of this map/object. 
+ BeforeSensitive map[string]interface{} + + // AfterSensitive contains the after sensitive status of any + // elements/attributes of this map/object. + AfterSensitive map[string]interface{} + + // ReplacePaths matches the same attributes in Change exactly. + ReplacePaths attribute_path.Matcher + + // RelevantAttributes matches the same attributes in Change exactly. + RelevantAttributes attribute_path.Matcher +} + +// AsMap converts the Change into an object or map representation by converting +// the internal Before, After, Unknown, BeforeSensitive, and AfterSensitive +// data structures into generic maps. +func (change Change) AsMap() ChangeMap { + return ChangeMap{ + Before: genericToMap(change.Before), + After: genericToMap(change.After), + Unknown: genericToMap(change.Unknown), + BeforeSensitive: genericToMap(change.BeforeSensitive), + AfterSensitive: genericToMap(change.AfterSensitive), + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// GetChild safely packages up a Change object for the given child, handling +// all the cases where the data might be null or a static boolean. 
+func (m ChangeMap) GetChild(key string) Change { + before, beforeExplicit := getFromGenericMap(m.Before, key) + after, afterExplicit := getFromGenericMap(m.After, key) + unknown, _ := getFromGenericMap(m.Unknown, key) + beforeSensitive, _ := getFromGenericMap(m.BeforeSensitive, key) + afterSensitive, _ := getFromGenericMap(m.AfterSensitive, key) + + return Change{ + BeforeExplicit: beforeExplicit, + AfterExplicit: afterExplicit, + Before: before, + After: after, + Unknown: unknown, + BeforeSensitive: beforeSensitive, + AfterSensitive: afterSensitive, + ReplacePaths: m.ReplacePaths.GetChildWithKey(key), + RelevantAttributes: m.RelevantAttributes.GetChildWithKey(key), + } +} + +// ExplicitKeys returns the keys in the Before and After, as opposed to AllKeys +// which also includes keys from the additional meta structures (like the +// sensitive and unknown values). +// +// This function is useful for processing nested attributes and repeated blocks +// where the unknown and sensitive structs contain information about the actual +// attributes, while the before and after structs hold the actual nested values. +func (m ChangeMap) ExplicitKeys() []string { + keys := make(map[string]bool) + for before := range m.Before { + if _, ok := keys[before]; ok { + continue + } + keys[before] = true + } + for after := range m.After { + if _, ok := keys[after]; ok { + continue + } + keys[after] = true + } + + var dedupedKeys []string + for key := range keys { + dedupedKeys = append(dedupedKeys, key) + } + return dedupedKeys +} + +// AllKeys returns all the possible keys for this map. The keys for the map are +// potentially hidden and spread across multiple internal data structures and +// so this function conveniently packages them up. 
+func (m ChangeMap) AllKeys() []string { + keys := make(map[string]bool) + for before := range m.Before { + if _, ok := keys[before]; ok { + continue + } + keys[before] = true + } + for after := range m.After { + if _, ok := keys[after]; ok { + continue + } + keys[after] = true + } + for unknown := range m.Unknown { + if _, ok := keys[unknown]; ok { + continue + } + keys[unknown] = true + } + for sensitive := range m.AfterSensitive { + if _, ok := keys[sensitive]; ok { + continue + } + keys[sensitive] = true + } + for sensitive := range m.BeforeSensitive { + if _, ok := keys[sensitive]; ok { + continue + } + keys[sensitive] = true + } + + var dedupedKeys []string + for key := range keys { + dedupedKeys = append(dedupedKeys, key) + } + return dedupedKeys +} + +func getFromGenericMap(generic map[string]interface{}, key string) (interface{}, bool) { + if generic == nil { + return nil, false + } + + if child, ok := generic[key]; ok { + return child, ok + } + return nil, false +} + +func genericToMap(generic interface{}) map[string]interface{} { + if concrete, ok := generic.(map[string]interface{}); ok { + return concrete + } + return nil +} diff --git a/command/jsonformat/structured/sensitive.go b/command/jsonformat/structured/sensitive.go new file mode 100644 index 000000000000..1e9098ffe096 --- /dev/null +++ b/command/jsonformat/structured/sensitive.go @@ -0,0 +1,89 @@ +package structured + +import ( + "github.com/hashicorp/terraform/command/jsonformat/computed" + "github.com/hashicorp/terraform/plans" +) + +type ProcessSensitiveInner func(change Change) computed.Diff +type CreateSensitiveDiff func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff + +func (change Change) IsBeforeSensitive() bool { + if sensitive, ok := change.BeforeSensitive.(bool); ok { + return sensitive + } + return false +} + +func (change Change) IsAfterSensitive() bool { + if sensitive, ok := change.AfterSensitive.(bool); ok { + return sensitive + } + 
return false +} + +// CheckForSensitive is a helper function that handles all common functionality +// for processing a sensitive value. +// +// It returns the computed sensitive diff and true if this value was sensitive +// and needs to be rendered as such, otherwise it returns the second return +// value as false and the first value can be discarded. +// +// The actual processing of sensitive values happens within the +// ProcessSensitiveInner and CreateSensitiveDiff functions. Callers should +// implement these functions as appropriate when using this function. +// +// The ProcessSensitiveInner function should simply return a computed.Diff for +// the provided Change. The provided Change will be the same as the original +// change but with the sensitive metadata removed. The new inner diff is then +// passed into the actual CreateSensitiveDiff function which should return the +// actual sensitive diff. +// +// We include the inner change into the sensitive diff as a way to let the +// sensitive renderer have as much information as possible, while still letting +// it do the actual rendering. +func (change Change) CheckForSensitive(processInner ProcessSensitiveInner, createDiff CreateSensitiveDiff) (computed.Diff, bool) { + beforeSensitive := change.IsBeforeSensitive() + afterSensitive := change.IsAfterSensitive() + + if !beforeSensitive && !afterSensitive { + return computed.Diff{}, false + } + + // We are still going to give the change the contents of the actual change. + // So we create a new Change with everything matching the current value, + // except for the sensitivity. + // + // The change can choose what to do with this information, in most cases + // it will just be ignored in favour of printing `(sensitive value)`. 
+ + value := Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: change.AfterExplicit, + Before: change.Before, + After: change.After, + Unknown: change.Unknown, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } + + inner := processInner(value) + + action := inner.Action + sensitiveStatusChanged := beforeSensitive != afterSensitive + + // nullNoOp is a stronger NoOp, where not only is there no change happening + // but the before and after values are not explicitly set and are both + // null. This will override even the sensitive state changing. + nullNoOp := change.Before == nil && !change.BeforeExplicit && change.After == nil && !change.AfterExplicit + + if action == plans.NoOp && sensitiveStatusChanged && !nullNoOp { + // Let's override this, since it means the sensitive status has changed + // rather than the actual content of the value. + action = plans.Update + } + + return createDiff(inner, beforeSensitive, afterSensitive, action), true +} diff --git a/command/jsonformat/structured/slice.go b/command/jsonformat/structured/slice.go new file mode 100644 index 000000000000..9ed5da354ca0 --- /dev/null +++ b/command/jsonformat/structured/slice.go @@ -0,0 +1,91 @@ +package structured + +import ( + "github.com/hashicorp/terraform/command/jsonformat/structured/attribute_path" +) + +// ChangeSlice is a Change that represents a Tuple, Set, or List type, and has +// converted the relevant interfaces into slices for easier access. +type ChangeSlice struct { + // Before contains the value before the proposed change. + Before []interface{} + + // After contains the value after the proposed change. + After []interface{} + + // Unknown contains the unknown status of any elements of this list/set. + Unknown []interface{} + + // BeforeSensitive contains the before sensitive status of any elements of + // this list/set. 
+ BeforeSensitive []interface{} + + // AfterSensitive contains the after sensitive status of any elements of + // this list/set. + AfterSensitive []interface{} + + // ReplacePaths matches the same attributes in Change exactly. + ReplacePaths attribute_path.Matcher + + // RelevantAttributes matches the same attributes in Change exactly. + RelevantAttributes attribute_path.Matcher +} + +// AsSlice converts the Change into a slice representation by converting the +// internal Before, After, Unknown, BeforeSensitive, and AfterSensitive data +// structures into generic slices. +func (change Change) AsSlice() ChangeSlice { + return ChangeSlice{ + Before: genericToSlice(change.Before), + After: genericToSlice(change.After), + Unknown: genericToSlice(change.Unknown), + BeforeSensitive: genericToSlice(change.BeforeSensitive), + AfterSensitive: genericToSlice(change.AfterSensitive), + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// GetChild safely packages up a Change object for the given child, handling +// all the cases where the data might be null or a static boolean. 
+func (s ChangeSlice) GetChild(beforeIx, afterIx int) Change { + before, beforeExplicit := getFromGenericSlice(s.Before, beforeIx) + after, afterExplicit := getFromGenericSlice(s.After, afterIx) + unknown, _ := getFromGenericSlice(s.Unknown, afterIx) + beforeSensitive, _ := getFromGenericSlice(s.BeforeSensitive, beforeIx) + afterSensitive, _ := getFromGenericSlice(s.AfterSensitive, afterIx) + + mostRelevantIx := beforeIx + if beforeIx < 0 || beforeIx >= len(s.Before) { + mostRelevantIx = afterIx + } + + return Change{ + BeforeExplicit: beforeExplicit, + AfterExplicit: afterExplicit, + Before: before, + After: after, + Unknown: unknown, + BeforeSensitive: beforeSensitive, + AfterSensitive: afterSensitive, + ReplacePaths: s.ReplacePaths.GetChildWithIndex(mostRelevantIx), + RelevantAttributes: s.RelevantAttributes.GetChildWithIndex(mostRelevantIx), + } +} + +func getFromGenericSlice(generic []interface{}, ix int) (interface{}, bool) { + if generic == nil { + return nil, false + } + if ix < 0 || ix >= len(generic) { + return nil, false + } + return generic[ix], true +} + +func genericToSlice(generic interface{}) []interface{} { + if concrete, ok := generic.([]interface{}); ok { + return concrete + } + return nil +} diff --git a/command/jsonformat/structured/unknown.go b/command/jsonformat/structured/unknown.go new file mode 100644 index 000000000000..fe61582e3a65 --- /dev/null +++ b/command/jsonformat/structured/unknown.go @@ -0,0 +1,62 @@ +package structured + +import ( + "github.com/hashicorp/terraform/command/jsonformat/computed" +) + +type ProcessUnknown func(current Change) computed.Diff +type ProcessUnknownWithBefore func(current Change, before Change) computed.Diff + +func (change Change) IsUnknown() bool { + if unknown, ok := change.Unknown.(bool); ok { + return unknown + } + return false +} + +// CheckForUnknown is a helper function that handles all common functionality +// for processing an unknown value. 
+// +// It returns the computed unknown diff and true if this value was unknown and +// needs to be rendered as such, otherwise it returns the second return value as +// false and the first return value should be discarded. +// +// The actual processing of unknown values happens in the ProcessUnknown and +// ProcessUnknownWithBefore functions. If a value is unknown and is being +// created, the ProcessUnknown function is called and the caller should decide +// how to create the unknown value. If a value is being updated the +// ProcessUnknownWithBefore function is called and the function provides the +// before value as if it is being deleted for the caller to handle. Note that +// values being deleted will never be marked as unknown so this case isn't +// handled. +// +// The childUnknown argument is meant to allow callers with extra information +// about the type being processed to provide a list of known children that might +// not be present in the before or after values. These values will be propagated +// as the unknown values in the before value should it be needed. +func (change Change) CheckForUnknown(childUnknown interface{}, process ProcessUnknown, processBefore ProcessUnknownWithBefore) (computed.Diff, bool) { + unknown := change.IsUnknown() + + if !unknown { + return computed.Diff{}, false + } + + // No matter what we do here, we want to treat the after value as explicit. + // This is because it is going to be null in the value, and we don't want + // the functions in this package to assume this means it has been deleted. + change.AfterExplicit = true + + if change.Before == nil { + return process(change), true + } + + // If we get here, then we have a before value. We're going to model a + // delete operation and our renderer later can render the overall change + // accurately. 
+ before := change.AsDelete() + + // We also let our callers override the unknown values in any before, this + // is so the renderers can display them as being computed instead of deleted. + before.Unknown = childUnknown + return processBefore(change, before), true +} diff --git a/internal/command/jsonfunction/function.go b/command/jsonfunction/function.go similarity index 98% rename from internal/command/jsonfunction/function.go rename to command/jsonfunction/function.go index 99a78c92cf14..d72e464d6f45 100644 --- a/internal/command/jsonfunction/function.go +++ b/command/jsonfunction/function.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" ) diff --git a/internal/command/jsonfunction/function_test.go b/command/jsonfunction/function_test.go similarity index 100% rename from internal/command/jsonfunction/function_test.go rename to command/jsonfunction/function_test.go diff --git a/internal/command/jsonfunction/parameter.go b/command/jsonfunction/parameter.go similarity index 100% rename from internal/command/jsonfunction/parameter.go rename to command/jsonfunction/parameter.go diff --git a/internal/command/jsonfunction/parameter_test.go b/command/jsonfunction/parameter_test.go similarity index 100% rename from internal/command/jsonfunction/parameter_test.go rename to command/jsonfunction/parameter_test.go diff --git a/internal/command/jsonfunction/return_type.go b/command/jsonfunction/return_type.go similarity index 100% rename from internal/command/jsonfunction/return_type.go rename to command/jsonfunction/return_type.go diff --git a/internal/command/jsonplan/doc.go b/command/jsonplan/doc.go similarity index 100% rename from internal/command/jsonplan/doc.go rename to command/jsonplan/doc.go diff --git a/internal/command/jsonplan/module.go b/command/jsonplan/module.go similarity index 100% rename from 
internal/command/jsonplan/module.go rename to command/jsonplan/module.go diff --git a/command/jsonplan/plan.go b/command/jsonplan/plan.go new file mode 100644 index 000000000000..3c5d7f3404cf --- /dev/null +++ b/command/jsonplan/plan.go @@ -0,0 +1,864 @@ +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/jsonchecks" + "github.com/hashicorp/terraform/command/jsonconfig" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. 
+const ( + FormatVersion = "1.1" + + ResourceInstanceReplaceBecauseCannotUpdate = "replace_because_cannot_update" + ResourceInstanceReplaceBecauseTainted = "replace_because_tainted" + ResourceInstanceReplaceByRequest = "replace_by_request" + ResourceInstanceReplaceByTriggers = "replace_by_triggers" + ResourceInstanceDeleteBecauseNoResourceConfig = "delete_because_no_resource_config" + ResourceInstanceDeleteBecauseWrongRepetition = "delete_because_wrong_repetition" + ResourceInstanceDeleteBecauseCountIndex = "delete_because_count_index" + ResourceInstanceDeleteBecauseEachKey = "delete_because_each_key" + ResourceInstanceDeleteBecauseNoModule = "delete_because_no_module" + ResourceInstanceDeleteBecauseNoMoveTarget = "delete_because_no_move_target" + ResourceInstanceReadBecauseConfigUnknown = "read_because_config_unknown" + ResourceInstanceReadBecauseDependencyPending = "read_because_dependency_pending" +) + +// Plan is the top-level representation of the json format of a plan. It includes +// the complete config and current state. +type plan struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Variables variables `json:"variables,omitempty"` + PlannedValues stateValues `json:"planned_values,omitempty"` + // ResourceDrift and ResourceChanges are sorted in a user-friendly order + // that is undefined at this time, but consistent. 
+ ResourceDrift []ResourceChange `json:"resource_drift,omitempty"` + ResourceChanges []ResourceChange `json:"resource_changes,omitempty"` + OutputChanges map[string]Change `json:"output_changes,omitempty"` + PriorState json.RawMessage `json:"prior_state,omitempty"` + Config json.RawMessage `json:"configuration,omitempty"` + RelevantAttributes []ResourceAttr `json:"relevant_attributes,omitempty"` + Checks json.RawMessage `json:"checks,omitempty"` +} + +func newPlan() *plan { + return &plan{ + FormatVersion: FormatVersion, + } +} + +// ResourceAttr contains the address and attribute of an external for the +// RelevantAttributes in the plan. +type ResourceAttr struct { + Resource string `json:"resource"` + Attr json.RawMessage `json:"attribute"` +} + +// Change is the representation of a proposed change for an object. +type Change struct { + // Actions are the actions that will be taken on the object selected by the + // properties below. Valid actions values are: + // ["no-op"] + // ["create"] + // ["read"] + // ["update"] + // ["delete", "create"] + // ["create", "delete"] + // ["delete"] + // The two "replace" actions are represented in this way to allow callers to + // e.g. just scan the list for "delete" to recognize all three situations + // where the object will be deleted, allowing for any new deletion + // combinations that might be added in future. + Actions []string `json:"actions,omitempty"` + + // Before and After are representations of the object value both before and + // after the action. For ["create"] and ["delete"] actions, either "before" + // or "after" is unset (respectively). For ["no-op"], the before and after + // values are identical. The "after" value will be incomplete if there are + // values within it that won't be known until after apply. 
+ Before json.RawMessage `json:"before,omitempty"` + After json.RawMessage `json:"after,omitempty"` + + // AfterUnknown is an object value with similar structure to After, but + // with all unknown leaf values replaced with true, and all known leaf + // values omitted. This can be combined with After to reconstruct a full + // value after the action, including values which will only be known after + // apply. + AfterUnknown json.RawMessage `json:"after_unknown,omitempty"` + + // BeforeSensitive and AfterSensitive are object values with similar + // structure to Before and After, but with all sensitive leaf values + // replaced with true, and all non-sensitive leaf values omitted. These + // objects should be combined with Before and After to prevent accidental + // display of sensitive values in user interfaces. + BeforeSensitive json.RawMessage `json:"before_sensitive,omitempty"` + AfterSensitive json.RawMessage `json:"after_sensitive,omitempty"` + + // ReplacePaths is an array of arrays representing a set of paths into the + // object value which resulted in the action being "replace". This will be + // omitted if the action is not replace, or if no paths caused the + // replacement (for example, if the resource was tainted). Each path + // consists of one or more steps, each of which will be a number or a + // string. + ReplacePaths json.RawMessage `json:"replace_paths,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// variables is the JSON representation of the variables provided to the current +// plan. +type variables map[string]*variable + +type variable struct { + Value json.RawMessage `json:"value,omitempty"` +} + +// MarshalForRenderer returns the pre-json encoding changes of the requested +// plan, in a format available to the structured renderer. 
+// +// This function does a small part of the Marshal function, as it only returns +// the part of the plan required by the jsonformat.Plan renderer. +func MarshalForRenderer( + p *plans.Plan, + schemas *terraform.Schemas, +) (map[string]Change, []ResourceChange, []ResourceChange, []ResourceAttr, error) { + output := newPlan() + + var err error + if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { + return nil, nil, nil, nil, err + } + + if output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas); err != nil { + return nil, nil, nil, nil, err + } + + if len(p.DriftedResources) > 0 { + // In refresh-only mode, we render all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be included in the planned changes, so + // we skip them here. + var driftedResources []*plans.ResourceInstanceChangeSrc + if p.UIMode == plans.RefreshOnlyMode { + driftedResources = p.DriftedResources + } else { + for _, dr := range p.DriftedResources { + if dr.Action != plans.NoOp { + driftedResources = append(driftedResources, dr) + } + } + } + output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) + if err != nil { + return nil, nil, nil, nil, err + } + } + + if err := output.marshalRelevantAttrs(p); err != nil { + return nil, nil, nil, nil, err + } + + return output.OutputChanges, output.ResourceChanges, output.ResourceDrift, output.RelevantAttributes, nil +} + +// Marshal returns the json encoding of a terraform plan. 
+func Marshal( + config *configs.Config, + p *plans.Plan, + sf *statefile.File, + schemas *terraform.Schemas, +) ([]byte, error) { + output := newPlan() + output.TerraformVersion = version.String() + + err := output.marshalPlanVariables(p.VariableValues, config.Module.Variables) + if err != nil { + return nil, fmt.Errorf("error in marshalPlanVariables: %s", err) + } + + // output.PlannedValues + err = output.marshalPlannedValues(p.Changes, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshalPlannedValues: %s", err) + } + + // output.ResourceDrift + if len(p.DriftedResources) > 0 { + // In refresh-only mode, we render all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be included in the planned changes, so + // we skip them here. + var driftedResources []*plans.ResourceInstanceChangeSrc + if p.UIMode == plans.RefreshOnlyMode { + driftedResources = p.DriftedResources + } else { + for _, dr := range p.DriftedResources { + if dr.Action != plans.NoOp { + driftedResources = append(driftedResources, dr) + } + } + } + output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshaling resource drift: %s", err) + } + } + + if err := output.marshalRelevantAttrs(p); err != nil { + return nil, fmt.Errorf("error marshaling relevant attributes for external changes: %s", err) + } + + // output.ResourceChanges + if p.Changes != nil { + output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshaling resource changes: %s", err) + } + } + + // output.OutputChanges + if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { + return nil, fmt.Errorf("error in marshaling output changes: %s", err) + } + + // output.Checks + if p.Checks != nil && p.Checks.ConfigResults.Len() > 0 { + output.Checks = 
jsonchecks.MarshalCheckStates(p.Checks) + } + + // output.PriorState + if sf != nil && !sf.State.Empty() { + output.PriorState, err = jsonstate.Marshal(sf, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling prior state: %s", err) + } + } + + // output.Config + output.Config, err = jsonconfig.Marshal(config, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling config: %s", err) + } + + ret, err := json.Marshal(output) + return ret, err +} + +func (p *plan) marshalPlanVariables(vars map[string]plans.DynamicValue, decls map[string]*configs.Variable) error { + p.Variables = make(variables, len(vars)) + + for k, v := range vars { + val, err := v.Decode(cty.DynamicPseudoType) + if err != nil { + return err + } + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + return err + } + p.Variables[k] = &variable{ + Value: valJSON, + } + } + + // In Terraform v1.1 and earlier we had some confusion about which subsystem + // of Terraform was the one responsible for substituting in default values + // for unset module variables, with root module variables being handled in + // three different places while child module variables were only handled + // during the Terraform Core graph walk. + // + // For Terraform v1.2 and later we rationalized that by having the Terraform + // Core graph walk always be responsible for selecting defaults regardless + // of root vs. child module, but unfortunately our earlier accidental + // misbehavior bled out into the public interface by making the defaults + // show up in the "vars" map to this function. Those are now correctly + // omitted (so that the plan file only records the variables _actually_ + // set by the caller) but consumers of the JSON plan format may be depending + // on our old behavior and so we'll fake it here just in time so that + // outside consumers won't see a behavior change. 
+ for name, decl := range decls { + if _, ok := p.Variables[name]; ok { + continue + } + if val := decl.Default; val != cty.NilVal { + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + return err + } + p.Variables[name] = &variable{ + Value: valJSON, + } + } + } + + if len(p.Variables) == 0 { + p.Variables = nil // omit this property if there are no variables to describe + } + + return nil +} + +// MarshalResourceChanges converts the provided internal representation of +// ResourceInstanceChangeSrc objects into the public structured JSON changes. +// +// This function is referenced directly from the structured renderer tests, to +// ensure parity between the renderers. It probably shouldn't be used anywhere +// else. +func MarshalResourceChanges(resources []*plans.ResourceInstanceChangeSrc, schemas *terraform.Schemas) ([]ResourceChange, error) { + var ret []ResourceChange + + var sortedResources []*plans.ResourceInstanceChangeSrc + sortedResources = append(sortedResources, resources...) + sort.Slice(sortedResources, func(i, j int) bool { + if !sortedResources[i].Addr.Equal(sortedResources[j].Addr) { + return sortedResources[i].Addr.Less(sortedResources[j].Addr) + } + return sortedResources[i].DeposedKey < sortedResources[j].DeposedKey + }) + + for _, rc := range sortedResources { + var r ResourceChange + addr := rc.Addr + r.Address = addr.String() + if !addr.Equal(rc.PrevRunAddr) { + r.PreviousAddress = rc.PrevRunAddr.String() + } + + dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode + // We create "delete" actions for data resources so we can clean up + // their entries in state, but this is an implementation detail that + // users shouldn't see. 
+ if dataSource && rc.Action == plans.Delete { + continue + } + + schema, _ := schemas.ResourceTypeConfig( + rc.ProviderAddr.Provider, + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s (in provider %s)", r.Address, rc.ProviderAddr.Provider) + } + + changeV, err := rc.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + // We drop the marks from the change, as decoding is only an + // intermediate step to re-encode the values as json + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + var before, after []byte + var beforeSensitive, afterSensitive []byte + var afterUnknown cty.Value + + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return nil, err + } + marks := rc.BeforeValMarks + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(changeV.Before, nil)...) + } + bs := jsonstate.SensitiveAsBool(changeV.Before.MarkWithPaths(marks)) + beforeSensitive, err = ctyjson.Marshal(bs, bs.Type()) + if err != nil { + return nil, err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return nil, err + } + afterUnknown = cty.EmptyObjectVal + } else { + filteredAfter := omitUnknowns(changeV.After) + if filteredAfter.IsNull() { + after = nil + } else { + after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) + if err != nil { + return nil, err + } + } + afterUnknown = unknownAsBool(changeV.After) + } + marks := rc.AfterValMarks + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(changeV.After, nil)...) 
+ } + as := jsonstate.SensitiveAsBool(changeV.After.MarkWithPaths(marks)) + afterSensitive, err = ctyjson.Marshal(as, as.Type()) + if err != nil { + return nil, err + } + } + + a, err := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + if err != nil { + return nil, err + } + replacePaths, err := encodePaths(rc.RequiredReplace) + if err != nil { + return nil, err + } + + r.Change = Change{ + Actions: actionString(rc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + BeforeSensitive: json.RawMessage(beforeSensitive), + AfterSensitive: json.RawMessage(afterSensitive), + ReplacePaths: replacePaths, + } + + if rc.DeposedKey != states.NotDeposed { + r.Deposed = rc.DeposedKey.String() + } + + key := addr.Resource.Key + if key != nil { + value := key.Value() + if r.Index, err = ctyjson.Marshal(value, value.Type()); err != nil { + return nil, err + } + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + r.Mode = jsonstate.ManagedResourceMode + case addrs.DataResourceMode: + r.Mode = jsonstate.DataResourceMode + default: + return nil, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, addr.Resource.Resource.Mode.String()) + } + r.ModuleAddress = addr.Module.String() + r.Name = addr.Resource.Resource.Name + r.Type = addr.Resource.Resource.Type + r.ProviderName = rc.ProviderAddr.Provider.String() + + switch rc.ActionReason { + case plans.ResourceInstanceChangeNoReason: + r.ActionReason = "" // will be omitted in output + case plans.ResourceInstanceReplaceBecauseCannotUpdate: + r.ActionReason = ResourceInstanceReplaceBecauseCannotUpdate + case plans.ResourceInstanceReplaceBecauseTainted: + r.ActionReason = ResourceInstanceReplaceBecauseTainted + case plans.ResourceInstanceReplaceByRequest: + r.ActionReason = ResourceInstanceReplaceByRequest + case plans.ResourceInstanceReplaceByTriggers: + r.ActionReason = ResourceInstanceReplaceByTriggers + case 
plans.ResourceInstanceDeleteBecauseNoResourceConfig: + r.ActionReason = ResourceInstanceDeleteBecauseNoResourceConfig + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + r.ActionReason = ResourceInstanceDeleteBecauseWrongRepetition + case plans.ResourceInstanceDeleteBecauseCountIndex: + r.ActionReason = ResourceInstanceDeleteBecauseCountIndex + case plans.ResourceInstanceDeleteBecauseEachKey: + r.ActionReason = ResourceInstanceDeleteBecauseEachKey + case plans.ResourceInstanceDeleteBecauseNoModule: + r.ActionReason = ResourceInstanceDeleteBecauseNoModule + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + r.ActionReason = ResourceInstanceDeleteBecauseNoMoveTarget + case plans.ResourceInstanceReadBecauseConfigUnknown: + r.ActionReason = ResourceInstanceReadBecauseConfigUnknown + case plans.ResourceInstanceReadBecauseDependencyPending: + r.ActionReason = ResourceInstanceReadBecauseDependencyPending + default: + return nil, fmt.Errorf("resource %s has an unsupported action reason %s", r.Address, rc.ActionReason) + } + + ret = append(ret, r) + + } + + return ret, nil +} + +// MarshalOutputChanges converts the provided internal representation of +// Changes objects into the structured JSON representation. +// +// This function is referenced directly from the structured renderer tests, to +// ensure parity between the renderers. It probably shouldn't be used anywhere +// else. +func MarshalOutputChanges(changes *plans.Changes) (map[string]Change, error) { + if changes == nil { + // Nothing to do! + return nil, nil + } + + outputChanges := make(map[string]Change, len(changes.Outputs)) + for _, oc := range changes.Outputs { + + // Skip output changes that are not from the root module. + // These are automatically stripped from plans that are written to disk + // elsewhere, we just need to duplicate the logic here in case anyone + // is converting this plan directly from memory. 
+ if !oc.Addr.Module.IsRoot() { + continue + } + + changeV, err := oc.Decode() + if err != nil { + return nil, err + } + // We drop the marks from the change, as decoding is only an + // intermediate step to re-encode the values as json + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + var before, after []byte + var afterUnknown cty.Value + + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return nil, err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return nil, err + } + afterUnknown = cty.False + } else { + filteredAfter := omitUnknowns(changeV.After) + if filteredAfter.IsNull() { + after = nil + } else { + after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) + if err != nil { + return nil, err + } + } + afterUnknown = unknownAsBool(changeV.After) + } + } + + // The only information we have in the plan about output sensitivity is + // a boolean which is true if the output was or is marked sensitive. As + // a result, BeforeSensitive and AfterSensitive will be identical, and + // either false or true. 
+ outputSensitive := cty.False + if oc.Sensitive { + outputSensitive = cty.True + } + sensitive, err := ctyjson.Marshal(outputSensitive, outputSensitive.Type()) + if err != nil { + return nil, err + } + + a, _ := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + + c := Change{ + Actions: actionString(oc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + BeforeSensitive: json.RawMessage(sensitive), + AfterSensitive: json.RawMessage(sensitive), + } + + outputChanges[oc.Addr.OutputValue.Name] = c + } + + return outputChanges, nil +} + +func (p *plan) marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) error { + // marshal the planned changes into a module + plan, err := marshalPlannedValues(changes, schemas) + if err != nil { + return err + } + p.PlannedValues.RootModule = plan + + // marshalPlannedOutputs + outputs, err := marshalPlannedOutputs(changes) + if err != nil { + return err + } + p.PlannedValues.Outputs = outputs + + return nil +} + +func (p *plan) marshalRelevantAttrs(plan *plans.Plan) error { + for _, ra := range plan.RelevantAttributes { + addr := ra.Resource.String() + path, err := encodePath(ra.Attr) + if err != nil { + return err + } + + p.RelevantAttributes = append(p.RelevantAttributes, ResourceAttr{addr, path}) + } + return nil +} + +// omitUnknowns recursively walks the src cty.Value and returns a new cty.Value, +// omitting any unknowns. +// +// The result also normalizes some types: all sequence types are turned into +// tuple types and all mapping types are converted to object types, since we +// assume the result of this is just going to be serialized as JSON (and thus +// lose those distinctions) anyway. 
+func omitUnknowns(val cty.Value) cty.Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return cty.NilVal + case ty.IsPrimitiveType(): + return val + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + var vals []cty.Value + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals = append(vals, newVal) + } else if newVal == cty.NilVal { + // element order is how we correlate unknownness, so we must + // replace unknowns with nulls + vals = append(vals, cty.NullVal(v.Type())) + } + } + // We use tuple types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents all of these sequence types as an array. + return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals[k.AsString()] = newVal + } + } + // We use object types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents both of these mapping types as an object. + return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("omitUnknowns cannot handle %#v", val)) + } +} + +// recursively iterate through a cty.Value, replacing unknown values (including +// null) with cty.True and known values with cty.False. 
+// +// The result also normalizes some types: all sequence types are turned into +// tuple types and all mapping types are converted to object types, since we +// assume the result of this is just going to be serialized as JSON (and thus +// lose those distinctions) anyway. +// +// For map/object values, all known attribute values will be omitted instead of +// returning false, as this results in a more compact serialization. +func unknownAsBool(val cty.Value) cty.Value { + ty := val.Type() + switch { + case val.IsNull(): + return cty.False + case !val.IsKnown(): + if ty.IsPrimitiveType() || ty.Equals(cty.DynamicPseudoType) { + return cty.True + } + fallthrough + case ty.IsPrimitiveType(): + return cty.BoolVal(!val.IsKnown()) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return cty.EmptyTupleVal + } + vals := make([]cty.Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, unknownAsBool(v)) + } + // The above transform may have changed the types of some of the + // elements, so we'll always use a tuple here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these sequence types are + // indistinguishable in JSON. 
+ return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return cty.EmptyObjectVal + } + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vAsBool := unknownAsBool(v) + // Omit all of the "false"s for known values for more compact + // serialization + if !vAsBool.RawEquals(cty.False) { + vals[k.AsString()] = vAsBool + } + } + // The above transform may have changed the types of some of the + // elements, so we'll always use an object here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these mapping types are + // indistinguishable in JSON. + return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("unknownAsBool cannot handle %#v", val)) + } +} + +func actionString(action string) []string { + switch { + case action == "NoOp": + return []string{"no-op"} + case action == "Create": + return []string{"create"} + case action == "Delete": + return []string{"delete"} + case action == "Update": + return []string{"update"} + case action == "CreateThenDelete": + return []string{"create", "delete"} + case action == "Read": + return []string{"read"} + case action == "DeleteThenCreate": + return []string{"delete", "create"} + default: + return []string{action} + } +} + +// UnmarshalActions reverses the actionString function. 
+func UnmarshalActions(actions []string) plans.Action { + if len(actions) == 2 { + if actions[0] == "create" && actions[1] == "delete" { + return plans.CreateThenDelete + } + + if actions[0] == "delete" && actions[1] == "create" { + return plans.DeleteThenCreate + } + } + + if len(actions) == 1 { + switch actions[0] { + case "create": + return plans.Create + case "delete": + return plans.Delete + case "update": + return plans.Update + case "read": + return plans.Read + case "no-op": + return plans.NoOp + } + } + + panic("unrecognized action slice: " + strings.Join(actions, ", ")) +} + +// encodePaths lossily encodes a cty.PathSet into an array of arrays of step +// values, such as: +// +// [["length"],["triggers",0,"value"]] +// +// The lossiness is that we cannot distinguish between an IndexStep with string +// key and a GetAttr step. This is fine with JSON output, because JSON's type +// system means that those two steps are equivalent anyway: both are object +// indexes. +// +// JavaScript (or similar dynamic language) consumers of these values can +// iterate over the the steps starting from the root object to reach the +// value that each path is describing. 
+func encodePaths(pathSet cty.PathSet) (json.RawMessage, error) { + if pathSet.Empty() { + return nil, nil + } + + pathList := pathSet.List() + jsonPaths := make([]json.RawMessage, 0, len(pathList)) + + for _, path := range pathList { + jsonPath, err := encodePath(path) + if err != nil { + return nil, err + } + jsonPaths = append(jsonPaths, jsonPath) + } + + return json.Marshal(jsonPaths) +} + +func encodePath(path cty.Path) (json.RawMessage, error) { + steps := make([]json.RawMessage, 0, len(path)) + for _, step := range path { + switch s := step.(type) { + case cty.IndexStep: + key, err := ctyjson.Marshal(s.Key, s.Key.Type()) + if err != nil { + return nil, fmt.Errorf("Failed to marshal index step key %#v: %s", s.Key, err) + } + steps = append(steps, key) + case cty.GetAttrStep: + name, err := json.Marshal(s.Name) + if err != nil { + return nil, fmt.Errorf("Failed to marshal get attr step name %#v: %s", s.Name, err) + } + steps = append(steps, name) + default: + return nil, fmt.Errorf("Unsupported path step %#v (%t)", step, step) + } + } + return json.Marshal(steps) +} diff --git a/command/jsonplan/plan_test.go b/command/jsonplan/plan_test.go new file mode 100644 index 000000000000..27212dbc258f --- /dev/null +++ b/command/jsonplan/plan_test.go @@ -0,0 +1,469 @@ +package jsonplan + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" +) + +func TestOmitUnknowns(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.StringVal("hello"), + }, + { + cty.NullVal(cty.String), + cty.NullVal(cty.String), + }, + { + cty.UnknownVal(cty.String), + cty.NilVal, + }, + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + }, + { + 
cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + }, + // + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.NullVal(cty.String), + }), + }, + { + cty.MapVal(map[string]cty.Value{ + "hello": cty.True, + "world": cty.UnknownVal(cty.Bool), + }), + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.True, + }), + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("alpha"), + cty.UnknownVal(cty.String), + cty.StringVal("charlie"), + }), + cty.TupleVal([]cty.Value{ + cty.StringVal("alpha"), + cty.NullVal(cty.String), + cty.StringVal("charlie"), + }), + }, + { + cty.SetVal([]cty.Value{ + cty.StringVal("dev"), + cty.StringVal("foo"), + cty.StringVal("stg"), + cty.UnknownVal(cty.String), + }), + cty.TupleVal([]cty.Value{ + cty.StringVal("dev"), + cty.StringVal("foo"), + cty.StringVal("stg"), + cty.NullVal(cty.String), + }), + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := omitUnknowns(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} + +func TestUnknownAsBool(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.False, + }, + { + cty.NullVal(cty.String), + cty.False, + }, + { + 
cty.UnknownVal(cty.String), + cty.True, + }, + + { + cty.NullVal(cty.DynamicPseudoType), + cty.False, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), + cty.False, + }, + { + cty.DynamicVal, + cty.True, + }, + + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.SetValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.EmptyTupleVal, + cty.EmptyTupleVal, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.TupleVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.MapValEmpty(cty.String), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), + cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), + }, + { + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), + 
cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), + cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.SetVal([]cty.Value{ + cty.MapValEmpty(cty.String), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := unknownAsBool(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} + +func TestEncodePaths(t *testing.T) { + tests := map[string]struct { + Input cty.PathSet + Want json.RawMessage + }{ + "empty set": { + cty.NewPathSet(), + json.RawMessage(nil), + }, + "index path with string and int steps": { + cty.NewPathSet(cty.IndexStringPath("boop").IndexInt(0)), + json.RawMessage(`[["boop",0]]`), + }, + "get attr path with one step": { + cty.NewPathSet(cty.GetAttrPath("triggers")), + json.RawMessage(`[["triggers"]]`), + }, + "multiple paths of different types": { + cty.NewPathSet( + cty.GetAttrPath("alpha").GetAttr("beta").GetAttr("gamma"), + cty.GetAttrPath("triggers").IndexString("name"), + cty.IndexIntPath(0).IndexInt(1).IndexInt(2).IndexInt(3), + ), + json.RawMessage(`[["alpha","beta","gamma"],["triggers","name"],[0,1,2,3]]`), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, err := encodePaths(test.Input) + if err != nil { + t.Fatalf("unexpected error: 
%s", err) + } + if !cmp.Equal(got, test.Want) { + t.Errorf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + }) + } +} + +func TestOutputs(t *testing.T) { + root := addrs.RootModuleInstance + + child, diags := addrs.ParseModuleInstanceStr("module.child") + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + tests := map[string]struct { + changes *plans.Changes + expected map[string]Change + }{ + "copies all outputs": { + changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: root.OutputValue("first"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: root.OutputValue("second"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + }, + expected: map[string]Change{ + "first": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + "second": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + }, + }, + "skips non root modules": { + changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: root.OutputValue("first"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: child.OutputValue("second"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + }, + expected: map[string]Change{ + "first": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + changes, err := 
MarshalOutputChanges(test.changes) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + if !cmp.Equal(changes, test.expected) { + t.Errorf("wrong result:\n %v\n", cmp.Diff(changes, test.expected)) + } + }) + } +} + +func deepObjectValue(depth int) cty.Value { + v := cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a"), + "b": cty.NumberIntVal(2), + "c": cty.True, + "d": cty.UnknownVal(cty.String), + }) + + result := v + + for i := 0; i < depth; i++ { + result = cty.ObjectVal(map[string]cty.Value{ + "a": result, + "b": result, + "c": result, + }) + } + + return result +} + +func BenchmarkUnknownAsBool_2(b *testing.B) { + value := deepObjectValue(2) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_3(b *testing.B) { + value := deepObjectValue(3) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_5(b *testing.B) { + value := deepObjectValue(5) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_7(b *testing.B) { + value := deepObjectValue(7) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_9(b *testing.B) { + value := deepObjectValue(9) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} diff --git a/command/jsonplan/resource.go b/command/jsonplan/resource.go new file mode 100644 index 000000000000..336fa1e0c137 --- /dev/null +++ b/command/jsonplan/resource.go @@ -0,0 +1,92 @@ +package jsonplan + +import ( + "encoding/json" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource is the representation of a resource in the json plan +type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each` + Index 
addrs.InstanceKey `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues attributeValues `json:"values,omitempty"` + + // SensitiveValues is similar to AttributeValues, but with all sensitive + // values replaced with true, and all non-sensitive leaf values omitted. + SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` +} + +// ResourceChange is a description of an individual change action that Terraform +// plans to use to move from the prior state to a new state matching the +// configuration. +type ResourceChange struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // PreviousAddress is the absolute address that this resource instance had + // at the conclusion of a previous run. + // + // This will typically be omitted, but will be present if the previous + // resource instance was subject to a "moved" block that we handled in the + // process of creating this plan. + // + // Note that this behavior diverges from the internal plan data structure, + // where the previous address is set equal to the current address in the + // common case, rather than being omitted. 
+ PreviousAddress string `json:"previous_address,omitempty"` + + // ModuleAddress is the module portion of the above address. Omitted if the + // instance is in the root module. + ModuleAddress string `json:"module_address,omitempty"` + + // "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Index json.RawMessage `json:"index,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + + // "deposed", if set, indicates that this action applies to a "deposed" + // object of the given instance rather than to its "current" object. Omitted + // for changes to the current object. + Deposed string `json:"deposed,omitempty"` + + // Change describes the change that will be made to this object + Change Change `json:"change,omitempty"` + + // ActionReason is a keyword representing some optional extra context + // for why the actions in Change.Actions were chosen. + // + // This extra detail is only for display purposes, to help a UI layer + // present some additional explanation to a human user. The possible + // values here might grow and change over time, so any consumer of this + // information should be resilient to encountering unrecognized values + // and treat them as an unspecified reason. 
+ ActionReason string `json:"action_reason,omitempty"` +} diff --git a/command/jsonplan/values.go b/command/jsonplan/values.go new file mode 100644 index 000000000000..80824bf50c98 --- /dev/null +++ b/command/jsonplan/values.go @@ -0,0 +1,282 @@ +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// stateValues is the common representation of resolved values for both the +// prior state (which is always complete) and the planned new state. +type stateValues struct { + Outputs map[string]output `json:"outputs,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +// attributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. +type attributeValues map[string]interface{} + +func marshalAttributeValues(value cty.Value, schema *configschema.Block) attributeValues { + if value == cty.NilVal || value.IsNull() { + return nil + } + ret := make(attributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// marshalPlannedOutputs takes a list of changes and returns a map of output +// values +func marshalPlannedOutputs(changes *plans.Changes) (map[string]output, error) { + if changes.Outputs == nil { + // No changes - we're done here! 
+ return nil, nil + } + + ret := make(map[string]output) + + for _, oc := range changes.Outputs { + if oc.ChangeSrc.Action == plans.Delete { + continue + } + + var after, afterType []byte + changeV, err := oc.Decode() + if err != nil { + return ret, err + } + // The values may be marked, but we must rely on the Sensitive flag + // as the decoded value is only an intermediate step in transcoding + // this to a json format. + changeV.After, _ = changeV.After.UnmarkDeep() + + if changeV.After != cty.NilVal && changeV.After.IsWhollyKnown() { + ty := changeV.After.Type() + after, err = ctyjson.Marshal(changeV.After, ty) + if err != nil { + return ret, err + } + afterType, err = ctyjson.MarshalType(ty) + if err != nil { + return ret, err + } + } + + ret[oc.Addr.OutputValue.Name] = output{ + Value: json.RawMessage(after), + Type: json.RawMessage(afterType), + Sensitive: oc.Sensitive, + } + } + + return ret, nil + +} + +func marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) (module, error) { + var ret module + + // build two maps: + // module name -> [resource addresses] + // module -> [children modules] + moduleResourceMap := make(map[string][]addrs.AbsResourceInstance) + moduleMap := make(map[string][]addrs.ModuleInstance) + seenModules := make(map[string]bool) + + for _, resource := range changes.Resources { + // If the resource is being deleted, skip over it. + // Deposed instances are always conceptually a destroy, but if they + // were gone during refresh then the change becomes a noop. 
+ if resource.Action != plans.Delete && resource.DeposedKey == states.NotDeposed { + containingModule := resource.Addr.Module.String() + moduleResourceMap[containingModule] = append(moduleResourceMap[containingModule], resource.Addr) + + // the root module has no parents + if !resource.Addr.Module.IsRoot() { + parent := resource.Addr.Module.Parent().String() + // we expect to see multiple resources in one module, so we + // only need to report the "parent" module for each child module + // once. + if !seenModules[containingModule] { + moduleMap[parent] = append(moduleMap[parent], resource.Addr.Module) + seenModules[containingModule] = true + } + + // If any given parent module has no resources, it needs to be + // added to the moduleMap. This walks through the current + // resources' modules' ancestors, taking advantage of the fact + // that Ancestors() returns an ordered slice, and verifies that + // each one is in the map. + ancestors := resource.Addr.Module.Ancestors() + for i, ancestor := range ancestors[:len(ancestors)-1] { + aStr := ancestor.String() + + // childStr here is the immediate child of the current step + childStr := ancestors[i+1].String() + // we likely will see multiple resources in one module, so we + // only need to report the "parent" module for each child module + // once. 
+ if !seenModules[childStr] { + moduleMap[aStr] = append(moduleMap[aStr], ancestors[i+1]) + seenModules[childStr] = true + } + } + } + } + } + + // start with the root module + resources, err := marshalPlanResources(changes, moduleResourceMap[""], schemas) + if err != nil { + return ret, err + } + ret.Resources = resources + + childModules, err := marshalPlanModules(changes, schemas, moduleMap[""], moduleMap, moduleResourceMap) + if err != nil { + return ret, err + } + sort.Slice(childModules, func(i, j int) bool { + return childModules[i].Address < childModules[j].Address + }) + + ret.ChildModules = childModules + + return ret, nil +} + +// marshalPlanResources +func marshalPlanResources(changes *plans.Changes, ris []addrs.AbsResourceInstance, schemas *terraform.Schemas) ([]resource, error) { + var ret []resource + + for _, ri := range ris { + r := changes.ResourceInstance(ri) + if r.Action == plans.Delete { + continue + } + + resource := resource{ + Address: r.Addr.String(), + Type: r.Addr.Resource.Resource.Type, + Name: r.Addr.Resource.Resource.Name, + ProviderName: r.ProviderAddr.Provider.String(), + Index: r.Addr.Resource.Key, + } + + switch r.Addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + resource.Mode = "managed" + case addrs.DataResourceMode: + resource.Mode = "data" + default: + return nil, fmt.Errorf("resource %s has an unsupported mode %s", + r.Addr.String(), + r.Addr.Resource.Resource.Mode.String(), + ) + } + + schema, schemaVer := schemas.ResourceTypeConfig( + r.ProviderAddr.Provider, + r.Addr.Resource.Resource.Mode, + resource.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s", r.Addr.String()) + } + resource.SchemaVersion = schemaVer + changeV, err := r.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + // copy the marked After values so we can use these in marshalSensitiveValues + markedAfter := changeV.After + + // The values may be marked, but we must rely on the Sensitive flag 
+ // as the decoded value is only an intermediate step in transcoding + // this to a json format. + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + resource.AttributeValues = marshalAttributeValues(changeV.After, schema) + } else { + knowns := omitUnknowns(changeV.After) + resource.AttributeValues = marshalAttributeValues(knowns, schema) + } + } + + s := jsonstate.SensitiveAsBool(markedAfter) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + resource.SensitiveValues = v + + ret = append(ret, resource) + } + + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} + +// marshalPlanModules iterates over a list of modules to recursively describe +// the full module tree. +func marshalPlanModules( + changes *plans.Changes, + schemas *terraform.Schemas, + childModules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, + moduleResourceMap map[string][]addrs.AbsResourceInstance, +) ([]module, error) { + + var ret []module + + for _, child := range childModules { + moduleResources := moduleResourceMap[child.String()] + // cm for child module, naming things is hard. 
+ var cm module + // don't populate the address for the root module + if child.String() != "" { + cm.Address = child.String() + } + rs, err := marshalPlanResources(changes, moduleResources, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + + if len(moduleMap[child.String()]) > 0 { + moreChildModules, err := marshalPlanModules(changes, schemas, moduleMap[child.String()], moduleMap, moduleResourceMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + return ret, nil +} diff --git a/command/jsonplan/values_test.go b/command/jsonplan/values_test.go new file mode 100644 index 000000000000..f2b374623418 --- /dev/null +++ b/command/jsonplan/values_test.go @@ -0,0 +1,374 @@ +package jsonplan + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" +) + +func TestMarshalAttributeValues(t *testing.T) { + tests := []struct { + Attr cty.Value + Schema *configschema.Block + Want attributeValues + }{ + { + cty.NilVal, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + nil, + }, + { + cty.NullVal(cty.String), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + nil, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + attributeValues{"foo": json.RawMessage(`"bar"`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: 
cty.String, + Optional: true, + }, + }, + }, + attributeValues{"foo": json.RawMessage(`null`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon"), + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + attributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + } + + for _, test := range tests { + got := marshalAttributeValues(test.Attr, test.Schema) + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalPlannedOutputs(t *testing.T) { + after, _ := plans.NewDynamicValue(cty.StringVal("after"), cty.DynamicPseudoType) + + tests := []struct { + Changes *plans.Changes + Want map[string]output + Err bool + }{ + { + &plans.Changes{}, + nil, + false, + }, + { + &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + After: after, + }, + Sensitive: false, + }, + }, + }, + map[string]output{ + "bar": { + Sensitive: false, + Type: json.RawMessage(`"string"`), + Value: json.RawMessage(`"after"`), + }, + }, + false, + }, + { // Delete action + &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + Sensitive: false, + }, + }, + }, + map[string]output{}, + false, + }, + } + + for _, test := range tests { + got, err := marshalPlannedOutputs(test.Changes) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } 
+ return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalPlanResources(t *testing.T) { + tests := map[string]struct { + Action plans.Action + Before cty.Value + After cty.Value + Want []resource + Err bool + }{ + "create with unknowns": { + Action: plans.Create, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.UnknownVal(cty.String), + "foozles": cty.UnknownVal(cty.String), + }), + Want: []resource{{ + Address: "test_thing.example", + Mode: "managed", + Type: "test_thing", + Name: "example", + Index: addrs.InstanceKey(nil), + ProviderName: "registry.terraform.io/hashicorp/test", + SchemaVersion: 1, + AttributeValues: attributeValues{}, + SensitiveValues: json.RawMessage("{}"), + }}, + Err: false, + }, + "delete with null and nil": { + Action: plans.Delete, + Before: cty.NullVal(cty.EmptyObject), + After: cty.NilVal, + Want: nil, + Err: false, + }, + "delete": { + Action: plans.Delete, + Before: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("foo"), + "foozles": cty.StringVal("bar"), + }), + After: cty.NullVal(cty.Object(map[string]cty.Type{ + "woozles": cty.String, + "foozles": cty.String, + })), + Want: nil, + Err: false, + }, + "update without unknowns": { + Action: plans.Update, + Before: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("foo"), + "foozles": cty.StringVal("bar"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("baz"), + "foozles": cty.StringVal("bat"), + }), + Want: []resource{{ + Address: "test_thing.example", + Mode: "managed", + Type: "test_thing", + Name: "example", + Index: addrs.InstanceKey(nil), + ProviderName: "registry.terraform.io/hashicorp/test", + SchemaVersion: 1, + AttributeValues: attributeValues{ + "woozles": json.RawMessage(`"baz"`), + "foozles": 
json.RawMessage(`"bat"`), + }, + SensitiveValues: json.RawMessage("{}"), + }}, + Err: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + before, err := plans.NewDynamicValue(test.Before, test.Before.Type()) + if err != nil { + t.Fatal(err) + } + + after, err := plans.NewDynamicValue(test.After, test.After.Type()) + if err != nil { + t.Fatal(err) + } + testChange := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "example", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: test.Action, + Before: before, + After: after, + }, + }, + }, + } + + ris := testResourceAddrs() + + got, err := marshalPlanResources(testChange, ris, testSchemas()) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + }) + } +} + +func TestMarshalPlanValuesNoopDeposed(t *testing.T) { + dynamicNull, err := plans.NewDynamicValue(cty.NullVal(cty.DynamicPseudoType), cty.DynamicPseudoType) + if err != nil { + t.Fatal(err) + } + testChange := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "example", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + DeposedKey: "12345678", + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: dynamicNull, + After: dynamicNull, + }, + }, + }, + } + + _, err = 
marshalPlannedValues(testChange, testSchemas()) + if err != nil { + t.Fatal(err) + } +} + +func testSchemas() *terraform.Schemas { + return &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_thing": { + Attributes: map[string]*configschema.Attribute{ + "woozles": {Type: cty.String, Optional: true, Computed: true}, + "foozles": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypeSchemaVersions: map[string]uint64{ + "test_thing": 1, + }, + }, + }, + } +} + +func testResourceAddrs() []addrs.AbsResourceInstance { + return []addrs.AbsResourceInstance{ + mustAddr("test_thing.example"), + } +} + +func mustAddr(str string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(str) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} diff --git a/command/jsonprovider/attribute.go b/command/jsonprovider/attribute.go new file mode 100644 index 000000000000..630dea63bcea --- /dev/null +++ b/command/jsonprovider/attribute.go @@ -0,0 +1,67 @@ +package jsonprovider + +import ( + "encoding/json" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +type Attribute struct { + AttributeType json.RawMessage `json:"type,omitempty"` + AttributeNestedType *NestedType `json:"nested_type,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Required bool `json:"required,omitempty"` + Optional bool `json:"optional,omitempty"` + Computed bool `json:"computed,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type NestedType struct { + Attributes map[string]*Attribute `json:"attributes,omitempty"` + NestingMode string `json:"nesting_mode,omitempty"` +} + +func marshalStringKind(sk configschema.StringKind) string { + 
switch sk { + default: + return "plain" + case configschema.StringMarkdown: + return "markdown" + } +} + +func marshalAttribute(attr *configschema.Attribute) *Attribute { + ret := &Attribute{ + Description: attr.Description, + DescriptionKind: marshalStringKind(attr.DescriptionKind), + Required: attr.Required, + Optional: attr.Optional, + Computed: attr.Computed, + Sensitive: attr.Sensitive, + Deprecated: attr.Deprecated, + } + + // we're not concerned about errors because at this point the schema has + // already been checked and re-checked. + if attr.Type != cty.NilType { + attrTy, _ := attr.Type.MarshalJSON() + ret.AttributeType = attrTy + } + + if attr.NestedType != nil { + nestedTy := NestedType{ + NestingMode: nestingModeString(attr.NestedType.Nesting), + } + attrs := make(map[string]*Attribute, len(attr.NestedType.Attributes)) + for k, attr := range attr.NestedType.Attributes { + attrs[k] = marshalAttribute(attr) + } + nestedTy.Attributes = attrs + ret.AttributeNestedType = &nestedTy + } + + return ret +} diff --git a/internal/command/jsonprovider/attribute_test.go b/command/jsonprovider/attribute_test.go similarity index 93% rename from internal/command/jsonprovider/attribute_test.go rename to command/jsonprovider/attribute_test.go index f79c5f7cdbdb..887b90104522 100644 --- a/internal/command/jsonprovider/attribute_test.go +++ b/command/jsonprovider/attribute_test.go @@ -7,7 +7,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) func TestMarshalAttribute(t *testing.T) { diff --git a/command/jsonprovider/block.go b/command/jsonprovider/block.go new file mode 100644 index 000000000000..82afdd85fe60 --- /dev/null +++ b/command/jsonprovider/block.go @@ -0,0 +1,80 @@ +package jsonprovider + +import ( + "github.com/hashicorp/terraform/configs/configschema" +) + +type Block struct { + Attributes map[string]*Attribute 
`json:"attributes,omitempty"` + BlockTypes map[string]*BlockType `json:"block_types,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` +} + +type BlockType struct { + NestingMode string `json:"nesting_mode,omitempty"` + Block *Block `json:"block,omitempty"` + MinItems uint64 `json:"min_items,omitempty"` + MaxItems uint64 `json:"max_items,omitempty"` +} + +func marshalBlockTypes(nestedBlock *configschema.NestedBlock) *BlockType { + if nestedBlock == nil { + return &BlockType{} + } + ret := &BlockType{ + Block: marshalBlock(&nestedBlock.Block), + MinItems: uint64(nestedBlock.MinItems), + MaxItems: uint64(nestedBlock.MaxItems), + NestingMode: nestingModeString(nestedBlock.Nesting), + } + return ret +} + +func marshalBlock(configBlock *configschema.Block) *Block { + if configBlock == nil { + return &Block{} + } + + ret := Block{ + Deprecated: configBlock.Deprecated, + Description: configBlock.Description, + DescriptionKind: marshalStringKind(configBlock.DescriptionKind), + } + + if len(configBlock.Attributes) > 0 { + attrs := make(map[string]*Attribute, len(configBlock.Attributes)) + for k, attr := range configBlock.Attributes { + attrs[k] = marshalAttribute(attr) + } + ret.Attributes = attrs + } + + if len(configBlock.BlockTypes) > 0 { + blockTypes := make(map[string]*BlockType, len(configBlock.BlockTypes)) + for k, bt := range configBlock.BlockTypes { + blockTypes[k] = marshalBlockTypes(bt) + } + ret.BlockTypes = blockTypes + } + + return &ret +} + +func nestingModeString(mode configschema.NestingMode) string { + switch mode { + case configschema.NestingSingle: + return "single" + case configschema.NestingGroup: + return "group" + case configschema.NestingList: + return "list" + case configschema.NestingSet: + return "set" + case configschema.NestingMap: + return "map" + default: + return "invalid" + } +} diff --git 
a/internal/command/jsonprovider/block_test.go b/command/jsonprovider/block_test.go similarity index 96% rename from internal/command/jsonprovider/block_test.go rename to command/jsonprovider/block_test.go index dea72390f506..9090429049e4 100644 --- a/internal/command/jsonprovider/block_test.go +++ b/command/jsonprovider/block_test.go @@ -7,7 +7,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) func TestMarshalBlock(t *testing.T) { diff --git a/internal/command/jsonprovider/doc.go b/command/jsonprovider/doc.go similarity index 100% rename from internal/command/jsonprovider/doc.go rename to command/jsonprovider/doc.go diff --git a/command/jsonprovider/provider.go b/command/jsonprovider/provider.go new file mode 100644 index 000000000000..c26d496f36f8 --- /dev/null +++ b/command/jsonprovider/provider.go @@ -0,0 +1,78 @@ +package jsonprovider + +import ( + "encoding/json" + + "github.com/hashicorp/terraform/terraform" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. 
+const FormatVersion = "1.0" + +// providers is the top-level object returned when exporting provider schemas +type providers struct { + FormatVersion string `json:"format_version"` + Schemas map[string]*Provider `json:"provider_schemas,omitempty"` +} + +type Provider struct { + Provider *Schema `json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"` + DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"` +} + +func newProviders() *providers { + schemas := make(map[string]*Provider) + return &providers{ + FormatVersion: FormatVersion, + Schemas: schemas, + } +} + +// MarshalForRenderer converts the provided internal representation of the +// schema into the public structured JSON versions. +// +// This is a format that can be read by the structured plan renderer. +func MarshalForRenderer(s *terraform.Schemas) map[string]*Provider { + schemas := make(map[string]*Provider, len(s.Providers)) + for k, v := range s.Providers { + schemas[k.String()] = marshalProvider(v) + } + return schemas +} + +func Marshal(s *terraform.Schemas) ([]byte, error) { + providers := newProviders() + providers.Schemas = MarshalForRenderer(s) + ret, err := json.Marshal(providers) + return ret, err +} + +func marshalProvider(tps *terraform.ProviderSchema) *Provider { + if tps == nil { + return &Provider{} + } + + var ps *Schema + var rs, ds map[string]*Schema + + if tps.Provider != nil { + ps = marshalSchema(tps.Provider) + } + + if tps.ResourceTypes != nil { + rs = marshalSchemas(tps.ResourceTypes, tps.ResourceTypeSchemaVersions) + } + + if tps.DataSources != nil { + ds = marshalSchemas(tps.DataSources, tps.ResourceTypeSchemaVersions) + } + + return &Provider{ + Provider: ps, + ResourceSchemas: rs, + DataSourceSchemas: ds, + } +} diff --git a/command/jsonprovider/provider_test.go b/command/jsonprovider/provider_test.go new file mode 100644 index 000000000000..51f2dea93857 --- /dev/null +++ 
b/command/jsonprovider/provider_test.go @@ -0,0 +1,212 @@ +package jsonprovider + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/terraform" +) + +func TestMarshalProvider(t *testing.T) { + tests := []struct { + Input *terraform.ProviderSchema + Want *Provider + }{ + { + nil, + &Provider{}, + }, + { + testProvider(), + &Provider{ + Provider: &Schema{ + Block: &Block{ + Attributes: map[string]*Attribute{ + "region": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + }, + DescriptionKind: "plain", + }, + }, + ResourceSchemas: map[string]*Schema{ + "test_instance": { + Version: 42, + Block: &Block{ + Attributes: map[string]*Attribute{ + "id": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + "ami": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "volumes": { + AttributeNestedType: &NestedType{ + NestingMode: "list", + Attributes: map[string]*Attribute{ + "size": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + "mount_point": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + }, + }, + Optional: true, + DescriptionKind: "plain", + }, + }, + BlockTypes: map[string]*BlockType{ + "network_interface": { + Block: &Block{ + Attributes: map[string]*Attribute{ + "device_index": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "description": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + DescriptionKind: "plain", + }, + NestingMode: "list", + }, + }, + DescriptionKind: "plain", + }, + }, + }, + DataSourceSchemas: map[string]*Schema{ + 
"test_data_source": { + Version: 3, + Block: &Block{ + Attributes: map[string]*Attribute{ + "id": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + "ami": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + BlockTypes: map[string]*BlockType{ + "network_interface": { + Block: &Block{ + Attributes: map[string]*Attribute{ + "device_index": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "description": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + DescriptionKind: "plain", + }, + NestingMode: "list", + }, + }, + DescriptionKind: "plain", + }, + }, + }, + }, + }, + } + + for _, test := range tests { + got := marshalProvider(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} + +func testProvider() *terraform.ProviderSchema { + return &terraform.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "volumes": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "size": {Type: cty.String, Required: true}, + "mount_point": {Type: cty.String, Required: true}, + }, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, 
Optional: true}, + }, + }, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + + ResourceTypeSchemaVersions: map[string]uint64{ + "test_instance": 42, + "test_data_source": 3, + }, + } +} diff --git a/command/jsonprovider/schema.go b/command/jsonprovider/schema.go new file mode 100644 index 000000000000..d345ce2fc52f --- /dev/null +++ b/command/jsonprovider/schema.go @@ -0,0 +1,38 @@ +package jsonprovider + +import ( + "github.com/hashicorp/terraform/configs/configschema" +) + +type Schema struct { + Version uint64 `json:"version"` + Block *Block `json:"block,omitempty"` +} + +// marshalSchema is a convenience wrapper around marshalBlock. Schema version +// should be set by the caller. 
+func marshalSchema(block *configschema.Block) *Schema { + if block == nil { + return &Schema{} + } + + var ret Schema + ret.Block = marshalBlock(block) + + return &ret +} + +func marshalSchemas(blocks map[string]*configschema.Block, rVersions map[string]uint64) map[string]*Schema { + if blocks == nil { + return map[string]*Schema{} + } + ret := make(map[string]*Schema, len(blocks)) + for k, v := range blocks { + ret[k] = marshalSchema(v) + version, ok := rVersions[k] + if ok { + ret[k].Version = version + } + } + return ret +} diff --git a/command/jsonprovider/schema_test.go b/command/jsonprovider/schema_test.go new file mode 100644 index 000000000000..6eba1675b7cc --- /dev/null +++ b/command/jsonprovider/schema_test.go @@ -0,0 +1,49 @@ +package jsonprovider + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/hashicorp/terraform/configs/configschema" +) + +func TestMarshalSchemas(t *testing.T) { + tests := []struct { + Input map[string]*configschema.Block + Versions map[string]uint64 + Want map[string]*Schema + }{ + { + nil, + map[string]uint64{}, + map[string]*Schema{}, + }, + } + + for _, test := range tests { + got := marshalSchemas(test.Input, test.Versions) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} + +func TestMarshalSchema(t *testing.T) { + tests := map[string]struct { + Input *configschema.Block + Want *Schema + }{ + "nil_block": { + nil, + &Schema{}, + }, + } + + for _, test := range tests { + got := marshalSchema(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} diff --git a/internal/command/jsonstate/doc.go b/command/jsonstate/doc.go similarity index 100% rename from internal/command/jsonstate/doc.go rename to command/jsonstate/doc.go diff --git a/command/jsonstate/state.go b/command/jsonstate/state.go new file mode 100644 index 000000000000..59ff05416a7f --- /dev/null +++ b/command/jsonstate/state.go 
@@ -0,0 +1,553 @@ +package jsonstate + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/jsonchecks" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" +) + +const ( + // FormatVersion represents the version of the json format and will be + // incremented for any change to this format that requires changes to a + // consuming parser. + FormatVersion = "1.0" + + ManagedResourceMode = "managed" + DataResourceMode = "data" +) + +// state is the top-level representation of the json format of a terraform +// state. +type state struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Values *stateValues `json:"values,omitempty"` + Checks json.RawMessage `json:"checks,omitempty"` +} + +// stateValues is the common representation of resolved values for both the prior +// state (which is always complete) and the planned new state. +type stateValues struct { + Outputs map[string]Output `json:"outputs,omitempty"` + RootModule Module `json:"root_module,omitempty"` +} + +type Output struct { + Sensitive bool `json:"sensitive"` + Value json.RawMessage `json:"value,omitempty"` + Type json.RawMessage `json:"type,omitempty"` +} + +// Module is the representation of a module in state. This can be the root module +// or a child module +type Module struct { + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. 
+ Resources []Resource `json:"resources,omitempty"` + + // Address is the absolute module address, omitted for the root module + Address string `json:"address,omitempty"` + + // Each module object can optionally have its own nested "child_modules", + // recursively describing the full module tree. + ChildModules []Module `json:"child_modules,omitempty"` +} + +// Resource is the representation of a resource in the state. +type Resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each`. + Index json.RawMessage `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues AttributeValues `json:"values,omitempty"` + + // SensitiveValues is similar to AttributeValues, but with all sensitive + // values replaced with true, and all non-sensitive leaf values omitted. + SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` + + // DependsOn contains a list of the resource's dependencies. The entries are + // addresses relative to the containing module. 
+ DependsOn []string `json:"depends_on,omitempty"` + + // Tainted is true if the resource is tainted in terraform state. + Tainted bool `json:"tainted,omitempty"` + + // Deposed is set if the resource is deposed in terraform state. + DeposedKey string `json:"deposed_key,omitempty"` +} + +// AttributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. +type AttributeValues map[string]json.RawMessage + +func marshalAttributeValues(value cty.Value) AttributeValues { + // unmark our value to show all values + value, _ = value.UnmarkDeep() + + if value == cty.NilVal || value.IsNull() { + return nil + } + + ret := make(AttributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// newState() returns a minimally-initialized state +func newState() *state { + return &state{ + FormatVersion: FormatVersion, + } +} + +// MarshalForRenderer returns the pre-json encoding changes of the state, in a +// format available to the structured renderer. +func MarshalForRenderer(sf *statefile.File, schemas *terraform.Schemas) (Module, map[string]Output, error) { + if sf.State.Modules == nil { + // Empty state case. + return Module{}, nil, nil + } + + outputs, err := MarshalOutputs(sf.State.RootModule().OutputValues) + if err != nil { + return Module{}, nil, err + } + + root, err := marshalRootModule(sf.State, schemas) + if err != nil { + return Module{}, nil, err + } + + return root, outputs, err +} + +// Marshal returns the json encoding of a terraform state. 
+func Marshal(sf *statefile.File, schemas *terraform.Schemas) ([]byte, error) { + output := newState() + + if sf == nil || sf.State.Empty() { + ret, err := json.Marshal(output) + return ret, err + } + + if sf.TerraformVersion != nil { + output.TerraformVersion = sf.TerraformVersion.String() + } + + // output.StateValues + err := output.marshalStateValues(sf.State, schemas) + if err != nil { + return nil, err + } + + // output.Checks + if sf.State.CheckResults != nil && sf.State.CheckResults.ConfigResults.Len() > 0 { + output.Checks = jsonchecks.MarshalCheckStates(sf.State.CheckResults) + } + + ret, err := json.Marshal(output) + return ret, err +} + +func (jsonstate *state) marshalStateValues(s *states.State, schemas *terraform.Schemas) error { + var sv stateValues + var err error + + // only marshal the root module outputs + sv.Outputs, err = MarshalOutputs(s.RootModule().OutputValues) + if err != nil { + return err + } + + // use the state and module map to build up the module structure + sv.RootModule, err = marshalRootModule(s, schemas) + if err != nil { + return err + } + + jsonstate.Values = &sv + return nil +} + +// MarshalOutputs translates a map of states.OutputValue to a map of jsonstate.Output, +// which are defined for json encoding. 
+func MarshalOutputs(outputs map[string]*states.OutputValue) (map[string]Output, error) { + if outputs == nil { + return nil, nil + } + + ret := make(map[string]Output) + for k, v := range outputs { + ty := v.Value.Type() + ov, err := ctyjson.Marshal(v.Value, ty) + if err != nil { + return ret, err + } + ot, err := ctyjson.MarshalType(ty) + if err != nil { + return ret, err + } + ret[k] = Output{ + Value: ov, + Type: ot, + Sensitive: v.Sensitive, + } + } + + return ret, nil +} + +func marshalRootModule(s *states.State, schemas *terraform.Schemas) (Module, error) { + var ret Module + var err error + + ret.Address = "" + rs, err := marshalResources(s.RootModule().Resources, addrs.RootModuleInstance, schemas) + if err != nil { + return ret, err + } + ret.Resources = rs + + // build a map of module -> set[child module addresses] + moduleChildSet := make(map[string]map[string]struct{}) + for _, mod := range s.Modules { + if mod.Addr.IsRoot() { + continue + } else { + for childAddr := mod.Addr; !childAddr.IsRoot(); childAddr = childAddr.Parent() { + if _, ok := moduleChildSet[childAddr.Parent().String()]; !ok { + moduleChildSet[childAddr.Parent().String()] = map[string]struct{}{} + } + moduleChildSet[childAddr.Parent().String()][childAddr.String()] = struct{}{} + } + } + } + + // transform the previous map into map of module -> [child module addresses] + moduleMap := make(map[string][]addrs.ModuleInstance) + for parent, children := range moduleChildSet { + for child := range children { + childModuleInstance, diags := addrs.ParseModuleInstanceStr(child) + if diags.HasErrors() { + return ret, diags.Err() + } + moduleMap[parent] = append(moduleMap[parent], childModuleInstance) + } + } + + // use the state and module map to build up the module structure + ret.ChildModules, err = marshalModules(s, schemas, moduleMap[""], moduleMap) + return ret, err +} + +// marshalModules is an ungainly recursive function to build a module structure +// out of terraform state. 
+func marshalModules( + s *states.State, + schemas *terraform.Schemas, + modules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, +) ([]Module, error) { + var ret []Module + for _, child := range modules { + // cm for child module, naming things is hard. + cm := Module{Address: child.String()} + + // the module may be resourceless and contain only submodules, it will then be nil here + stateMod := s.Module(child) + if stateMod != nil { + rs, err := marshalResources(stateMod.Resources, stateMod.Addr, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + } + + if moduleMap[child.String()] != nil { + moreChildModules, err := marshalModules(s, schemas, moduleMap[child.String()], moduleMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + // sort the child modules by address for consistency. + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} + +func marshalResources(resources map[string]*states.Resource, module addrs.ModuleInstance, schemas *terraform.Schemas) ([]Resource, error) { + var ret []Resource + + var sortedResources []*states.Resource + for _, r := range resources { + sortedResources = append(sortedResources, r) + } + sort.Slice(sortedResources, func(i, j int) bool { + return sortedResources[i].Addr.Less(sortedResources[j].Addr) + }) + + for _, r := range sortedResources { + + var sortedKeys []addrs.InstanceKey + for k := range r.Instances { + sortedKeys = append(sortedKeys, k) + } + sort.Slice(sortedKeys, func(i, j int) bool { + return addrs.InstanceKeyLess(sortedKeys[i], sortedKeys[j]) + }) + + for _, k := range sortedKeys { + ri := r.Instances[k] + + var err error + + resAddr := r.Addr.Resource + + current := Resource{ + Address: r.Addr.Instance(k).String(), + Type: resAddr.Type, + Name: resAddr.Name, + ProviderName: r.ProviderConfig.Provider.String(), + } + + if k != nil { + index := k.Value() + 
if current.Index, err = ctyjson.Marshal(index, index.Type()); err != nil { + return nil, err + } + } + + switch resAddr.Mode { + case addrs.ManagedResourceMode: + current.Mode = ManagedResourceMode + case addrs.DataResourceMode: + current.Mode = DataResourceMode + default: + return ret, fmt.Errorf("resource %s has an unsupported mode %s", + resAddr.String(), + resAddr.Mode.String(), + ) + } + + schema, version := schemas.ResourceTypeConfig( + r.ProviderConfig.Provider, + resAddr.Mode, + resAddr.Type, + ) + + // It is possible that the only instance is deposed + if ri.Current != nil { + if version != ri.Current.SchemaVersion { + return nil, fmt.Errorf("schema version %d for %s in state does not match version %d from the provider", ri.Current.SchemaVersion, resAddr, version) + } + + current.SchemaVersion = ri.Current.SchemaVersion + + if schema == nil { + return nil, fmt.Errorf("no schema found for %s (in provider %s)", resAddr.String(), r.ProviderConfig.Provider) + } + riObj, err := ri.Current.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + current.AttributeValues = marshalAttributeValues(riObj.Value) + + value, marks := riObj.Value.UnmarkDeepWithPaths() + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(value, nil)...) 
+ } + s := SensitiveAsBool(value.MarkWithPaths(marks)) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + current.SensitiveValues = v + + if len(riObj.Dependencies) > 0 { + dependencies := make([]string, len(riObj.Dependencies)) + for i, v := range riObj.Dependencies { + dependencies[i] = v.String() + } + current.DependsOn = dependencies + } + + if riObj.Status == states.ObjectTainted { + current.Tainted = true + } + ret = append(ret, current) + } + + var sortedDeposedKeys []string + for k := range ri.Deposed { + sortedDeposedKeys = append(sortedDeposedKeys, string(k)) + } + sort.Strings(sortedDeposedKeys) + + for _, deposedKey := range sortedDeposedKeys { + rios := ri.Deposed[states.DeposedKey(deposedKey)] + + // copy the base fields from the current instance + deposed := Resource{ + Address: current.Address, + Type: current.Type, + Name: current.Name, + ProviderName: current.ProviderName, + Mode: current.Mode, + Index: current.Index, + } + + riObj, err := rios.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + deposed.AttributeValues = marshalAttributeValues(riObj.Value) + + value, marks := riObj.Value.UnmarkDeepWithPaths() + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(value, nil)...) 
+ } + s := SensitiveAsBool(value.MarkWithPaths(marks)) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + deposed.SensitiveValues = v + + if len(riObj.Dependencies) > 0 { + dependencies := make([]string, len(riObj.Dependencies)) + for i, v := range riObj.Dependencies { + dependencies[i] = v.String() + } + deposed.DependsOn = dependencies + } + + if riObj.Status == states.ObjectTainted { + deposed.Tainted = true + } + deposed.DeposedKey = deposedKey + ret = append(ret, deposed) + } + } + } + + return ret, nil +} + +func SensitiveAsBool(val cty.Value) cty.Value { + if val.HasMark(marks.Sensitive) { + return cty.True + } + + ty := val.Type() + switch { + case val.IsNull(), ty.IsPrimitiveType(), ty.Equals(cty.DynamicPseudoType): + return cty.False + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + if !val.IsKnown() { + // If the collection is unknown we can't say anything about the + // sensitivity of its contents + return cty.EmptyTupleVal + } + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have sensitive values + return cty.EmptyTupleVal + } + vals := make([]cty.Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, SensitiveAsBool(v)) + } + // The above transform may have changed the types of some of the + // elements, so we'll always use a tuple here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these sequence types are + // indistinguishable in JSON. 
+ return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + if !val.IsKnown() { + // If the map/object is unknown we can't say anything about the + // sensitivity of its attributes + return cty.EmptyObjectVal + } + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have sensitive values + return cty.EmptyObjectVal + } + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + s := SensitiveAsBool(v) + // Omit all of the "false"s for non-sensitive values for more + // compact serialization + if !s.RawEquals(cty.False) { + vals[k.AsString()] = s + } + } + // The above transform may have changed the types of some of the + // elements, so we'll always use an object here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these mapping types are + // indistinguishable in JSON. 
+ return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("sensitiveAsBool cannot handle %#v", val)) + } +} diff --git a/command/jsonstate/state_test.go b/command/jsonstate/state_test.go new file mode 100644 index 000000000000..4bc1971ed783 --- /dev/null +++ b/command/jsonstate/state_test.go @@ -0,0 +1,1044 @@ +package jsonstate + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +func TestMarshalOutputs(t *testing.T) { + tests := []struct { + Outputs map[string]*states.OutputValue + Want map[string]Output + Err bool + }{ + { + nil, + nil, + false, + }, + { + map[string]*states.OutputValue{ + "test": { + Sensitive: true, + Value: cty.StringVal("sekret"), + }, + }, + map[string]Output{ + "test": { + Sensitive: true, + Value: json.RawMessage(`"sekret"`), + Type: json.RawMessage(`"string"`), + }, + }, + false, + }, + { + map[string]*states.OutputValue{ + "test": { + Sensitive: false, + Value: cty.StringVal("not_so_sekret"), + }, + }, + map[string]Output{ + "test": { + Sensitive: false, + Value: json.RawMessage(`"not_so_sekret"`), + Type: json.RawMessage(`"string"`), + }, + }, + false, + }, + { + map[string]*states.OutputValue{ + "mapstring": { + Sensitive: false, + Value: cty.MapVal(map[string]cty.Value{ + "beep": cty.StringVal("boop"), + }), + }, + "setnumber": { + Sensitive: false, + Value: cty.SetVal([]cty.Value{ + cty.NumberIntVal(3), + cty.NumberIntVal(5), + cty.NumberIntVal(7), + cty.NumberIntVal(11), + }), + }, + }, + map[string]Output{ + "mapstring": { + Sensitive: false, + Value: json.RawMessage(`{"beep":"boop"}`), + Type: json.RawMessage(`["map","string"]`), + }, + "setnumber": { + 
Sensitive: false, + Value: json.RawMessage(`[3,5,7,11]`), + Type: json.RawMessage(`["set","number"]`), + }, + }, + false, + }, + } + + for _, test := range tests { + got, err := MarshalOutputs(test.Outputs) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !cmp.Equal(test.Want, got) { + t.Fatalf("wrong result:\n%s", cmp.Diff(test.Want, got)) + } + } +} + +func TestMarshalAttributeValues(t *testing.T) { + tests := []struct { + Attr cty.Value + Want AttributeValues + }{ + { + cty.NilVal, + nil, + }, + { + cty.NullVal(cty.String), + nil, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + AttributeValues{"foo": json.RawMessage(`"bar"`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + AttributeValues{"foo": json.RawMessage(`null`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon"), + }), + }), + AttributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + // Marked values + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon").Mark(marks.Sensitive), + }), + }), + AttributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + } + + for _, test := range tests { + got := marshalAttributeValues(test.Attr) + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalResources(t *testing.T) { + deposedKey := states.NewDeposedKey() + tests := map[string]struct { + 
Resources map[string]*states.Resource + Schemas *terraform.Schemas + Want []Resource + Err bool + }{ + "nil": { + nil, + nil, + nil, + false, + }, + "single resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "single resource_with_sensitive": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles","foozles":"sensuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`"sensuzles"`), + "woozles": 
json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with marks": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"foozles":"confuzles"}`), + AttrSensitivePaths: []cty.PathValueMarks{{ + Path: cty.Path{cty.GetAttrStep{Name: "foozles"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`"confuzles"`), + "woozles": json.RawMessage(`null`), + }, + SensitiveValues: json.RawMessage(`{"foozles":true}`), + }, + }, + false, + }, + "single resource wrong schema": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + SchemaVersion: 1, + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":["confuzles"]}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + nil, + true, + }, + "resource with count": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + 
Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.IntKey(0): { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar[0]", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: json.RawMessage(`0`), + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with for_each": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.StringKey("rockhopper"): { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar[\"rockhopper\"]", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: json.RawMessage(`"rockhopper"`), + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "deposed resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: 
addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ + states.DeposedKey(deposedKey): { + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + DeposedKey: deposedKey.String(), + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "deposed and current resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ + states.DeposedKey(deposedKey): { + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": 
json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + DeposedKey: deposedKey.String(), + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with marked map attr": { + map[string]*states.Resource{ + "test_map_attr.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_map_attr", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"data":{"woozles":"confuzles"}}`), + AttrSensitivePaths: []cty.PathValueMarks{{ + Path: cty.Path{cty.GetAttrStep{Name: "data"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_map_attr.bar", + Mode: "managed", + Type: "test_map_attr", + Name: "bar", + Index: nil, + ProviderName: "registry.terraform.io/hashicorp/test", + AttributeValues: AttributeValues{ + "data": json.RawMessage(`{"woozles":"confuzles"}`), + }, + SensitiveValues: json.RawMessage(`{"data":true}`), + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, err := marshalResources(test.Resources, addrs.RootModuleInstance, test.Schemas) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + diff := cmp.Diff(got, test.Want) + if diff != "" { + t.Fatalf("wrong result: 
%s\n", diff) + } + + }) + } +} + +func TestMarshalModules_basic(t *testing.T) { + childModule, _ := addrs.ParseModuleInstanceStr("module.child") + subModule, _ := addrs.ParseModuleInstanceStr("module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(childModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: childModule.Module(), + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + moduleMap := make(map[string][]addrs.ModuleInstance) + moduleMap[""] = []addrs.ModuleInstance{childModule, subModule} + + got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got) != 2 { + t.Fatalf("wrong result! 
got %d modules, expected 2", len(got)) + } + + if got[0].Address != "module.child" || got[1].Address != "module.submodule" { + t.Fatalf("wrong result! got %#v\n", got) + } + +} + +func TestMarshalModules_nested(t *testing.T) { + childModule, _ := addrs.ParseModuleInstanceStr("module.child") + subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(childModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: childModule.Module(), + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + moduleMap := make(map[string][]addrs.ModuleInstance) + moduleMap[""] = []addrs.ModuleInstance{childModule} + moduleMap[childModule.String()] = []addrs.ModuleInstance{subModule} + + got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) + + if err != nil { 
+ t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got) != 1 { + t.Fatalf("wrong result! got %d modules, expected 1", len(got)) + } + + if got[0].Address != "module.child" { + t.Fatalf("wrong result! got %#v\n", got) + } + + if got[0].ChildModules[0].Address != "module.child.module.submodule" { + t.Fatalf("wrong result! got %#v\n", got) + } +} + +func TestMarshalModules_parent_no_resources(t *testing.T) { + subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + got, err := marshalRootModule(testState, testSchemas()) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got.ChildModules) != 1 { + t.Fatalf("wrong result! got %d modules, expected 1", len(got.ChildModules)) + } + + if got.ChildModules[0].Address != "module.child" { + t.Fatalf("wrong result! got %#v\n", got) + } + + if got.ChildModules[0].ChildModules[0].Address != "module.child.module.submodule" { + t.Fatalf("wrong result! 
got %#v\n", got) + } +} + +func testSchemas() *terraform.Schemas { + return &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): { + ResourceTypes: map[string]*configschema.Block{ + "test_thing": { + Attributes: map[string]*configschema.Attribute{ + "woozles": {Type: cty.String, Optional: true, Computed: true}, + "foozles": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + "test_map_attr": { + Attributes: map[string]*configschema.Attribute{ + "data": {Type: cty.Map(cty.String), Optional: true, Computed: true, Sensitive: true}, + }, + }, + }, + }, + }, + } +} + +func TestSensitiveAsBool(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.False, + }, + { + cty.NullVal(cty.String), + cty.False, + }, + { + cty.StringVal("hello").Mark(marks.Sensitive), + cty.True, + }, + { + cty.NullVal(cty.String).Mark(marks.Sensitive), + cty.True, + }, + + { + cty.NullVal(cty.DynamicPseudoType).Mark(marks.Sensitive), + cty.True, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), + cty.False, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})).Mark(marks.Sensitive), + cty.True, + }, + { + cty.DynamicVal, + cty.False, + }, + { + cty.DynamicVal.Mark(marks.Sensitive), + cty.True, + }, + + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend").Mark(marks.Sensitive), + }), + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.SetValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + 
cty.SetValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello").Mark(marks.Sensitive)}), + cty.True, + }, + { + cty.EmptyTupleVal.Mark(marks.Sensitive), + cty.True, + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend").Mark(marks.Sensitive), + }), + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.MapValEmpty(cty.String), + cty.EmptyObjectVal, + }, + { + cty.MapValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }).Mark(marks.Sensitive), + cty.True, + }, + { + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }).Mark(marks.Sensitive), + cty.True, + }, + { + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known").Mark(marks.Sensitive), + }), + }), + 
cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.ListVal([]cty.Value{ + cty.MapValEmpty(cty.String), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("known").Mark(marks.Sensitive), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + { + cty.ObjectVal(map[string]cty.Value{ + "list": cty.UnknownVal(cty.List(cty.String)), + "set": cty.UnknownVal(cty.Set(cty.Bool)), + "tuple": cty.UnknownVal(cty.Tuple([]cty.Type{cty.String, cty.Number})), + "map": cty.UnknownVal(cty.Map(cty.String)), + "object": cty.UnknownVal(cty.Object(map[string]cty.Type{"a": cty.String})), + }), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.EmptyTupleVal, + "set": cty.EmptyTupleVal, + "tuple": cty.EmptyTupleVal, + "map": cty.EmptyObjectVal, + "object": cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := SensitiveAsBool(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} diff --git a/internal/command/login.go b/command/login.go similarity index 99% rename from internal/command/login.go rename to command/login.go index 6b1d8bddd4d2..74b53571d04d 100644 --- a/internal/command/login.go +++ b/command/login.go @@ -20,11 +20,11 @@ import ( svchost "github.com/hashicorp/terraform-svchost" svcauth "github.com/hashicorp/terraform-svchost/auth" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/command/cliconfig" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + 
"github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" uuid "github.com/hashicorp/go-uuid" "golang.org/x/oauth2" diff --git a/internal/command/login_test.go b/command/login_test.go similarity index 96% rename from internal/command/login_test.go rename to command/login_test.go index b612b7bbedec..da93327ea620 100644 --- a/internal/command/login_test.go +++ b/command/login_test.go @@ -11,11 +11,11 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/command/cliconfig" - oauthserver "github.com/hashicorp/terraform/internal/command/testdata/login-oauth-server" - tfeserver "github.com/hashicorp/terraform/internal/command/testdata/login-tfe-server" - "github.com/hashicorp/terraform/internal/command/webbrowser" - "github.com/hashicorp/terraform/internal/httpclient" + "github.com/hashicorp/terraform/command/cliconfig" + oauthserver "github.com/hashicorp/terraform/command/testdata/login-oauth-server" + tfeserver "github.com/hashicorp/terraform/command/testdata/login-tfe-server" + "github.com/hashicorp/terraform/command/webbrowser" + "github.com/hashicorp/terraform/httpclient" "github.com/hashicorp/terraform/version" ) diff --git a/internal/command/logout.go b/command/logout.go similarity index 97% rename from internal/command/logout.go rename to command/logout.go index 904ccc5b0628..5c510876ee82 100644 --- a/internal/command/logout.go +++ b/command/logout.go @@ -6,8 +6,8 @@ import ( "strings" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/command/cliconfig" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/tfdiags" ) // LogoutCommand is a Command implementation which removes 
stored credentials diff --git a/internal/command/logout_test.go b/command/logout_test.go similarity index 97% rename from internal/command/logout_test.go rename to command/logout_test.go index 6f2511ee7497..2fbff18f2342 100644 --- a/internal/command/logout_test.go +++ b/command/logout_test.go @@ -9,7 +9,7 @@ import ( svchost "github.com/hashicorp/terraform-svchost" svcauth "github.com/hashicorp/terraform-svchost/auth" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/command/cliconfig" + "github.com/hashicorp/terraform/command/cliconfig" ) func TestLogout(t *testing.T) { diff --git a/internal/command/meta.go b/command/meta.go similarity index 96% rename from internal/command/meta.go rename to command/meta.go index 884cefaad00b..00e02b7f6ea3 100644 --- a/internal/command/meta.go +++ b/command/meta.go @@ -19,24 +19,24 @@ import ( "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/local" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/command/webbrowser" - "github.com/hashicorp/terraform/internal/command/workdir" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/getproviders" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + 
"github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/command/webbrowser" + "github.com/hashicorp/terraform/command/workdir" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/getproviders" + legacy "github.com/hashicorp/terraform/legacy/terraform" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // Meta are the meta-options that are available on all or most commands. diff --git a/internal/command/meta_backend.go b/command/meta_backend.go similarity index 98% rename from internal/command/meta_backend.go rename to command/meta_backend.go index f61563d81544..0240fe6175a3 100644 --- a/internal/command/meta_backend.go +++ b/command/meta_backend.go @@ -16,22 +16,22 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/command/arguments" 
+ "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - backendInit "github.com/hashicorp/terraform/internal/backend/init" - backendLocal "github.com/hashicorp/terraform/internal/backend/local" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal "github.com/hashicorp/terraform/backend/local" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) // BackendOpts are the options used to initialize a backend.Backend. diff --git a/internal/command/meta_backend_migrate.go b/command/meta_backend_migrate.go similarity index 98% rename from internal/command/meta_backend_migrate.go rename to command/meta_backend_migrate.go index 9df931c09714..e96505e3723f 100644 --- a/internal/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -12,15 +12,15 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/remote" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/remote" + "github.com/hashicorp/terraform/cloud" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + 
"github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" ) type backendMigrateOpts struct { diff --git a/internal/command/meta_backend_migrate_test.go b/command/meta_backend_migrate_test.go similarity index 100% rename from internal/command/meta_backend_migrate_test.go rename to command/meta_backend_migrate_test.go diff --git a/internal/command/meta_backend_test.go b/command/meta_backend_test.go similarity index 98% rename from internal/command/meta_backend_test.go rename to command/meta_backend_test.go index 2fa61b618f7f..297cd03e562e 100644 --- a/internal/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -9,20 +9,20 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" - backendInit "github.com/hashicorp/terraform/internal/backend/init" - backendLocal "github.com/hashicorp/terraform/internal/backend/local" - backendInmem "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal "github.com/hashicorp/terraform/backend/local" + 
backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem" ) // Test empty directory with no config/state creates a local state. diff --git a/internal/command/meta_config.go b/command/meta_config.go similarity index 97% rename from internal/command/meta_config.go rename to command/meta_config.go index 349a791365cd..f6618bd27b2a 100644 --- a/internal/command/meta_config.go +++ b/command/meta_config.go @@ -9,13 +9,13 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" ) diff --git a/internal/command/meta_dependencies.go b/command/meta_dependencies.go similarity index 97% rename from internal/command/meta_dependencies.go rename to command/meta_dependencies.go index 1b0cb97f8df8..1e4267dfbbec 100644 --- a/internal/command/meta_dependencies.go +++ b/command/meta_dependencies.go @@ -4,8 +4,8 @@ import ( "log" "os" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/tfdiags" ) // dependenclyLockFilename is the filename of the dependency lock file. 
diff --git a/internal/command/meta_new.go b/command/meta_new.go similarity index 94% rename from internal/command/meta_new.go rename to command/meta_new.go index b89760a4c06b..a308a11e6647 100644 --- a/internal/command/meta_new.go +++ b/command/meta_new.go @@ -4,7 +4,7 @@ import ( "os" "strconv" - "github.com/hashicorp/terraform/internal/plans/planfile" + "github.com/hashicorp/terraform/plans/planfile" ) // NOTE: Temporary file until this branch is cleaned up. diff --git a/internal/command/meta_providers.go b/command/meta_providers.go similarity index 97% rename from internal/command/meta_providers.go rename to command/meta_providers.go index 05e1fe945022..927944838c10 100644 --- a/internal/command/meta_providers.go +++ b/command/meta_providers.go @@ -11,16 +11,16 @@ import ( plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/addrs" - terraformProvider "github.com/hashicorp/terraform/internal/builtin/providers/terraform" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/moduletest" - tfplugin "github.com/hashicorp/terraform/internal/plugin" - tfplugin6 "github.com/hashicorp/terraform/internal/plugin6" - "github.com/hashicorp/terraform/internal/providercache" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/moduletest" + tfplugin "github.com/hashicorp/terraform/plugin" + tfplugin6 "github.com/hashicorp/terraform/plugin6" + "github.com/hashicorp/terraform/providercache" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" ) // The TF_DISABLE_PLUGIN_TLS environment variable is intended only for use by 
diff --git a/internal/command/meta_test.go b/command/meta_test.go similarity index 98% rename from internal/command/meta_test.go rename to command/meta_test.go index b8b6c833725b..ace78cf3c0f3 100644 --- a/internal/command/meta_test.go +++ b/command/meta_test.go @@ -11,9 +11,9 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/local" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" ) diff --git a/internal/command/meta_vars.go b/command/meta_vars.go similarity index 97% rename from internal/command/meta_vars.go rename to command/meta_vars.go index f082daa0c776..2cecba87a099 100644 --- a/internal/command/meta_vars.go +++ b/command/meta_vars.go @@ -9,10 +9,10 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" hcljson "github.com/hashicorp/hcl/v2/json" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // VarEnvPrefix is the prefix for environment variables that represent values diff --git a/internal/command/metadata_command.go b/command/metadata_command.go similarity index 100% rename from internal/command/metadata_command.go rename to command/metadata_command.go diff --git a/internal/command/metadata_functions.go b/command/metadata_functions.go similarity index 94% rename from internal/command/metadata_functions.go rename to command/metadata_functions.go index 43609cc16b18..d8aa4fa87ce3 100644 --- a/internal/command/metadata_functions.go +++ 
b/command/metadata_functions.go @@ -3,8 +3,8 @@ package command import ( "fmt" - "github.com/hashicorp/terraform/internal/command/jsonfunction" - "github.com/hashicorp/terraform/internal/lang" + "github.com/hashicorp/terraform/command/jsonfunction" + "github.com/hashicorp/terraform/lang" "github.com/zclconf/go-cty/cty/function" ) diff --git a/internal/command/metadata_functions_test.go b/command/metadata_functions_test.go similarity index 100% rename from internal/command/metadata_functions_test.go rename to command/metadata_functions_test.go diff --git a/command/output.go b/command/output.go new file mode 100644 index 000000000000..9f5411ac02f1 --- /dev/null +++ b/command/output.go @@ -0,0 +1,123 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// OutputCommand is a Command implementation that reads an output +// from a Terraform state and prints it. 
+type OutputCommand struct { + Meta +} + +func (c *OutputCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseOutput(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("output") + return 1 + } + + view := views.NewOutput(args.ViewType, c.View) + + // Fetch data from state + outputs, diags := c.Outputs(args.StatePath) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Render the view + viewDiags := view.Output(args.Name, outputs) + diags = diags.Append(viewDiags) + + view.Diagnostics(diags) + + if diags.HasErrors() { + return 1 + } + + return 0 +} + +func (c *OutputCommand) Outputs(statePath string) (map[string]*states.OutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Allow state path override + if statePath != "" { + c.Meta.statePath = statePath + } + + // Load the backend + b, backendDiags := c.Backend(nil) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + return nil, diags + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + env, err := c.Workspace() + if err != nil { + diags = diags.Append(fmt.Errorf("Error selecting workspace: %s", err)) + return nil, diags + } + + // Get the state + stateStore, err := b.StateMgr(env) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to load state: %s", err)) + return nil, diags + } + + output, err := stateStore.GetRootOutputValues() + if err != nil { + return nil, diags.Append(err) + } + + return output, diags +} + +func (c *OutputCommand) Help() string { + helpText := ` +Usage: terraform [global options] output [options] [NAME] + + Reads an output variable from a Terraform state file and prints + the value. With no additional arguments, output will display all + the outputs for the root module. 
If NAME is not specified, all + outputs are printed. + +Options: + + -state=path Path to the state file to read. Defaults to + "terraform.tfstate". Ignored when remote + state is used. + + -no-color If specified, output won't contain any color. + + -json If specified, machine readable output will be + printed in JSON format. + + -raw For value types that can be automatically + converted to a string, will print the raw + string directly, rather than a human-oriented + representation of the value. +` + return strings.TrimSpace(helpText) +} + +func (c *OutputCommand) Synopsis() string { + return "Show output values from your root module" +} diff --git a/command/output_test.go b/command/output_test.go new file mode 100644 index 000000000000..22e8afe2364b --- /dev/null +++ b/command/output_test.go @@ -0,0 +1,322 @@ +package command + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" +) + +func TestOutput(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + if actual != `"bar"` { + t.Fatalf("bad: %#v", actual) + } +} + +func TestOutput_json(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + statePath := testStateFile(t, 
originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-json", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + expected := "{\n \"foo\": {\n \"sensitive\": false,\n \"type\": \"string\",\n \"value\": \"bar\"\n }\n}" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestOutput_emptyOutputs(t *testing.T) { + originalState := states.NewState() + statePath := testStateFile(t, originalState) + + p := testProvider() + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-no-color", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + // Warning diagnostics should go to stdout + if got, want := output.Stdout(), "Warning: No outputs found"; !strings.Contains(got, want) { + t.Fatalf("bad output: expected to contain %q, got:\n%s", want, got) + } +} + +func TestOutput_badVar(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "bar", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_blank(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + 
addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + s.SetOutputValue( + addrs.OutputValue{Name: "name"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("john-doe"), + false, + ) + }) + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "", + } + + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + expectedOutput := "foo = \"bar\"\nname = \"john-doe\"\n" + if got := output.Stdout(); got != expectedOutput { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", got, expectedOutput) + } +} + +func TestOutput_manyArgs(t *testing.T) { + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "bad", + "bad", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestOutput_noArgs(t *testing.T) { + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestOutput_noState(t *testing.T) { + originalState := states.NewState() + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_noVars(t *testing.T) { + originalState := states.NewState() + + statePath := 
testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "bar", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_stateDefault(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + // Write the state file in a temporary directory with the + // default filename. + td := testTempDir(t) + statePath := filepath.Join(td, DefaultStateFilename) + + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(originalState, f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Change to that directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(filepath.Dir(statePath)); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + if actual != `"bar"` { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/command/plan.go b/command/plan.go new file mode 100644 index 000000000000..b834a38ffcac --- /dev/null +++ b/command/plan.go @@ -0,0 +1,278 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/tfdiags" +) + +// PlanCommand is a Command 
implementation that compares a Terraform +// configuration to an actual infrastructure and shows the differences. +type PlanCommand struct { + Meta +} + +func (c *PlanCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Propagate -no-color for legacy use of Ui. The remote backend and + // cloud package use this; it should be removed when/if they are + // migrated to views. + c.Meta.color = !common.NoColor + c.Meta.Color = c.Meta.color + + // Parse and validate flags + args, diags := arguments.ParsePlan(rawArgs) + + // Instantiate the view, even if there are flag errors, so that we render + // diagnostics according to the desired view + view := views.NewPlan(args.ViewType, c.View) + + if diags.HasErrors() { + view.Diagnostics(diags) + view.HelpPrompt() + return 1 + } + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + // FIXME: the -input flag value is needed to initialize the backend and the + // operation, but there is no clear path to pass this value down, so we + // continue to mutate the Meta object state for now. + c.Meta.input = args.InputEnabled + + // FIXME: the -parallelism flag is used to control the concurrency of + // Terraform operations. At the moment, this value is used both to + // initialize the backend via the ContextOpts field inside CLIOpts, and to + // set a largely unused field on the Operation request. Again, there is no + // clear path to pass this value down, so we continue to mutate the Meta + // object state for now. 
+ c.Meta.parallelism = args.Operation.Parallelism + + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) + + // Prepare the backend with the backend-specific arguments + be, beDiags := c.PrepareBackend(args.State, args.ViewType) + diags = diags.Append(beDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Build the operation request + opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation, args.OutPath) + diags = diags.Append(opDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Collect variable value and add them to the operation request + diags = diags.Append(c.GatherVariables(opReq, args.Vars)) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + view.Diagnostics(diags) + diags = nil + + // Perform the operation + op, err := c.RunOperation(be, opReq) + if err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + if op.Result != backend.OperationSuccess { + return op.Result.ExitStatus() + } + if args.DetailedExitCode && !op.PlanEmpty { + return 2 + } + + return op.Result.ExitStatus() +} + +func (c *PlanCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { + // FIXME: we need to apply the state arguments to the meta object here + // because they are later used when initializing the backend. Carving a + // path to pass these arguments to the functions that need them is + // difficult but would make their use easier to understand. 
+ c.Meta.applyStateArguments(args) + + backendConfig, diags := c.loadBackendConfig(".") + if diags.HasErrors() { + return nil, diags + } + + // Load the backend + be, beDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + ViewType: viewType, + }) + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + return nil, diags + } + + return be, diags +} + +func (c *PlanCommand) OperationRequest( + be backend.Enhanced, + view views.Plan, + viewType arguments.ViewType, + args *arguments.Operation, + planOutPath string, +) (*backend.Operation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Build the operation + opReq := c.Operation(be, viewType) + opReq.ConfigDir = "." + opReq.PlanMode = args.PlanMode + opReq.Hooks = view.Hooks() + opReq.PlanRefresh = args.Refresh + opReq.PlanOutPath = planOutPath + opReq.Targets = args.Targets + opReq.ForceReplace = args.ForceReplace + opReq.Type = backend.OperationTypePlan + opReq.View = view.Operation() + + var err error + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) + return nil, diags + } + + return opReq, diags +} + +func (c *PlanCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. 
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} + opReq.Variables, diags = c.collectVariableValues() + + return diags +} + +func (c *PlanCommand) Help() string { + helpText := ` +Usage: terraform [global options] plan [options] + + Generates a speculative execution plan, showing what actions Terraform + would take to apply the current configuration. This command will not + actually perform the planned actions. + + You can optionally save the plan to a file, which you can then pass to + the "apply" command to perform exactly the actions described in the plan. + +Plan Customization Options: + + The following options customize how Terraform will produce its plan. You + can also use these options when you run "terraform apply" without passing + it a saved plan, in order to plan and apply in a single command. + + -destroy Select the "destroy" planning mode, which creates a plan + to destroy all objects currently managed by this + Terraform configuration instead of the usual behavior. + + -refresh-only Select the "refresh only" planning mode, which checks + whether remote objects still match the outcome of the + most recent Terraform apply but does not propose any + actions to undo any changes made outside of Terraform. + + -refresh=false Skip checking for external changes to remote objects + while creating the plan. This can potentially make + planning faster, but at the expense of possibly planning + against a stale record of the remote system state. + + -replace=resource Force replacement of a particular resource instance using + its resource address. If the plan would've normally + produced an update or no-op action for this instance, + Terraform will plan to replace it instead. You can use + this option multiple times to replace more than one object. 
+ + -target=resource Limit the planning operation to only the given module, + resource, or resource instance and all of its + dependencies. You can use this option multiple times to + include more than one object. This is for exceptional + use only. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +Other Options: + + -compact-warnings If Terraform produces any warnings that are not + accompanied by errors, shows them in a more compact form + that includes only the summary messages. + + -detailed-exitcode Return detailed exit codes when the command exits. This + will change the meaning of exit codes to: + 0 - Succeeded, diff is empty (no changes) + 1 - Errored + 2 - Succeeded, there is a diff + + -input=true Ask for input for variables if not directly set. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -out=path Write a plan file to the given path. This can be used as + input to the "apply" command. + + -parallelism=n Limit the number of concurrent operations. Defaults to 10. + + -state=statefile A legacy option used for the local backend only. See the + local backend's documentation for more information. 
+` + return strings.TrimSpace(helpText) +} + +func (c *PlanCommand) Synopsis() string { + return "Show changes required by the current configuration" +} diff --git a/command/plan_test.go b/command/plan_test.go new file mode 100644 index 000000000000..e8f60beb10ea --- /dev/null +++ b/command/plan_test.go @@ -0,0 +1,1622 @@ +package command + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + backendinit "github.com/hashicorp/terraform/backend/init" + "github.com/hashicorp/terraform/checks" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestPlan(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +func TestPlan_lockedState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + unlock, err := testLockState(t, testDataDir, filepath.Join(td, DefaultStateFilename)) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + if code == 0 { + t.Fatal("expected error", done(t).Stdout()) + } + + output := done(t).Stderr() + if 
!strings.Contains(output, "lock") { + t.Fatal("command output does not look like a lock error:", output) + } +} + +func TestPlan_plan(t *testing.T) { + testCwd(t) + + planPath := testPlanFileNoop(t) + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{planPath} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("wrong exit status %d; want 1\nstderr: %s", code, output.Stderr()) + } +} + +func TestPlan_destroy(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + outPath := testTempFile(t) + statePath := testStateFile(t, originalState) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-destroy", + "-out", outPath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + for _, rc := range plan.Changes.Resources { + if got, want := rc.Action, plans.Delete; got != want { + t.Fatalf("wrong action %s for %s; want %s\nplanned change: %s", got, rc.Addr, want, spew.Sdump(rc)) + } + } +} + +func TestPlan_noState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := 
planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that refresh was called + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_instance"].Block.ImpliedType()) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_outPath(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.NullVal(cty.EmptyObject), + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read +} + +func TestPlan_outPathNoChange(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + // Aside from "id" (which is computed) the values here must + // exactly match the values in the 
"plan" test fixture in order + // to produce the empty plan we need for this test. + AttrsJSON: []byte(`{"id":"bar","ami":"bar","network_interface":[{"description":"Main network interface","device_index":"0"}]}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-out", outPath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + if !plan.Changes.Empty() { + t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) + } +} + +func TestPlan_outPathWithError(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-fail-condition"), td) + defer testChdir(t, td)() + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.NullVal(cty.EmptyObject), + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("expected non-zero exit status", output) + } + + plan := testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read + if !plan.Errored { + t.Fatal("plan should be marked with Errored") + } + + if plan.Checks == nil { + t.Fatal("plan contains no checks") + } + + // the checks should only contain one failure + results := plan.Checks.ConfigResults.Elements() + if len(results) != 1 { + t.Fatal("incorrect 
number of check results", len(results)) + } + if results[0].Value.Status != checks.StatusFail { + t.Errorf("incorrect status, got %s", results[0].Value.Status) + } +} + +// When using "-out" with a backend, the plan should encode the backend config +func TestPlan_outBackend(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-out-backend"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","ami":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + // Set up our backend state + dataState, srv := testBackendState(t, originalState, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + outPath := "foo" + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "ami": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Logf("stdout: %s", output.Stdout()) + t.Fatalf("plan command failed 
with exit code %d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + if !plan.Changes.Empty() { + t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) + } + + if got, want := plan.Backend.Type, "http"; got != want { + t.Errorf("wrong backend type %q; want %q", got, want) + } + if got, want := plan.Backend.Workspace, "default"; got != want { + t.Errorf("wrong backend workspace %q; want %q", got, want) + } + { + httpBackend := backendinit.Backend("http")() + schema := httpBackend.ConfigSchema() + got, err := plan.Backend.Config.Decode(schema.ImpliedType()) + if err != nil { + t.Fatalf("failed to decode backend config in plan: %s", err) + } + want, err := dataState.Backend.Config(schema) + if err != nil { + t.Fatalf("failed to decode cached config: %s", err) + } + if !want.RawEquals(got) { + t.Errorf("wrong backend config\ngot: %#v\nwant: %#v", got, want) + } + } +} + +func TestPlan_refreshFalse(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-refresh=false", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not have been called") + } +} + +func TestPlan_state(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := testState() + statePath := testStateFile(t, originalState) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ 
+ "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "device_index": cty.String, + "description": cty.String, + })), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_stateDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + // Generate state and move it to the default path + originalState := testState() + statePath := testStateFile(t, originalState) + os.Rename(statePath, path.Join(td, "terraform.tfstate")) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "device_index": cty.String, + "description": cty.String, + })), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_validate(t *testing.T) { + // This is triggered by not asking for input so we have to set this to false + test = false + defer func() { test = true }() + + td := t.TempDir() + 
testCopyDir(t, testFixturePath("plan-invalid"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-no-color"} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + actual := output.Stderr() + if want := "Error: Invalid count argument"; !strings.Contains(actual, want) { + t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) + } + if want := "9: count = timestamp()"; !strings.Contains(actual, want) { + t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) + } +} + +func TestPlan_vars(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{ + "-var", "foo=bar", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: 
%d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varsInvalid(t *testing.T) { + testCases := []struct { + args []string + wantErr string + }{ + { + []string{"-var", "foo"}, + `The given -var option "foo" is not correctly specified.`, + }, + { + []string{"-var", "foo = bar"}, + `Variable name "foo " is invalid due to trailing space.`, + }, + } + + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + for _, tc := range testCases { + t.Run(strings.Join(tc.args, " "), func(t *testing.T) { + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run(tc.args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + + got := output.Stderr() + if !strings.Contains(got, tc.wantErr) { + t.Fatalf("bad error output, want %q, got:\n%s", tc.wantErr, got) + } + }) + } +} + +func TestPlan_varsUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + // The plan command will prompt for interactive input of var.foo. + // We'll answer "bar" to that prompt, which should then allow this + // configuration to apply even though var.foo doesn't have a + // default value and there are no -var arguments on our command line. 
+ + // This will (helpfully) panic if more than one variable is requested during plan: + // https://github.com/hashicorp/terraform/issues/26027 + close := testInteractiveInput(t, []string{"bar"}) + defer close() + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +// This test adds a required argument to the test provider to validate +// processing of user input: +// https://github.com/hashicorp/terraform/issues/26035 +func TestPlan_providerArgumentUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // The plan command will prompt for interactive input of provider.test.region + defaultInputReader = bytes.NewBufferString("us-east-1\n") + + p := planFixtureProvider() + // override the planFixtureProvider schema to include a required provider argument + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: 
true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +// Test that terraform properly merges provider configuration that's split +// between config files and interactive input variables. +// https://github.com/hashicorp/terraform/issues/28956 +func TestPlan_providerConfigMerge(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-provider-input"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // The plan command will prompt for interactive input of provider.test.region + defaultInputReader = bytes.NewBufferString("us-east-1\n") + + p := planFixtureProvider() + // override the planFixtureProvider schema to include a required provider argument and a nested block + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + "url": {Type: cty.String, Required: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "auth": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Required: true}, + "password": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + }, + ResourceTypes: 
map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure provider not called") + } + + // For this test, we want to confirm that we've sent the expected config + // value *to* the provider. + got := p.ConfigureProviderRequest.Config + want := cty.ObjectVal(map[string]cty.Value{ + "auth": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("one"), + "password": cty.StringVal("onepw"), + }), + cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("two"), + "password": cty.StringVal("twopw"), + }), + }), + "region": cty.StringVal("us-east-1"), + "url": cty.StringVal("example.com"), + }) + + if !got.RawEquals(want) { + t.Fatal("wrong provider config") + } + +} + +func TestPlan_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := testTempFile(t) + if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{ + "-var-file", 
varFilePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varFileDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varFileWithDecls(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := testTempFile(t) + if err := ioutil.WriteFile(varFilePath, []byte(planVarFileWithDecl), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-var-file", varFilePath, + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatalf("succeeded; want failure\n\n%s", output.Stdout()) + } + + msg := output.Stderr() + if got, want := msg, "Variable declaration in .tfvars file"; !strings.Contains(got, 
want) { + t.Fatalf("missing expected error message\nwant message containing %q\ngot:\n%s", want, got) + } +} + +func TestPlan_detailedExitcode(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + t.Run("return 1", func(t *testing.T) { + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + // Running plan without setting testingOverrides is similar to plan without init + View: view, + }, + } + code := c.Run([]string{"-detailed-exitcode"}) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + }) + + t.Run("return 2", func(t *testing.T) { + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-detailed-exitcode"}) + output := done(t) + if code != 2 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + }) +} + +func TestPlan_detailedExitcode_emptyDiff(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-emptydiff"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-detailed-exitcode"} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +func TestPlan_shutdown(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-shutdown"), td) + defer testChdir(t, td)() + + cancelled := make(chan struct{}) + shutdownCh := make(chan struct{}) + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + ShutdownCh: shutdownCh, + }, + } + + p.StopFn = func() error { + close(cancelled) + return nil + } + + var once sync.Once + + 
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + once.Do(func() { + shutdownCh <- struct{}{} + }) + + // Because of the internal lock in the MockProvider, we can't + // coordinate directly with the calling of Stop, and making the + // MockProvider concurrent is disruptive to a lot of existing tests. + // Wait here a moment to help make sure the main goroutine gets to the + // Stop call before we exit, or the plan may finish before it can be + // canceled. + time.Sleep(200 * time.Millisecond) + + s := req.ProposedNewState.AsValueMap() + s["ami"] = cty.StringVal("bar") + resp.PlannedState = cty.ObjectVal(s) + return + } + + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + code := c.Run([]string{}) + output := done(t) + if code != 1 { + t.Errorf("wrong exit code %d; want 1\noutput:\n%s", code, output.Stdout()) + } + + select { + case <-cancelled: + default: + t.Error("command not cancelled") + } +} + +func TestPlan_init_required(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + // Running plan without setting testingOverrides is similar to plan without init + View: view, + }, + } + + args := []string{"-no-color"} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("expected error, got success") + } + got := output.Stderr() + if !(strings.Contains(got, "terraform init") && strings.Contains(got, "provider registry.terraform.io/hashicorp/test: required by this configuration but no version is selected")) { + t.Fatal("wrong error message in output:", got) + } +} + +// Config with multiple resources, targeting plan of a subset 
+func TestPlan_targeted(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "3 to add, 0 to change, 0 to destroy"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } +} + +// Diagnostics for invalid -target flags +func TestPlan_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if 
!strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + +func TestPlan_replace(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-replace"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"hello"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-no-color", + "-replace", "test_instance.a", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) + } + + stdout := output.Stdout() + if got, want := stdout, "1 to add, 0 to change, 1 to destroy"; !strings.Contains(got, want) { + t.Errorf("wrong plan summary\ngot output:\n%s\n\nwant substring: %s", got, want) + } + if got, want := stdout, "test_instance.a will be replaced, as requested"; !strings.Contains(got, want) { + t.Errorf("missing 
replace explanation\ngot output:\n%s\n\nwant substring: %s", got, want) + } +} + +// Verify that the parallelism flag allows no more than the desired number of +// concurrent calls to PlanResourceChange. +func TestPlan_parallelism(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("parallelism"), td) + defer testChdir(t, td)() + + par := 4 + + // started is a semaphore that we use to ensure that we never have more + // than "par" plan operations happening concurrently + started := make(chan struct{}, par) + + // beginCtx is used as a starting gate to hold back PlanResourceChange + // calls until we reach the desired concurrency. The cancel func "begin" is + // called once we reach the desired concurrency, allowing all apply calls + // to proceed in unison. + beginCtx, begin := context.WithCancel(context.Background()) + + // Since our mock provider has its own mutex preventing concurrent calls + // to ApplyResourceChange, we need to use a number of separate providers + // here. They will all have the same mock implementation function assigned + // but crucially they will each have their own mutex. + providerFactories := map[addrs.Provider]providers.Factory{} + for i := 0; i < 10; i++ { + name := fmt.Sprintf("test%d", i) + provider := &terraform.MockProvider{} + provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + name + "_instance": {Block: &configschema.Block{}}, + }, + } + provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + // If we ever have more than our intended parallelism number of + // plan operations running concurrently, the semaphore will fail. 
+ select { + case started <- struct{}{}: + defer func() { + <-started + }() + default: + t.Fatal("too many concurrent apply operations") + } + + // If we never reach our intended parallelism, the context will + // never be canceled and the test will time out. + if len(started) >= par { + begin() + } + <-beginCtx.Done() + + // do some "work" + // Not required for correctness, but makes it easier to spot a + // failure when there is more overlap. + time.Sleep(10 * time.Millisecond) + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider) + } + testingOverrides := &testingOverrides{ + Providers: providerFactories, + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: testingOverrides, + View: view, + }, + } + + args := []string{ + fmt.Sprintf("-parallelism=%d", par), + } + + res := c.Run(args) + output := done(t) + if res != 0 { + t.Fatal(output.Stdout()) + } +} + +func TestPlan_warnings(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + t.Run("full warnings", func(t *testing.T) { + p := planWarningsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + code := c.Run([]string{}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) + wantWarnings := []string{ + "warning 1", + "warning 2", + "warning 3", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + p := planWarningsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(p), + View: view, + }, + } + code := c.Run([]string{"-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) + // and the message that plan was run with -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "warning 3", + "To see the full warning notes, run Terraform without -compact-warnings.", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +func TestPlan_jsonGoldenReference(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-json", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + checkGoldenReference(t, output, "plan") +} + +// planFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan . This schema should be +// assigned to a mock provider named "test". 
+func planFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +// planFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan. This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan +// step just passing through the new object proposed by Terraform Core. 
+func planFixtureProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +// planVarsFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan-vars . This schema should be +// assigned to a mock provider named "test". +func planVarsFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// planVarsFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan-vars. This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan +// step just passing through the new object proposed by Terraform Core. 
+func planVarsFixtureProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planVarsFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +// planFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan. This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, returning 3 warnings. +func planWarningsFixtureProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + tfdiags.SimpleWarning("warning 3"), + }, + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +const planVarFile = ` +foo = "bar" +` + +const planVarFileWithDecl = ` +foo = "bar" + +variable "nope" { +} +` diff --git a/internal/command/plugins.go b/command/plugins.go similarity index 91% rename from internal/command/plugins.go rename to command/plugins.go index 7467b09db96a..bfa0df42fd46 100644 --- a/internal/command/plugins.go +++ b/command/plugins.go @@ -10,13 
+10,13 @@ import ( plugin "github.com/hashicorp/go-plugin" "github.com/kardianos/osext" - fileprovisioner "github.com/hashicorp/terraform/internal/builtin/provisioners/file" - localexec "github.com/hashicorp/terraform/internal/builtin/provisioners/local-exec" - remoteexec "github.com/hashicorp/terraform/internal/builtin/provisioners/remote-exec" - "github.com/hashicorp/terraform/internal/logging" - tfplugin "github.com/hashicorp/terraform/internal/plugin" - "github.com/hashicorp/terraform/internal/plugin/discovery" - "github.com/hashicorp/terraform/internal/provisioners" + fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" + localexec "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + remoteexec "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" + "github.com/hashicorp/terraform/logging" + tfplugin "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/provisioners" ) // NOTE WELL: The logic in this file is primarily about plugin types OTHER THAN diff --git a/internal/command/plugins_lock.go b/command/plugins_lock.go similarity index 100% rename from internal/command/plugins_lock.go rename to command/plugins_lock.go diff --git a/internal/command/plugins_lock_test.go b/command/plugins_lock_test.go similarity index 100% rename from internal/command/plugins_lock_test.go rename to command/plugins_lock_test.go diff --git a/internal/command/plugins_test.go b/command/plugins_test.go similarity index 100% rename from internal/command/plugins_test.go rename to command/plugins_test.go diff --git a/internal/command/providers.go b/command/providers.go similarity index 96% rename from internal/command/providers.go rename to command/providers.go index c55a91774a9f..6a68617efe58 100644 --- a/internal/command/providers.go +++ b/command/providers.go @@ -6,9 +6,9 @@ import ( "github.com/xlab/treeprint" - "github.com/hashicorp/terraform/internal/configs" - 
"github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/tfdiags" ) // ProvidersCommand is a Command implementation that prints out information diff --git a/internal/command/providers_lock.go b/command/providers_lock.go similarity index 98% rename from internal/command/providers_lock.go rename to command/providers_lock.go index 7dcf20db11b1..6c98b8e569b6 100644 --- a/internal/command/providers_lock.go +++ b/command/providers_lock.go @@ -6,11 +6,11 @@ import ( "net/url" "os" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/providercache" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/providercache" + "github.com/hashicorp/terraform/tfdiags" ) type providersLockChangeType string diff --git a/internal/command/providers_lock_test.go b/command/providers_lock_test.go similarity index 97% rename from internal/command/providers_lock_test.go rename to command/providers_lock_test.go index 5ba792b7e62b..5e38616f5af9 100644 --- a/internal/command/providers_lock_test.go +++ b/command/providers_lock_test.go @@ -8,9 +8,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" "github.com/mitchellh/cli" ) diff --git a/internal/command/providers_mirror.go b/command/providers_mirror.go similarity index 98% rename from 
internal/command/providers_mirror.go rename to command/providers_mirror.go index f6ab2258244c..1f131ae12007 100644 --- a/internal/command/providers_mirror.go +++ b/command/providers_mirror.go @@ -10,9 +10,9 @@ import ( "github.com/apparentlymart/go-versions/versions" "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/tfdiags" ) // ProvidersMirrorCommand is a Command implementation that implements the diff --git a/internal/command/providers_mirror_test.go b/command/providers_mirror_test.go similarity index 100% rename from internal/command/providers_mirror_test.go rename to command/providers_mirror_test.go diff --git a/internal/command/providers_schema.go b/command/providers_schema.go similarity index 92% rename from internal/command/providers_schema.go rename to command/providers_schema.go index 61201192c368..f9604ae003e1 100644 --- a/internal/command/providers_schema.go +++ b/command/providers_schema.go @@ -4,10 +4,10 @@ import ( "fmt" "os" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/tfdiags" ) // ProvidersCommand is a Command implementation that prints out information diff --git a/internal/command/providers_schema_test.go b/command/providers_schema_test.go similarity index 95% rename from internal/command/providers_schema_test.go rename to command/providers_schema_test.go index 9aa8f810c7a3..75c4652a6357 100644 --- 
a/internal/command/providers_schema_test.go +++ b/command/providers_schema_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/providers_test.go b/command/providers_test.go similarity index 100% rename from internal/command/providers_test.go rename to command/providers_test.go diff --git a/internal/command/push.go b/command/push.go similarity index 95% rename from internal/command/push.go rename to command/push.go index ee1544926d33..27e6d6051fb2 100644 --- a/internal/command/push.go +++ b/command/push.go @@ -3,7 +3,7 @@ package command import ( "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) type PushCommand struct { diff --git a/command/refresh.go b/command/refresh.go new file mode 100644 index 000000000000..532ee05aef5e --- /dev/null +++ b/command/refresh.go @@ -0,0 +1,227 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/tfdiags" +) + +// RefreshCommand is a cli.Command implementation that refreshes the state +// file. +type RefreshCommand struct { + Meta +} + +func (c *RefreshCommand) Run(rawArgs []string) int { + var diags tfdiags.Diagnostics + + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Propagate -no-color for legacy use of Ui. 
The remote backend and + // cloud package use this; it should be removed when/if they are + // migrated to views. + c.Meta.color = !common.NoColor + c.Meta.Color = c.Meta.color + + // Parse and validate flags + args, diags := arguments.ParseRefresh(rawArgs) + + // Instantiate the view, even if there are flag errors, so that we render + // diagnostics according to the desired view + view := views.NewRefresh(args.ViewType, c.View) + + if diags.HasErrors() { + view.Diagnostics(diags) + view.HelpPrompt() + return 1 + } + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + // FIXME: the -input flag value is needed to initialize the backend and the + // operation, but there is no clear path to pass this value down, so we + // continue to mutate the Meta object state for now. + c.Meta.input = args.InputEnabled + + // FIXME: the -parallelism flag is used to control the concurrency of + // Terraform operations. At the moment, this value is used both to + // initialize the backend via the ContextOpts field inside CLIOpts, and to + // set a largely unused field on the Operation request. Again, there is no + // clear path to pass this value down, so we continue to mutate the Meta + // object state for now. 
+ c.Meta.parallelism = args.Operation.Parallelism + + // Prepare the backend with the backend-specific arguments + be, beDiags := c.PrepareBackend(args.State, args.ViewType) + diags = diags.Append(beDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Build the operation request + opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation) + diags = diags.Append(opDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Collect variable value and add them to the operation request + diags = diags.Append(c.GatherVariables(opReq, args.Vars)) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + view.Diagnostics(diags) + diags = nil + + // Perform the operation + op, err := c.RunOperation(be, opReq) + if err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + if op.State != nil { + view.Outputs(op.State.RootModule().OutputValues) + } + + return op.Result.ExitStatus() +} + +func (c *RefreshCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { + // FIXME: we need to apply the state arguments to the meta object here + // because they are later used when initializing the backend. Carving a + // path to pass these arguments to the functions that need them is + // difficult but would make their use easier to understand. 
+ c.Meta.applyStateArguments(args) + + backendConfig, diags := c.loadBackendConfig(".") + if diags.HasErrors() { + return nil, diags + } + + // Load the backend + be, beDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + ViewType: viewType, + }) + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + return nil, diags + } + + return be, diags +} + +func (c *RefreshCommand) OperationRequest(be backend.Enhanced, view views.Refresh, viewType arguments.ViewType, args *arguments.Operation, +) (*backend.Operation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Build the operation + opReq := c.Operation(be, viewType) + opReq.ConfigDir = "." + opReq.Hooks = view.Hooks() + opReq.Targets = args.Targets + opReq.Type = backend.OperationTypeRefresh + opReq.View = view.Operation() + + var err error + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) + return nil, diags + } + + return opReq, diags +} + +func (c *RefreshCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. 
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} + opReq.Variables, diags = c.collectVariableValues() + + return diags +} + +func (c *RefreshCommand) Help() string { + helpText := ` +Usage: terraform [global options] refresh [options] + + Update the state file of your infrastructure with metadata that matches + the physical resources they are tracking. + + This will not modify your infrastructure, but it can modify your + state file to update metadata. This metadata might cause new changes + to occur when you generate a plan or call apply next. + +Options: + + -compact-warnings If Terraform produces any warnings that are not + accompanied by errors, show them in a more compact form + that includes only the summary messages. + + -input=true Ask for input for variables if not directly set. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -parallelism=n Limit the number of concurrent operations. Defaults to 10. + + -target=resource Resource to target. Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + + -var 'foo=bar' Set a variable in the Terraform configuration. This + flag can be set multiple times. + + -var-file=foo Set variables in the Terraform configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. 
+` + return strings.TrimSpace(helpText) +} + +func (c *RefreshCommand) Synopsis() string { + return "Update the state to match remote systems" +} diff --git a/command/refresh_test.go b/command/refresh_test.go new file mode 100644 index 000000000000..3080891572fd --- /dev/null +++ b/command/refresh_test.go @@ -0,0 +1,975 @@ +package command + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" +) + +var equateEmpty = cmpopts.EquateEmpty() + +func TestRefresh(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newStateFile, err := 
statefile.Read(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newStateFile.State.String()) + expected := strings.TrimSpace(testRefreshStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestRefresh_empty(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-empty"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not have been called") + } +} + +func TestRefresh_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("expected error") + } + + got := output.Stderr() + if !strings.Contains(got, "lock") { + 
t.Fatal("command output does not look like a lock error:", got) + } +} + +func TestRefresh_cwd(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(testFixturePath("refresh")); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newStateFile, err := statefile.Read(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newStateFile.State.String()) + expected := strings.TrimSpace(testRefreshCwdStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestRefresh_defaultState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + originalState := testState() + + // Write the state file in a temporary directory with the + // default filename. 
+ statePath := testStateFile(t, originalState) + + localState := statemgr.NewFilesystem(statePath) + if err := localState.RefreshState(); err != nil { + t.Fatal(err) + } + s := localState.State() + if s == nil { + t.Fatal("empty test state") + } + + // Change to that directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(filepath.Dir(statePath)); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + newState := testStateRead(t, statePath) + + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"yes\"\n }"), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + backupState := testStateRead(t, statePath+DefaultBackupExtension) + + actual = backupState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected = originalState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } +} + +func 
TestRefresh_outPath(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := ioutil.TempFile(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + outf.Close() + os.Remove(outPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !reflect.DeepEqual(newState, state) { + t.Fatalf("bad: %#v", newState) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"yes\"\n }"), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + if _, err := os.Stat(outPath + DefaultBackupExtension); !os.IsNotExist(err) { + if err != nil { + t.Fatalf("failed to test for backup file: %s", err) + } + t.Fatalf("backup file exists, but it should not because output file did not initially exist") + } +} + +func TestRefresh_var(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, 
testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + args := []string{ + "-var", "foo=bar", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + varFilePath := testTempFile(t) + if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + args := []string{ + "-var-file", varFilePath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varFileDefault(t *testing.T) { + // Create a temporary working 
directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varsUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-unset-var"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + defaultInputReader = bytes.NewBufferString("bar\n") + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + args 
:= []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +func TestRefresh_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := ioutil.TempFile(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + defer outf.Close() + + // Need to put some state content in the output file so that there's + // something to back up. + err = statefile.Write(statefile.New(state, "baz", 0), outf) + if err != nil { + t.Fatalf("error writing initial output state file %s", err) + } + + // Backup path + backupf, err := ioutil.TempFile(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + backupPath := backupf.Name() + backupf.Close() + os.Remove(backupPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("changed"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + "-backup", backupPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !cmp.Equal(newState, state, cmpopts.EquateEmpty()) { + t.Fatalf("got:\n%s\nexpected:\n%s\n", newState, state) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + 
AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"changed\"\n }"), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + backupState := testStateRead(t, backupPath) + actualStr := strings.TrimSpace(backupState.String()) + expectedStr := strings.TrimSpace(state.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestRefresh_disableBackup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := ioutil.TempFile(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + outf.Close() + os.Remove(outPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + "-backup", "-", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !cmp.Equal(state, newState, equateEmpty) { + spew.Config.DisableMethods = true + fmt.Println(cmp.Diff(state, newState, equateEmpty)) + t.Fatalf("bad: %s", newState) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte("{\n 
\"ami\": null,\n \"id\": \"yes\"\n }"), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + // Ensure there is no backup + _, err = os.Stat(outPath + DefaultBackupExtension) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } + _, err = os.Stat("-") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } +} + +func TestRefresh_displaysOutputs(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-output"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Test that outputs were displayed + outputValue := "foo.example.com" + actual := output.Stdout() + if !strings.Contains(actual, outputValue) { + t.Fatalf("Expected:\n%s\n\nTo include: %q", actual, outputValue) + } +} + +// Config with multiple resources, targeting refresh of a subset +func TestRefresh_targeted(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-targeted"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + 
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stdout() + if want := "test_instance.foo: Refreshing"; !strings.Contains(got, want) { + t.Fatalf("expected output to contain %q, got:\n%s", want, got) + } + if doNotWant := "test_instance.bar: Refreshing"; strings.Contains(got, doNotWant) { + t.Fatalf("expected output not to contain %q, got:\n%s", doNotWant, got) + } +} + +// Diagnostics for invalid -target flags +func TestRefresh_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if 
!strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + +func TestRefresh_warnings(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + }, + } + } + + t.Run("full warnings", func(t *testing.T) { + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + wantWarnings := []string{ + "warning 1", + "warning 2", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 2 warnings and a message about -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "To see the full warning notes, run Terraform without -compact-warnings.", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +// configuration in testdata/refresh . 
This schema should be +// assigned to a mock provider named "test". +func refreshFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// refreshVarFixtureSchema returns a schema suitable for processing the +// configuration in testdata/refresh-var . This schema should be +// assigned to a mock provider named "test". +func refreshVarFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } +} + +const refreshVarFile = ` +foo = "bar" +` + +const testRefreshStr = ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] +` +const testRefreshCwdStr = ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] +` diff --git a/command/show.go b/command/show.go new file mode 100644 index 000000000000..4146f98187df --- /dev/null +++ b/command/show.go @@ -0,0 +1,238 @@ +package command + +import ( + "fmt" + "os" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + 
"github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// ShowCommand is a Command implementation that reads and outputs the +// contents of a Terraform plan or state file. +type ShowCommand struct { + Meta +} + +func (c *ShowCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseShow(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("show") + return 1 + } + + // Set up view + view := views.NewShow(args.ViewType, c.View) + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(fmt.Errorf("error loading plugin path: %s", err)) + view.Diagnostics(diags) + return 1 + } + + // Get the data we need to display + plan, stateFile, config, schemas, showDiags := c.show(args.Path) + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Display the data + return view.Display(config, plan, stateFile, schemas) +} + +func (c *ShowCommand) Help() string { + helpText := ` +Usage: terraform [global options] show [options] [path] + + Reads and outputs a Terraform state or plan file in a human-readable + form. If no path is specified, the current state will be shown. + +Options: + + -no-color If specified, output won't contain any color. + -json If specified, output the Terraform plan or state in + a machine-readable form. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *ShowCommand) Synopsis() string { + return "Show the current state or a saved plan" +} + +func (c *ShowCommand) show(path string) (*plans.Plan, *statefile.File, *configs.Config, *terraform.Schemas, tfdiags.Diagnostics) { + var diags, showDiags tfdiags.Diagnostics + var plan *plans.Plan + var stateFile *statefile.File + var config *configs.Config + var schemas *terraform.Schemas + + // No plan file or state file argument provided, + // so get the latest state snapshot + if path == "" { + stateFile, showDiags = c.showFromLatestStateSnapshot() + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + return plan, stateFile, config, schemas, diags + } + } + + // Plan file or state file argument provided, + // so try to load the argument as a plan file first. + // If that fails, try to load it as a statefile. + if path != "" { + plan, stateFile, config, showDiags = c.showFromPath(path) + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + return plan, stateFile, config, schemas, diags + } + } + + // Get schemas, if possible + if config != nil || stateFile != nil { + schemas, diags = c.MaybeGetSchemas(stateFile.State, config) + if diags.HasErrors() { + return plan, stateFile, config, schemas, diags + } + } + + return plan, stateFile, config, schemas, diags +} +func (c *ShowCommand) showFromLatestStateSnapshot() (*statefile.File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Load the backend + b, backendDiags := c.Backend(nil) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + return nil, diags + } + c.ignoreRemoteVersionConflict(b) + + // Load the workspace + workspace, err := c.Workspace() + if err != nil { + diags = diags.Append(fmt.Errorf("error selecting workspace: %s", err)) + return nil, diags + } + + // Get the latest state snapshot from the backend for the current workspace + stateFile, stateErr := getStateFromBackend(b, workspace) + if stateErr != nil { 
+ diags = diags.Append(stateErr) + return nil, diags + } + + return stateFile, diags +} + +func (c *ShowCommand) showFromPath(path string) (*plans.Plan, *statefile.File, *configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var planErr, stateErr error + var plan *plans.Plan + var stateFile *statefile.File + var config *configs.Config + + // Try to get the plan file and associated data from + // the path argument. If that fails, try to get the + // statefile from the path argument. + plan, stateFile, config, planErr = getPlanFromPath(path) + if planErr != nil { + stateFile, stateErr = getStateFromPath(path) + if stateErr != nil { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Failed to read the given file as a state or plan file", + fmt.Sprintf("State read error: %s\n\nPlan read error: %s", stateErr, planErr), + ), + ) + return nil, nil, nil, diags + } + } + return plan, stateFile, config, diags +} + +// getPlanFromPath returns a plan, statefile, and config if the user-supplied +// path points to a plan file. If both plan and error are nil, the path is likely +// a directory. An error could suggest that the given path points to a statefile. +func getPlanFromPath(path string) (*plans.Plan, *statefile.File, *configs.Config, error) { + planReader, err := planfile.Open(path) + if err != nil { + return nil, nil, nil, err + } + + // Get plan + plan, err := planReader.ReadPlan() + if err != nil { + return nil, nil, nil, err + } + + // Get statefile + stateFile, err := planReader.ReadStateFile() + if err != nil { + return nil, nil, nil, err + } + + // Get config + config, diags := planReader.ReadConfig() + if diags.HasErrors() { + return nil, nil, nil, diags.Err() + } + + return plan, stateFile, config, err +} + +// getStateFromPath returns a statefile if the user-supplied path points to a statefile. 
+func getStateFromPath(path string) (*statefile.File, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("Error loading statefile: %s", err) + } + defer file.Close() + + var stateFile *statefile.File + stateFile, err = statefile.Read(file) + if err != nil { + return nil, fmt.Errorf("Error reading %s as a statefile: %s", path, err) + } + return stateFile, nil +} + +// getStateFromBackend returns the State for the current workspace, if available. +func getStateFromBackend(b backend.Backend, workspace string) (*statefile.File, error) { + // Get the state store for the given workspace + stateStore, err := b.StateMgr(workspace) + if err != nil { + return nil, fmt.Errorf("Failed to load state manager: %s", err) + } + + // Refresh the state store with the latest state snapshot from persistent storage + if err := stateStore.RefreshState(); err != nil { + return nil, fmt.Errorf("Failed to load state: %s", err) + } + + // Get the latest state snapshot and return it + stateFile := statemgr.Export(stateStore) + return stateFile, nil +} diff --git a/command/show_test.go b/command/show_test.go new file mode 100644 index 000000000000..7e5224ad5df5 --- /dev/null +++ b/command/show_test.go @@ -0,0 +1,1156 @@ +package command + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestShow_badArgs(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + 
}, + } + + args := []string{ + "bad", + "bad", + "-no-color", + } + + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } +} + +func TestShow_noArgsNoState(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No state.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_noArgsWithState(t *testing.T) { + // Get a temp cwd + testCwd(t) + // Create the default state + testStateFileDefault(t, testState()) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `# test_instance.foo:` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_argsWithState(t *testing.T) { + // Create the default state + statePath := testStateFile(t, testState()) + stateDir := filepath.Dir(statePath) + defer os.RemoveAll(stateDir) + defer testChdir(t, stateDir)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + path := filepath.Base(statePath) + args := []string{ + path, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +// 
https://github.com/hashicorp/terraform/issues/21462 +func TestShow_argsWithStateAliasedProvider(t *testing.T) { + // Create the default state with aliased resource + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + // The weird whitespace here is reflective of how this would + // get written out in a real state file, due to the indentation + // of all of the containing wrapping objects and arrays. + AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{}, + }, + addrs.RootModuleInstance.ProviderConfigAliased(addrs.NewDefaultProvider("test"), "alias"), + ) + }) + + statePath := testStateFile(t, testState) + stateDir := filepath.Dir(statePath) + defer os.RemoveAll(stateDir) + defer testChdir(t, stateDir)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + path := filepath.Base(statePath) + args := []string{ + path, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `# missing schema for provider \"test.alias\"` + if strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s", got) + } +} + +func TestShow_argsPlanFileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "doesNotExist.tfplan", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := 
output.Stderr() + want := `Plan read error: open doesNotExist.tfplan:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_argsStatefileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "doesNotExist.tfstate", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `State read error: Error loading statefile:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_json_argsPlanFileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-json", + "doesNotExist.tfplan", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `Plan read error: open doesNotExist.tfplan:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_json_argsStatefileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-json", + "doesNotExist.tfstate", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `State read error: Error loading statefile:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: 
%s\nwant:\n%s", got, want) + } +} + +func TestShow_planNoop(t *testing.T) { + planPath := testPlanFileNoop(t) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No changes. Your infrastructure matches the configuration.` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_planWithChanges(t *testing.T) { + planPathWithChanges := showFixturePlanFile(t, plans.DeleteThenCreate) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + planPathWithChanges, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `test_instance.foo must be replaced` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_planWithForceReplaceChange(t *testing.T) { + // The main goal of this test is to see that the "replace by request" + // resource instance action reason can round-trip through a plan file and + // be reflected correctly in the "terraform show" output, the same way + // as it would appear in "terraform plan" output. 
+ + _, snap := testModuleWithSnapshot(t, "show") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + Before: priorValRaw, + After: plannedValRaw, + }, + ActionReason: plans.ResourceInstanceReplaceByRequest, + }) + planFilePath := testPlanFile( + t, + snap, + states.NewState(), + plan, + ) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + planFilePath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `test_instance.foo will be replaced, as requested` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } + + want = `Plan: 1 to add, 0 to change, 1 to destroy.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } + +} + +func TestShow_plan_json(t *testing.T) { + planPath := showFixturePlanFile(t, plans.Create) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + "-json", + planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +func TestShow_state(t *testing.T) { + originalState := testState() + root := originalState.RootModule() + root.SetOutputValue("test", cty.ObjectVal(map[string]cty.Value{ + "attr": cty.NullVal(cty.DynamicPseudoType), + "null": cty.NullVal(cty.String), + "list": cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), + }), false) + + statePath := testStateFile(t, originalState) + defer os.RemoveAll(filepath.Dir(statePath)) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + statePath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +func TestShow_json_output(t *testing.T) { + fixtureDir := "testdata/show-json" + testDirs, err := ioutil.ReadDir(fixtureDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := range testDirs { + if !entry.IsDir() { + continue + } + + t.Run(entry.Name(), func(t *testing.T) { + td := t.TempDir() + inputDir := filepath.Join(fixtureDir, entry.Name()) + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + expectError := strings.Contains(entry.Name(), "error") + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + "hashicorp2/test": {"1.2.3"}, + }) + defer close() + + p := showFixtureProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + if expectError { + // this should error, but not panic. 
+ return + } + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-out=terraform.plan", + } + + code := pc.Run(args) + planOutput := planDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "terraform.plan", + } + defer os.Remove("terraform.plan") + code = sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare view output to wanted output + var got, want plan + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + // Disregard format version to reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } + }) + } +} + +func TestShow_json_output_sensitive(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-json-sensitive" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) + defer close() + + p := showFixtureSensitiveProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + 
Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-out=terraform.plan", + } + code := pc.Run(args) + planOutput := planDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "terraform.plan", + } + defer os.Remove("terraform.plan") + code = sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare ui output to wanted output + var got, want plan + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + // Disregard format version to reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } +} + +// Failing conditions are only present in JSON output for refresh-only plans, +// so we test that separately here. 
+func TestShow_json_output_conditions_refresh_only(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-json/conditions" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) + defer close() + + p := showFixtureSensitiveProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-refresh-only", + "-out=terraform.plan", + "-var=ami=bad-ami", + "-state=for-refresh.tfstate", + } + code := pc.Run(args) + planOutput := planDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "terraform.plan", + } + defer os.Remove("terraform.plan") + code = sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare JSON output to wanted output + var got, want plan + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output-refresh-only.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + // Disregard format version to 
reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } +} + +// similar test as above, without the plan +func TestShow_json_output_state(t *testing.T) { + fixtureDir := "testdata/show-json-state" + testDirs, err := ioutil.ReadDir(fixtureDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := range testDirs { + if !entry.IsDir() { + continue + } + + t.Run(entry.Name(), func(t *testing.T) { + td := t.TempDir() + inputDir := filepath.Join(fixtureDir, entry.Name()) + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + p := showFixtureProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + code := sc.Run([]string{"-json"}) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare ui output to wanted output + type state struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version"` + Values map[string]interface{} `json:"values,omitempty"` + SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` + } + var got, want state + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer wantFile.Close() + byteValue, err := 
ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } + }) + } +} + +func TestShow_planWithNonDefaultStateLineage(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("show"), td) + defer testChdir(t, td)() + + // Write default state file with a testing lineage ("fake-for-testing") + testStateFileDefault(t, testState()) + + // Create a plan with a different lineage, which we should still be able + // to show + _, snap := testModuleWithSnapshot(t, "show") + state := testState() + plan := testPlan(t) + stateMeta := statemgr.SnapshotMeta{ + Lineage: "fake-for-plan", + Serial: 1, + TerraformVersion: version.SemVer, + } + planPath := testPlanFileMatchState(t, snap, state, plan, stateMeta) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No changes. 
Your infrastructure matches the configuration.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_corruptStatefile(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-corrupt-statefile" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `Unsupported state file format` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +// showFixtureSchema returns a schema suitable for processing the configuration +// in testdata/show. This schema should be assigned to a mock provider +// named "test". +func showFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// showFixtureSensitiveSchema returns a schema suitable for processing the configuration +// in testdata/show. This schema should be assigned to a mock provider +// named "test". It includes a sensitive attribute. 
+func showFixtureSensitiveSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + } +} + +// showFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/show. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// Terraform Core. +func showFixtureProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = showFixtureSchema() + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + idVal := req.PriorState.GetAttr("id") + amiVal := req.PriorState.GetAttr("ami") + if amiVal.RawEquals(cty.StringVal("refresh-me")) { + amiVal = cty.StringVal("refreshed") + } + return providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + Private: req.Private, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan, + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + idVal := req.ProposedNewState.GetAttr("id") + amiVal := req.ProposedNewState.GetAttr("ami") + if idVal.IsNull() { + idVal = cty.UnknownVal(cty.String) + } + var reqRep []cty.Path + if 
amiVal.RawEquals(cty.StringVal("force-replace")) { + reqRep = append(reqRep, cty.GetAttrPath("ami")) + } + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + RequiresReplace: reqRep, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + idVal := req.PlannedState.GetAttr("id") + amiVal := req.PlannedState.GetAttr("ami") + if !idVal.IsKnown() { + idVal = cty.StringVal("placeholder") + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + } + } + return p +} + +// showFixtureSensitiveProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/show. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// Terraform Core. It also has a sensitive attribute in the provider schema. 
+func showFixtureSensitiveProvider() *terraform.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = showFixtureSensitiveSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + idVal := req.ProposedNewState.GetAttr("id") + if idVal.IsNull() { + idVal = cty.UnknownVal(cty.String) + } + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": req.ProposedNewState.GetAttr("ami"), + "password": req.ProposedNewState.GetAttr("password"), + }), + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + idVal := req.PlannedState.GetAttr("id") + if !idVal.IsKnown() { + idVal = cty.StringVal("placeholder") + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": req.PlannedState.GetAttr("ami"), + "password": req.PlannedState.GetAttr("password"), + }), + } + } + return p +} + +// showFixturePlanFile creates a plan file at a temporary location containing a +// single change to create or update the test_instance.foo that is included in the "show" +// test fixture, returning the location of that plan file. 
+// `action` is the planned change you would like to elicit +func showFixturePlanFile(t *testing.T, action plans.Action) string { + _, snap := testModuleWithSnapshot(t, "show") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: action, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + return testPlanFile( + t, + snap, + states.NewState(), + plan, + ) +} + +// this simplified plan struct allows us to preserve field order when marshaling +// the command output. NOTE: we are leaving "terraform_version" out of this test +// to avoid needing to constantly update the expected output; as a potential +// TODO we could write a jsonplan compare function. 
+type plan struct { + FormatVersion string `json:"format_version,omitempty"` + Variables map[string]interface{} `json:"variables,omitempty"` + PlannedValues map[string]interface{} `json:"planned_values,omitempty"` + ResourceDrift []interface{} `json:"resource_drift,omitempty"` + ResourceChanges []interface{} `json:"resource_changes,omitempty"` + OutputChanges map[string]interface{} `json:"output_changes,omitempty"` + PriorState priorState `json:"prior_state,omitempty"` + Config map[string]interface{} `json:"configuration,omitempty"` +} + +type priorState struct { + FormatVersion string `json:"format_version,omitempty"` + Values map[string]interface{} `json:"values,omitempty"` + SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` +} diff --git a/internal/command/state_command.go b/command/state_command.go similarity index 100% rename from internal/command/state_command.go rename to command/state_command.go diff --git a/internal/command/state_list.go b/command/state_list.go similarity index 96% rename from internal/command/state_list.go rename to command/state_list.go index 54358b28d702..160adb8cd398 100644 --- a/internal/command/state_list.go +++ b/command/state_list.go @@ -4,9 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_list_test.go b/command/state_list_test.go similarity index 100% rename from internal/command/state_list_test.go rename to command/state_list_test.go diff --git a/internal/command/state_meta.go b/command/state_meta.go similarity index 95% rename from internal/command/state_meta.go rename to command/state_meta.go index 17959f5ff90f..90656a91e6e8 100644 --- a/internal/command/state_meta.go +++ 
b/command/state_meta.go @@ -5,12 +5,12 @@ import ( "sort" "time" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" - backendLocal "github.com/hashicorp/terraform/internal/backend/local" + backendLocal "github.com/hashicorp/terraform/backend/local" ) // StateMeta is the meta struct that should be embedded in state subcommands. diff --git a/internal/command/state_mv.go b/command/state_mv.go similarity index 97% rename from internal/command/state_mv.go rename to command/state_mv.go index feb650ac886f..1c9776d4d90e 100644 --- a/internal/command/state_mv.go +++ b/command/state_mv.go @@ -4,14 +4,14 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_mv_test.go b/command/state_mv_test.go similarity index 99% rename from internal/command/state_mv_test.go rename to command/state_mv_test.go index 
00f871f880eb..d68a22afac26 100644 --- a/internal/command/state_mv_test.go +++ b/command/state_mv_test.go @@ -10,8 +10,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) func TestStateMv(t *testing.T) { diff --git a/internal/command/state_pull.go b/command/state_pull.go similarity index 94% rename from internal/command/state_pull.go rename to command/state_pull.go index 8872cec65cfa..122ff1d3cbd3 100644 --- a/internal/command/state_pull.go +++ b/command/state_pull.go @@ -5,8 +5,8 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" ) // StatePullCommand is a Command implementation that shows a single resource. 
diff --git a/internal/command/state_pull_test.go b/command/state_pull_test.go similarity index 100% rename from internal/command/state_pull_test.go rename to command/state_pull_test.go diff --git a/internal/command/state_push.go b/command/state_push.go similarity index 92% rename from internal/command/state_push.go rename to command/state_push.go index c738dc70e36a..769ae0a28564 100644 --- a/internal/command/state_push.go +++ b/command/state_push.go @@ -6,13 +6,13 @@ import ( "os" "strings" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_push_test.go b/command/state_push_test.go similarity index 97% rename from internal/command/state_push_test.go rename to command/state_push_test.go index f79efa9b3b5e..53f1b0568b90 100644 --- a/internal/command/state_push_test.go +++ b/command/state_push_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/remote-state/inmem" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" ) diff --git 
a/internal/command/state_replace_provider.go b/command/state_replace_provider.go similarity index 94% rename from internal/command/state_replace_provider.go rename to command/state_replace_provider.go index 42fdc6255907..56d23f52c5d0 100644 --- a/internal/command/state_replace_provider.go +++ b/command/state_replace_provider.go @@ -4,13 +4,13 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_replace_provider_test.go b/command/state_replace_provider_test.go similarity index 99% rename from internal/command/state_replace_provider_test.go rename to command/state_replace_provider_test.go index 9c86cf7797d1..a2576b7eab6c 100644 --- a/internal/command/state_replace_provider_test.go +++ b/command/state_replace_provider_test.go @@ -8,8 +8,8 @@ import ( "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) func TestStateReplaceProvider(t *testing.T) { diff --git a/internal/command/state_rm.go b/command/state_rm.go similarity index 94% rename from internal/command/state_rm.go rename to command/state_rm.go index 77d4b1823b2a..d7957a9a7f46 100644 --- 
a/internal/command/state_rm.go +++ b/command/state_rm.go @@ -4,12 +4,12 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_rm_test.go b/command/state_rm_test.go similarity index 99% rename from internal/command/state_rm_test.go rename to command/state_rm_test.go index 1b58a59677ec..6cae9d5be3d4 100644 --- a/internal/command/state_rm_test.go +++ b/command/state_rm_test.go @@ -8,8 +8,8 @@ import ( "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) func TestStateRm(t *testing.T) { diff --git a/internal/command/state_show.go b/command/state_show.go similarity index 94% rename from internal/command/state_show.go rename to command/state_show.go index 3abd63c48934..63c1d01f0821 100644 --- a/internal/command/state_show.go +++ b/command/state_show.go @@ -5,11 +5,11 @@ import ( "os" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + 
"github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" ) diff --git a/internal/command/state_show_test.go b/command/state_show_test.go similarity index 96% rename from internal/command/state_show_test.go rename to command/state_show_test.go index 3da87c0ecaa1..2ded7ca45d81 100644 --- a/internal/command/state_show_test.go +++ b/command/state_show_test.go @@ -4,10 +4,10 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" ) diff --git a/command/state_test.go b/command/state_test.go new file mode 100644 index 000000000000..a9eddcb00920 --- /dev/null +++ b/command/state_test.go @@ -0,0 +1,40 @@ +package command + +import ( + "path/filepath" + "regexp" + "sort" + "testing" + + "github.com/hashicorp/terraform/states/statemgr" +) + +// testStateBackups returns the list of backups in order of creation +// (oldest first) in the given directory. 
+func testStateBackups(t *testing.T, dir string) []string { + // Find all the backups + list, err := filepath.Glob(filepath.Join(dir, "*"+DefaultBackupExtension)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Sort them which will put them naturally in the right order + sort.Strings(list) + + return list +} + +func TestStateDefaultBackupExtension(t *testing.T) { + testCwd(t) + + s, err := (&StateMeta{}).State() + if err != nil { + t.Fatal(err) + } + + backupPath := s.(*statemgr.Filesystem).BackupPath() + match := regexp.MustCompile(`terraform\.tfstate\.\d+\.backup$`).MatchString + if !match(backupPath) { + t.Fatal("Bad backup path:", backupPath) + } +} diff --git a/internal/command/taint.go b/command/taint.go similarity index 95% rename from internal/command/taint.go rename to command/taint.go index e4da31d9b2ef..dba0a98862a7 100644 --- a/internal/command/taint.go +++ b/command/taint.go @@ -4,13 +4,13 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // TaintCommand is a cli.Command implementation that manually taints diff --git a/internal/command/taint_test.go b/command/taint_test.go similarity index 99% rename from internal/command/taint_test.go rename to command/taint_test.go index 001d477082bc..bddf66420a25 100644 --- a/internal/command/taint_test.go +++ 
b/command/taint_test.go @@ -8,8 +8,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) func TestTaint(t *testing.T) { diff --git a/command/test.go b/command/test.go new file mode 100644 index 000000000000..b14b86ab0619 --- /dev/null +++ b/command/test.go @@ -0,0 +1,730 @@ +package command + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/moduletest" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providercache" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// TestCommand is the implementation of "terraform test". 
+type TestCommand struct { + Meta +} + +func (c *TestCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + args, diags := arguments.ParseTest(rawArgs) + view := views.NewTest(c.View, args.Output) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + `The "terraform test" command is experimental`, + "We'd like to invite adventurous module authors to write integration tests for their modules using this command, but all of the behaviors of this command are currently experimental and may change based on feedback.\n\nFor more information on the testing experiment, including ongoing research goals and avenues for feedback, see:\n https://www.terraform.io/docs/language/modules/testing-experiment.html", + )) + + ctx, cancel := c.InterruptibleContext() + defer cancel() + + results, moreDiags := c.run(ctx, args) + diags = diags.Append(moreDiags) + + initFailed := diags.HasErrors() + view.Diagnostics(diags) + diags = view.Results(results) + resultsFailed := diags.HasErrors() + view.Diagnostics(diags) // possible additional errors from saving the results + + var testsFailed bool + for _, suite := range results { + for _, component := range suite.Components { + for _, assertion := range component.Assertions { + if !assertion.Outcome.SuiteCanPass() { + testsFailed = true + } + } + } + } + + // Lots of things can possibly have failed + if initFailed || resultsFailed || testsFailed { + return 1 + } + return 0 +} + +func (c *TestCommand) run(ctx context.Context, args arguments.Test) (results map[string]*moduletest.Suite, diags tfdiags.Diagnostics) { + suiteNames, err := c.collectSuiteNames() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error while searching for test configurations", + fmt.Sprintf("While attempting to scan the 'tests' subdirectory for potential test 
configurations, Terraform encountered an error: %s.", err), + )) + return nil, diags + } + + ret := make(map[string]*moduletest.Suite, len(suiteNames)) + for _, suiteName := range suiteNames { + if ctx.Err() != nil { + // If the context has already failed in some way then we'll + // halt early and report whatever's already happened. + break + } + suite, moreDiags := c.runSuite(ctx, suiteName) + diags = diags.Append(moreDiags) + ret[suiteName] = suite + } + + return ret, diags +} + +func (c *TestCommand) runSuite(ctx context.Context, suiteName string) (*moduletest.Suite, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := moduletest.Suite{ + Name: suiteName, + Components: map[string]*moduletest.Component{}, + } + + // In order to make this initial round of "terraform test" pretty self + // contained while it's experimental, it's largely just mimicking what + // would happen when running the main Terraform workflow commands, which + // comes at the expense of a few irritants that we'll hopefully resolve + // in future iterations as the design solidifies: + // - We need to install remote modules separately for each of the + // test suites, because we don't have any sense of a shared cache + // of modules that multiple configurations can refer to at once. + // - We _do_ have a sense of a cache of remote providers, but it's fixed + // at being specifically a two-level cache (global vs. directory-specific) + // and so we can't easily capture a third level of "all of the test suites + // for this module" that sits between the two. Consequently, we need to + // dynamically choose between creating a directory-specific "global" + // cache or using the user's existing global cache, to avoid any + // situation were we'd be re-downloading the same providers for every + // one of the test suites. 
+ // - We need to do something a bit horrid in order to have our test + // provider instance persist between the plan and apply steps, because + // normally that is the exact opposite of what we want. + // The above notes are here mainly as an aid to someone who might be + // planning a subsequent phase of this R&D effort, to help distinguish + // between things we're doing here because they are valuable vs. things + // we're doing just to make it work without doing any disruptive + // refactoring. + + suiteDirs, moreDiags := c.prepareSuiteDir(ctx, suiteName) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + // Generate a special failure representing the test initialization + // having failed, since we therefore won'tbe able to run the actual + // tests defined inside. + ret.Components["(init)"] = &moduletest.Component{ + Assertions: map[string]*moduletest.Assertion{ + "(init)": { + Outcome: moduletest.Error, + Description: "terraform init", + Message: "failed to install test suite dependencies", + Diagnostics: diags, + }, + }, + } + return &ret, nil + } + + // When we run the suite itself, we collect up diagnostics associated + // with individual components, so ret.Components may or may not contain + // failed/errored components after runTestSuite returns. + var finalState *states.State + ret.Components, finalState = c.runTestSuite(ctx, suiteDirs) + + // Regardless of the success or failure of the test suite, if there are + // any objects left in the state then we'll generate a top-level error + // about each one to minimize the chance of the user failing to notice + // that there are leftover objects that might continue to cost money + // unless manually deleted. 
+ for _, ms := range finalState.Modules { + for _, rs := range ms.Resources { + for instanceKey, is := range rs.Instances { + var objs []*states.ResourceInstanceObjectSrc + if is.Current != nil { + objs = append(objs, is.Current) + } + for _, obj := range is.Deposed { + objs = append(objs, obj) + } + for _, obj := range objs { + // Unfortunately we don't have provider schemas out here + // and so we're limited in what we can achieve with these + // ResourceInstanceObjectSrc values, but we can try some + // heuristicy things to try to give some useful information + // in common cases. + var k, v string + if ty, err := ctyjson.ImpliedType(obj.AttrsJSON); err == nil { + if approxV, err := ctyjson.Unmarshal(obj.AttrsJSON, ty); err == nil { + k, v = format.ObjectValueIDOrName(approxV) + } + } + + var detail string + if k != "" { + // We can be more specific if we were able to infer + // an identifying attribute for this object. + detail = fmt.Sprintf( + "Due to errors during destroy, test suite %q has left behind an object for %s, with the following identity:\n %s = %q\n\nYou will need to delete this object manually in the remote system, or else it may have an ongoing cost.", + suiteName, + rs.Addr.Instance(instanceKey), + k, v, + ) + } else { + // If our heuristics for finding a suitable identifier + // failed then unfortunately we must be more vague. + // (We can't just print the entire object, because it + // might be overly large and it might contain sensitive + // values.) + detail = fmt.Sprintf( + "Due to errors during destroy, test suite %q has left behind an object for %s. 
You will need to delete this object manually in the remote system, or else it may have an ongoing cost.", + suiteName, + rs.Addr.Instance(instanceKey), + ) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to clean up after tests", + detail, + )) + } + } + } + } + + return &ret, diags +} + +func (c *TestCommand) prepareSuiteDir(ctx context.Context, suiteName string) (testCommandSuiteDirs, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + configDir := filepath.Join("tests", suiteName) + log.Printf("[TRACE] terraform test: Prepare directory for suite %q in %s", suiteName, configDir) + + suiteDirs := testCommandSuiteDirs{ + SuiteName: suiteName, + ConfigDir: configDir, + } + + // Before we can run a test suite we need to make sure that we have all of + // its dependencies available, so the following is essentially an + // abbreviated form of what happens during "terraform init", with some + // extra trickery in places. + + // First, module installation. This will include linking in the module + // under test, but also includes grabbing the dependencies of that module + // if it has any. + suiteDirs.ModulesDir = filepath.Join(configDir, ".terraform", "modules") + os.MkdirAll(suiteDirs.ModulesDir, 0755) // if this fails then we'll ignore it and let InstallModules below fail instead + reg := c.registryClient() + moduleInst := initwd.NewModuleInstaller(suiteDirs.ModulesDir, reg) + _, moreDiags := moduleInst.InstallModules(ctx, configDir, true, nil) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return suiteDirs, diags + } + + // The installer puts the files in a suitable place on disk, but we + // still need to actually load the configuration. We need to do this + // with a separate config loader because the Meta.configLoader instance + // is intended for interacting with the current working directory, not + // with the test suite subdirectories. 
+ loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: suiteDirs.ModulesDir, + Services: c.Services, + }) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create test configuration loader", + fmt.Sprintf("Failed to prepare loader for test configuration %s: %s.", configDir, err), + )) + return suiteDirs, diags + } + cfg, hclDiags := loader.LoadConfig(configDir) + diags = diags.Append(hclDiags) + if diags.HasErrors() { + return suiteDirs, diags + } + suiteDirs.Config = cfg + + // With the full configuration tree available, we can now install + // the necessary providers. We'll use a separate local cache directory + // here, because the test configuration might have additional requirements + // compared to the module itself. + suiteDirs.ProvidersDir = filepath.Join(configDir, ".terraform", "providers") + os.MkdirAll(suiteDirs.ProvidersDir, 0755) // if this fails then we'll ignore it and operations below fail instead + localCacheDir := providercache.NewDir(suiteDirs.ProvidersDir) + providerInst := c.providerInstaller().Clone(localCacheDir) + if !providerInst.HasGlobalCacheDir() { + // If the user already configured a global cache directory then we'll + // just use it for caching the test providers too, because then we + // can potentially reuse cache entries they already have. However, + // if they didn't configure one then we'll still establish one locally + // in the working directory, which we'll then share across all tests + // to avoid downloading the same providers repeatedly. + cachePath := filepath.Join(c.DataDir(), "testing-providers") // note this is _not_ under the suite dir + err := os.MkdirAll(cachePath, 0755) + // If we were unable to create the directory for any reason then we'll + // just proceed without a cache, at the expense of repeated downloads. + // (With that said, later installing might end up failing for the + // same reason anyway...) 
+ if err == nil || os.IsExist(err) { + cacheDir := providercache.NewDir(cachePath) + providerInst.SetGlobalCacheDir(cacheDir) + } + } + reqs, hclDiags := cfg.ProviderRequirements() + diags = diags.Append(hclDiags) + if diags.HasErrors() { + return suiteDirs, diags + } + + // For test suites we only retain the "locks" in memory for the duration + // for one run, just to make sure that we use the same providers when we + // eventually run the test suite. + locks := depsfile.NewLocks() + evts := &providercache.InstallerEvents{ + QueryPackagesFailure: func(provider addrs.Provider, err error) { + if err != nil && addrs.IsDefaultProvider(provider) && provider.Type == "test" { + // This is some additional context for the failure error + // we'll generate afterwards. Not the most ideal UX but + // good enough for this prototype implementation, to help + // hint about the special builtin provider we use here. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Probably-unintended reference to \"hashicorp/test\" provider", + "For the purposes of this experimental implementation of module test suites, you must use the built-in test provider terraform.io/builtin/test, which requires an explicit required_providers declaration.", + )) + } + }, + } + ctx = evts.OnContext(ctx) + locks, err = providerInst.EnsureProviderVersions(ctx, locks, reqs, providercache.InstallUpgrades) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to install required providers", + fmt.Sprintf("Couldn't install necessary providers for test configuration %s: %s.", configDir, err), + )) + return suiteDirs, diags + } + suiteDirs.ProviderLocks = locks + suiteDirs.ProviderCache = localCacheDir + + return suiteDirs, diags +} + +func (c *TestCommand) runTestSuite(ctx context.Context, suiteDirs testCommandSuiteDirs) (map[string]*moduletest.Component, *states.State) { + log.Printf("[TRACE] terraform test: Run test suite %q", suiteDirs.SuiteName) + + ret := 
make(map[string]*moduletest.Component) + + // To collect test results we'll use an instance of the special "test" + // provider, which records the intention to make a test assertion during + // planning and then hopefully updates that to an actual assertion result + // during apply, unless an apply error causes the graph walk to exit early. + // For this to work correctly, we must ensure we're using the same provider + // instance for both plan and apply. + testProvider := moduletest.NewProvider() + + // synthError is a helper to return early with a synthetic failing + // component, for problems that prevent us from even discovering what an + // appropriate component and assertion name might be. + state := states.NewState() + synthError := func(name string, desc string, msg string, diags tfdiags.Diagnostics) (map[string]*moduletest.Component, *states.State) { + key := "(" + name + ")" // parens ensure this can't conflict with an actual component/assertion key + ret[key] = &moduletest.Component{ + Assertions: map[string]*moduletest.Assertion{ + key: { + Outcome: moduletest.Error, + Description: desc, + Message: msg, + Diagnostics: diags, + }, + }, + } + return ret, state + } + + // NOTE: This function intentionally deviates from the usual pattern of + // gradually appending more diagnostics to the same diags, because + // here we're associating each set of diagnostics with the specific + // operation it belongs to. + + providerFactories, diags := c.testSuiteProviders(suiteDirs, testProvider) + if diags.HasErrors() { + // It should be unusual to get in here, because testSuiteProviders + // should rely only on things guaranteed by prepareSuiteDir, but + // since we're doing external I/O here there is always the risk that + // the filesystem changes or fails between setting up and using the + // providers. 
+ return synthError( + "init", + "terraform init", + "failed to resolve the required providers", + diags, + ) + } + + plan, diags := c.testSuitePlan(ctx, suiteDirs, providerFactories) + if diags.HasErrors() { + // It should be unusual to get in here, because testSuitePlan + // should rely only on things guaranteed by prepareSuiteDir, but + // since we're doing external I/O here there is always the risk that + // the filesystem changes or fails between setting up and using the + // providers. + return synthError( + "plan", + "terraform plan", + "failed to create a plan", + diags, + ) + } + + // Now we'll apply the plan. Once we try to apply, we might've created + // real remote objects, and so we must try to run destroy even if the + // apply returns errors, and we must return whatever state we end up + // with so the caller can generate additional loud errors if anything + // is left in it. + + state, diags = c.testSuiteApply(ctx, plan, suiteDirs, providerFactories) + if diags.HasErrors() { + // We don't return here, unlike the others above, because we want to + // continue to the destroy below even if there are apply errors. + synthError( + "apply", + "terraform apply", + "failed to apply the created plan", + diags, + ) + } + + // By the time we get here, the test provider will have gathered up all + // of the planned assertions and the final results for any assertions that + // were not blocked by an error. This also resets the provider so that + // the destroy operation below won't get tripped up on stale results. 
+ ret = testProvider.Reset() + + state, diags = c.testSuiteDestroy(ctx, state, suiteDirs, providerFactories) + if diags.HasErrors() { + synthError( + "destroy", + "terraform destroy", + "failed to destroy objects created during test (NOTE: leftover remote objects may still exist)", + diags, + ) + } + + return ret, state +} + +func (c *TestCommand) testSuiteProviders(suiteDirs testCommandSuiteDirs, testProvider *moduletest.Provider) (map[addrs.Provider]providers.Factory, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := make(map[addrs.Provider]providers.Factory) + + // We can safely use the internal providers returned by Meta here because + // the built-in provider versions can never vary based on the configuration + // and thus we don't need to worry about potential version differences + // between main module and test suite modules. + for name, factory := range c.internalProviders() { + ret[addrs.NewBuiltInProvider(name)] = factory + } + + // For the remaining non-builtin providers, we'll just take whatever we + // recorded earlier in the in-memory-only "lock file". All of these should + // typically still be available because we would've only just installed + // them, but this could fail if e.g. the filesystem has been somehow + // damaged in the meantime. 
+ for provider, lock := range suiteDirs.ProviderLocks.AllProviders() { + version := lock.Version() + cached := suiteDirs.ProviderCache.ProviderVersion(provider, version) + if cached == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required provider not found", + fmt.Sprintf("Although installation previously succeeded for %s v%s, it no longer seems to be present in the cache directory.", provider.ForDisplay(), version.String()), + )) + continue // potentially collect up multiple errors + } + + // NOTE: We don't consider the checksums for test suite dependencies, + // because we're creating a fresh "lock file" each time we run anyway + // and so they wouldn't actually guarantee anything useful. + + ret[provider] = providerFactory(cached) + } + + // We'll replace the test provider instance with the one our caller + // provided, so it'll be able to interrogate the test results directly. + ret[addrs.NewBuiltInProvider("test")] = func() (providers.Interface, error) { + return testProvider, nil + } + + return ret, diags +} + +type testSuiteRunContext struct { + Core *terraform.Context + + PlanMode plans.Mode + Config *configs.Config + InputState *states.State + Changes *plans.Changes +} + +func (c *TestCommand) testSuiteContext(suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory, state *states.State, plan *plans.Plan, destroy bool) (*testSuiteRunContext, tfdiags.Diagnostics) { + var changes *plans.Changes + if plan != nil { + changes = plan.Changes + } + + planMode := plans.NormalMode + if destroy { + planMode = plans.DestroyMode + } + + tfCtx, diags := terraform.NewContext(&terraform.ContextOpts{ + Providers: providerFactories, + + // We just use the provisioners from the main Meta here, because + // unlike providers provisioner plugins are not automatically + // installable anyway, and so we'll need to hunt for them in the same + // legacy way that normal Terraform operations do. 
+ Provisioners: c.provisionerFactories(), + + Meta: &terraform.ContextMeta{ + Env: "test_" + suiteDirs.SuiteName, + }, + }) + if diags.HasErrors() { + return nil, diags + } + return &testSuiteRunContext{ + Core: tfCtx, + + PlanMode: planMode, + Config: suiteDirs.Config, + InputState: state, + Changes: changes, + }, diags +} + +func (c *TestCommand) testSuitePlan(ctx context.Context, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*plans.Plan, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform test: create plan for suite %q", suiteDirs.SuiteName) + runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, nil, false) + if diags.HasErrors() { + return nil, diags + } + + // We'll also validate as part of planning, to ensure that the test + // configuration would pass "terraform validate". This is actually + // largely redundant with the runCtx.Core.Plan call below, but was + // included here originally because Plan did _originally_ assume that + // an earlier Validate had already passed, but now does its own + // validation work as (mostly) a superset of validate. + moreDiags := runCtx.Core.Validate(runCtx.Config) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + + plan, moreDiags := runCtx.Core.Plan( + runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode}, + ) + diags = diags.Append(moreDiags) + return plan, diags +} + +func (c *TestCommand) testSuiteApply(ctx context.Context, plan *plans.Plan, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform test: apply plan for suite %q", suiteDirs.SuiteName) + runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, plan, false) + if diags.HasErrors() { + // To make things easier on the caller, we'll return a valid empty + // state even in this case. 
+ return states.NewState(), diags + } + + state, moreDiags := runCtx.Core.Apply(plan, runCtx.Config) + diags = diags.Append(moreDiags) + return state, diags +} + +func (c *TestCommand) testSuiteDestroy(ctx context.Context, state *states.State, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform test: plan to destroy any existing objects for suite %q", suiteDirs.SuiteName) + runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, state, nil, true) + if diags.HasErrors() { + return state, diags + } + + plan, moreDiags := runCtx.Core.Plan( + runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode}, + ) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return state, diags + } + + log.Printf("[TRACE] terraform test: apply the plan to destroy any existing objects for suite %q", suiteDirs.SuiteName) + runCtx, moreDiags = c.testSuiteContext(suiteDirs, providerFactories, state, plan, true) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return state, diags + } + + state, moreDiags = runCtx.Core.Apply(plan, runCtx.Config) + diags = diags.Append(moreDiags) + return state, diags +} + +func (c *TestCommand) collectSuiteNames() ([]string, error) { + items, err := ioutil.ReadDir("tests") + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + ret := make([]string, 0, len(items)) + for _, item := range items { + if !item.IsDir() { + continue + } + name := item.Name() + suitePath := filepath.Join("tests", name) + tfFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf")) + if err != nil { + // We'll just ignore it and treat it like a dir with no .tf files + tfFiles = nil + } + tfJSONFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf.json")) + if err != nil { + // We'll just ignore it and treat it like a dir with no .tf.json files + tfJSONFiles = nil + } + if (len(tfFiles) + 
len(tfJSONFiles)) == 0 { + // Not a test suite, then. + continue + } + ret = append(ret, name) + } + + return ret, nil +} + +func (c *TestCommand) Help() string { + helpText := ` +Usage: terraform test [options] + + This is an experimental command to help with automated integration + testing of shared modules. The usage and behavior of this command is + likely to change in breaking ways in subsequent releases, as we + are currently using this command primarily for research purposes. + + In its current experimental form, "test" will look under the current + working directory for a subdirectory called "tests", and then within + that directory search for one or more subdirectories that contain + ".tf" or ".tf.json" files. For any that it finds, it will perform + Terraform operations similar to the following sequence of commands + in each of those directories: + terraform validate + terraform apply + terraform destroy + + The test configurations should not declare any input variables and + should at least contain a call to the module being tested, which + will always be available at the path ../.. due to the expected + filesystem layout. + + The tests are considered to be successful if all of the above steps + succeed. + + Test configurations may optionally include uses of the special + built-in test provider terraform.io/builtin/test, which allows + writing explicit test assertions which must also all pass in order + for the test run to be considered successful. + + This initial implementation is intended as a minimally-viable + product to use for further research and experimentation, and in + particular it currently lacks the following capabilities that we + expect to consider in later iterations, based on feedback: + - Testing of subsequent updates to existing infrastructure, + where currently it only supports initial creation and + then destruction. 
+ - Testing top-level modules that are intended to be used for + "real" environments, which typically have hard-coded values + that don't permit creating a separate "copy" for testing. + - Some sort of support for unit test runs that don't interact + with remote systems at all, e.g. for use in checking pull + requests from untrusted contributors. + + In the meantime, we'd like to hear feedback from module authors + who have tried writing some experimental tests for their modules + about what sorts of tests you were able to write, what sorts of + tests you weren't able to write, and any tests that you were + able to write but that were difficult to model in some way. + +Options: + + -compact-warnings Use a more compact representation for warnings, if + this command produces only warnings and no errors. + + -junit-xml=FILE In addition to the usual output, also write test + results to the given file path in JUnit XML format. + This format is commonly supported by CI systems, and + they typically expect to be given a filename to search + for in the test workspace after the test run finishes. + + -no-color Don't include virtual terminal formatting sequences in + the output. 
+` + return strings.TrimSpace(helpText) +} + +func (c *TestCommand) Synopsis() string { + return "Experimental support for module integration testing" +} + +type testCommandSuiteDirs struct { + SuiteName string + + ConfigDir string + ModulesDir string + ProvidersDir string + + Config *configs.Config + ProviderCache *providercache.Dir + ProviderLocks *depsfile.Locks +} diff --git a/command/test_test.go b/command/test_test.go new file mode 100644 index 000000000000..a697d81b93c0 --- /dev/null +++ b/command/test_test.go @@ -0,0 +1,163 @@ +package command + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/terminal" +) + +// These are the main tests for the "terraform test" command. +func TestTest(t *testing.T) { + t.Run("passes", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("test-passes"), td) + defer testChdir(t, td)() + + streams, close := terminal.StreamsForTesting(t) + cmd := &TestCommand{ + Meta: Meta{ + Streams: streams, + View: views.NewView(streams), + }, + } + exitStatus := cmd.Run([]string{"-junit-xml=junit.xml", "-no-color"}) + outp := close(t) + if got, want := exitStatus, 0; got != want { + t.Fatalf("wrong exit status %d; want %d\nstderr:\n%s", got, want, outp.Stderr()) + } + + gotStdout := strings.TrimSpace(outp.Stdout()) + wantStdout := strings.TrimSpace(` +Warning: The "terraform test" command is experimental + +We'd like to invite adventurous module authors to write integration tests for +their modules using this command, but all of the behaviors of this command +are currently experimental and may change based on feedback. 
+ +For more information on the testing experiment, including ongoing research +goals and avenues for feedback, see: + https://www.terraform.io/docs/language/modules/testing-experiment.html +`) + if diff := cmp.Diff(wantStdout, gotStdout); diff != "" { + t.Errorf("wrong stdout\n%s", diff) + } + + gotStderr := strings.TrimSpace(outp.Stderr()) + wantStderr := strings.TrimSpace(` +Success! All of the test assertions passed. +`) + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong stderr\n%s", diff) + } + + gotXMLSrc, err := ioutil.ReadFile("junit.xml") + if err != nil { + t.Fatal(err) + } + gotXML := string(bytes.TrimSpace(gotXMLSrc)) + wantXML := strings.TrimSpace(` + + 0 + 0 + 1 + + hello + 1 + 0 + 0 + 0 + + output + foo + + + +`) + if diff := cmp.Diff(wantXML, gotXML); diff != "" { + t.Errorf("wrong JUnit XML\n%s", diff) + } + }) + t.Run("fails", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("test-fails"), td) + defer testChdir(t, td)() + + streams, close := terminal.StreamsForTesting(t) + cmd := &TestCommand{ + Meta: Meta{ + Streams: streams, + View: views.NewView(streams), + }, + } + exitStatus := cmd.Run([]string{"-junit-xml=junit.xml", "-no-color"}) + outp := close(t) + if got, want := exitStatus, 1; got != want { + t.Fatalf("wrong exit status %d; want %d\nstderr:\n%s", got, want, outp.Stderr()) + } + + gotStdout := strings.TrimSpace(outp.Stdout()) + wantStdout := strings.TrimSpace(` +Warning: The "terraform test" command is experimental + +We'd like to invite adventurous module authors to write integration tests for +their modules using this command, but all of the behaviors of this command +are currently experimental and may change based on feedback. 
+ +For more information on the testing experiment, including ongoing research +goals and avenues for feedback, see: + https://www.terraform.io/docs/language/modules/testing-experiment.html +`) + if diff := cmp.Diff(wantStdout, gotStdout); diff != "" { + t.Errorf("wrong stdout\n%s", diff) + } + + gotStderr := strings.TrimSpace(outp.Stderr()) + wantStderr := strings.TrimSpace(` +─── Failed: hello.foo.output (output "foo" value) ─────────────────────────── +wrong value + got: "foo value boop" + want: "foo not boop" + +───────────────────────────────────────────────────────────────────────────── +`) + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong stderr\n%s", diff) + } + + gotXMLSrc, err := ioutil.ReadFile("junit.xml") + if err != nil { + t.Fatal(err) + } + gotXML := string(bytes.TrimSpace(gotXMLSrc)) + wantXML := strings.TrimSpace(` + + 0 + 1 + 1 + + hello + 1 + 0 + 0 + 1 + + output + foo + + wrong value got: "foo value boop" want: "foo not boop" + + + + +`) + if diff := cmp.Diff(wantXML, gotXML); diff != "" { + t.Errorf("wrong JUnit XML\n%s", diff) + } + }) + +} diff --git a/internal/command/testdata/apply-config-invalid/main.tf b/command/testdata/apply-config-invalid/main.tf similarity index 100% rename from internal/command/testdata/apply-config-invalid/main.tf rename to command/testdata/apply-config-invalid/main.tf diff --git a/internal/command/testdata/apply-destroy-targeted/main.tf b/command/testdata/apply-destroy-targeted/main.tf similarity index 100% rename from internal/command/testdata/apply-destroy-targeted/main.tf rename to command/testdata/apply-destroy-targeted/main.tf diff --git a/internal/command/testdata/apply-error/main.tf b/command/testdata/apply-error/main.tf similarity index 100% rename from internal/command/testdata/apply-error/main.tf rename to command/testdata/apply-error/main.tf diff --git a/internal/command/testdata/apply-input-partial/main.tf b/command/testdata/apply-input-partial/main.tf similarity index 100% 
rename from internal/command/testdata/apply-input-partial/main.tf rename to command/testdata/apply-input-partial/main.tf diff --git a/internal/command/testdata/apply-input/main.tf b/command/testdata/apply-input/main.tf similarity index 100% rename from internal/command/testdata/apply-input/main.tf rename to command/testdata/apply-input/main.tf diff --git a/internal/command/testdata/apply-plan-no-module/main.tf b/command/testdata/apply-plan-no-module/main.tf similarity index 100% rename from internal/command/testdata/apply-plan-no-module/main.tf rename to command/testdata/apply-plan-no-module/main.tf diff --git a/internal/command/testdata/apply-replace/main.tf b/command/testdata/apply-replace/main.tf similarity index 100% rename from internal/command/testdata/apply-replace/main.tf rename to command/testdata/apply-replace/main.tf diff --git a/internal/command/testdata/apply-sensitive-output/main.tf b/command/testdata/apply-sensitive-output/main.tf similarity index 100% rename from internal/command/testdata/apply-sensitive-output/main.tf rename to command/testdata/apply-sensitive-output/main.tf diff --git a/internal/command/testdata/apply-shutdown/main.tf b/command/testdata/apply-shutdown/main.tf similarity index 100% rename from internal/command/testdata/apply-shutdown/main.tf rename to command/testdata/apply-shutdown/main.tf diff --git a/internal/command/testdata/apply-targeted/main.tf b/command/testdata/apply-targeted/main.tf similarity index 100% rename from internal/command/testdata/apply-targeted/main.tf rename to command/testdata/apply-targeted/main.tf diff --git a/internal/command/testdata/apply-terraform-env/main.tf b/command/testdata/apply-terraform-env/main.tf similarity index 100% rename from internal/command/testdata/apply-terraform-env/main.tf rename to command/testdata/apply-terraform-env/main.tf diff --git a/internal/command/testdata/apply-vars/main.tf b/command/testdata/apply-vars/main.tf similarity index 100% rename from 
internal/command/testdata/apply-vars/main.tf rename to command/testdata/apply-vars/main.tf diff --git a/internal/command/testdata/apply/main.tf b/command/testdata/apply/main.tf similarity index 100% rename from internal/command/testdata/apply/main.tf rename to command/testdata/apply/main.tf diff --git a/internal/command/testdata/apply/output.jsonlog b/command/testdata/apply/output.jsonlog similarity index 100% rename from internal/command/testdata/apply/output.jsonlog rename to command/testdata/apply/output.jsonlog diff --git a/internal/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate b/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate rename to command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-default-to-single/local-state.tfstate b/command/testdata/backend-change-multi-default-to-single/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-default-to-single/local-state.tfstate rename to command/testdata/backend-change-multi-default-to-single/local-state.tfstate diff --git a/internal/command/testdata/backend-change-multi-default-to-single/main.tf b/command/testdata/backend-change-multi-default-to-single/main.tf similarity index 100% rename from internal/command/testdata/backend-change-multi-default-to-single/main.tf rename to command/testdata/backend-change-multi-default-to-single/main.tf diff --git a/internal/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate b/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate rename to 
command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-multi/local-state.tfstate b/command/testdata/backend-change-multi-to-multi/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-multi/local-state.tfstate rename to command/testdata/backend-change-multi-to-multi/local-state.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-multi/main.tf b/command/testdata/backend-change-multi-to-multi/main.tf similarity index 100% rename from internal/command/testdata/backend-change-multi-to-multi/main.tf rename to command/testdata/backend-change-multi-to-multi/main.tf diff --git a/internal/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate b/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate rename to command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate b/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate rename to command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate b/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate rename to 
command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-no-default-with-default/main.tf b/command/testdata/backend-change-multi-to-no-default-with-default/main.tf similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-with-default/main.tf rename to command/testdata/backend-change-multi-to-no-default-with-default/main.tf diff --git a/internal/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate b/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate rename to command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate b/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate rename to command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-no-default-without-default/main.tf b/command/testdata/backend-change-multi-to-no-default-without-default/main.tf similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-without-default/main.tf rename to command/testdata/backend-change-multi-to-no-default-without-default/main.tf diff --git a/internal/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate 
b/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate rename to command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-single/.terraform/environment b/command/testdata/backend-change-multi-to-single/.terraform/environment similarity index 100% rename from internal/command/testdata/backend-change-multi-to-single/.terraform/environment rename to command/testdata/backend-change-multi-to-single/.terraform/environment diff --git a/internal/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate b/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate rename to command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-single/main.tf b/command/testdata/backend-change-multi-to-single/main.tf similarity index 100% rename from internal/command/testdata/backend-change-multi-to-single/main.tf rename to command/testdata/backend-change-multi-to-single/main.tf diff --git a/internal/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate b/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate rename to command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate diff --git a/internal/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate 
b/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate rename to command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate diff --git a/internal/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate b/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate rename to command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change-single-to-single/local-state.tfstate b/command/testdata/backend-change-single-to-single/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-change-single-to-single/local-state.tfstate rename to command/testdata/backend-change-single-to-single/local-state.tfstate diff --git a/internal/command/testdata/backend-change-single-to-single/main.tf b/command/testdata/backend-change-single-to-single/main.tf similarity index 100% rename from internal/command/testdata/backend-change-single-to-single/main.tf rename to command/testdata/backend-change-single-to-single/main.tf diff --git a/internal/command/testdata/backend-change/.terraform/terraform.tfstate b/command/testdata/backend-change/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-change/.terraform/terraform.tfstate rename to command/testdata/backend-change/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-change/local-state.tfstate b/command/testdata/backend-change/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-change/local-state.tfstate rename to command/testdata/backend-change/local-state.tfstate diff --git 
a/internal/command/testdata/backend-change/main.tf b/command/testdata/backend-change/main.tf similarity index 100% rename from internal/command/testdata/backend-change/main.tf rename to command/testdata/backend-change/main.tf diff --git a/internal/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate b/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate rename to command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-changed-with-legacy/local-state-old.tfstate b/command/testdata/backend-changed-with-legacy/local-state-old.tfstate similarity index 100% rename from internal/command/testdata/backend-changed-with-legacy/local-state-old.tfstate rename to command/testdata/backend-changed-with-legacy/local-state-old.tfstate diff --git a/internal/command/testdata/backend-changed-with-legacy/local-state.tfstate b/command/testdata/backend-changed-with-legacy/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-changed-with-legacy/local-state.tfstate rename to command/testdata/backend-changed-with-legacy/local-state.tfstate diff --git a/internal/command/testdata/backend-changed-with-legacy/main.tf b/command/testdata/backend-changed-with-legacy/main.tf similarity index 100% rename from internal/command/testdata/backend-changed-with-legacy/main.tf rename to command/testdata/backend-changed-with-legacy/main.tf diff --git a/internal/command/testdata/backend-from-state/terraform.tfstate b/command/testdata/backend-from-state/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-from-state/terraform.tfstate rename to command/testdata/backend-from-state/terraform.tfstate diff --git a/internal/command/testdata/backend-inmem-locked/main.tf b/command/testdata/backend-inmem-locked/main.tf similarity 
index 100% rename from internal/command/testdata/backend-inmem-locked/main.tf rename to command/testdata/backend-inmem-locked/main.tf diff --git a/internal/command/testdata/backend-new-interp/main.tf b/command/testdata/backend-new-interp/main.tf similarity index 100% rename from internal/command/testdata/backend-new-interp/main.tf rename to command/testdata/backend-new-interp/main.tf diff --git a/internal/command/testdata/backend-new-legacy/.terraform/terraform.tfstate b/command/testdata/backend-new-legacy/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-new-legacy/.terraform/terraform.tfstate rename to command/testdata/backend-new-legacy/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-new-legacy/local-state-old.tfstate b/command/testdata/backend-new-legacy/local-state-old.tfstate similarity index 100% rename from internal/command/testdata/backend-new-legacy/local-state-old.tfstate rename to command/testdata/backend-new-legacy/local-state-old.tfstate diff --git a/internal/command/testdata/backend-new-legacy/main.tf b/command/testdata/backend-new-legacy/main.tf similarity index 100% rename from internal/command/testdata/backend-new-legacy/main.tf rename to command/testdata/backend-new-legacy/main.tf diff --git a/internal/command/testdata/backend-new-migrate-existing/local-state.tfstate b/command/testdata/backend-new-migrate-existing/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-new-migrate-existing/local-state.tfstate rename to command/testdata/backend-new-migrate-existing/local-state.tfstate diff --git a/internal/command/testdata/backend-new-migrate-existing/main.tf b/command/testdata/backend-new-migrate-existing/main.tf similarity index 100% rename from internal/command/testdata/backend-new-migrate-existing/main.tf rename to command/testdata/backend-new-migrate-existing/main.tf diff --git 
a/internal/command/testdata/backend-new-migrate-existing/terraform.tfstate b/command/testdata/backend-new-migrate-existing/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-new-migrate-existing/terraform.tfstate rename to command/testdata/backend-new-migrate-existing/terraform.tfstate diff --git a/internal/command/testdata/backend-new-migrate/main.tf b/command/testdata/backend-new-migrate/main.tf similarity index 100% rename from internal/command/testdata/backend-new-migrate/main.tf rename to command/testdata/backend-new-migrate/main.tf diff --git a/internal/command/testdata/backend-new-migrate/terraform.tfstate b/command/testdata/backend-new-migrate/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-new-migrate/terraform.tfstate rename to command/testdata/backend-new-migrate/terraform.tfstate diff --git a/internal/command/testdata/backend-new/main.tf b/command/testdata/backend-new/main.tf similarity index 100% rename from internal/command/testdata/backend-new/main.tf rename to command/testdata/backend-new/main.tf diff --git a/internal/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate b/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate rename to command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-plan-backend-empty-config/local-state.tfstate b/command/testdata/backend-plan-backend-empty-config/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-backend-empty-config/local-state.tfstate rename to command/testdata/backend-plan-backend-empty-config/local-state.tfstate diff --git a/internal/command/testdata/backend-plan-backend-empty-config/main.tf 
b/command/testdata/backend-plan-backend-empty-config/main.tf similarity index 100% rename from internal/command/testdata/backend-plan-backend-empty-config/main.tf rename to command/testdata/backend-plan-backend-empty-config/main.tf diff --git a/internal/command/testdata/backend-plan-backend-empty/readme.txt b/command/testdata/backend-plan-backend-empty/readme.txt similarity index 100% rename from internal/command/testdata/backend-plan-backend-empty/readme.txt rename to command/testdata/backend-plan-backend-empty/readme.txt diff --git a/internal/command/testdata/backend-plan-backend-match/local-state.tfstate b/command/testdata/backend-plan-backend-match/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-backend-match/local-state.tfstate rename to command/testdata/backend-plan-backend-match/local-state.tfstate diff --git a/internal/command/testdata/backend-plan-backend-match/readme.txt b/command/testdata/backend-plan-backend-match/readme.txt similarity index 100% rename from internal/command/testdata/backend-plan-backend-match/readme.txt rename to command/testdata/backend-plan-backend-match/readme.txt diff --git a/internal/command/testdata/backend-plan-backend-mismatch/local-state.tfstate b/command/testdata/backend-plan-backend-mismatch/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-backend-mismatch/local-state.tfstate rename to command/testdata/backend-plan-backend-mismatch/local-state.tfstate diff --git a/internal/command/testdata/backend-plan-legacy-data/local-state.tfstate b/command/testdata/backend-plan-legacy-data/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-legacy-data/local-state.tfstate rename to command/testdata/backend-plan-legacy-data/local-state.tfstate diff --git a/internal/command/testdata/backend-plan-legacy-data/main.tf b/command/testdata/backend-plan-legacy-data/main.tf similarity index 100% rename from 
internal/command/testdata/backend-plan-legacy-data/main.tf rename to command/testdata/backend-plan-legacy-data/main.tf diff --git a/internal/command/testdata/backend-plan-legacy-data/state.tfstate b/command/testdata/backend-plan-legacy-data/state.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-legacy-data/state.tfstate rename to command/testdata/backend-plan-legacy-data/state.tfstate diff --git a/internal/command/testdata/backend-plan-legacy/readme.txt b/command/testdata/backend-plan-legacy/readme.txt similarity index 100% rename from internal/command/testdata/backend-plan-legacy/readme.txt rename to command/testdata/backend-plan-legacy/readme.txt diff --git a/internal/command/testdata/backend-plan-local-match/main.tf b/command/testdata/backend-plan-local-match/main.tf similarity index 100% rename from internal/command/testdata/backend-plan-local-match/main.tf rename to command/testdata/backend-plan-local-match/main.tf diff --git a/internal/command/testdata/backend-plan-local-match/terraform.tfstate b/command/testdata/backend-plan-local-match/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-local-match/terraform.tfstate rename to command/testdata/backend-plan-local-match/terraform.tfstate diff --git a/internal/command/testdata/backend-plan-local-mismatch-lineage/main.tf b/command/testdata/backend-plan-local-mismatch-lineage/main.tf similarity index 100% rename from internal/command/testdata/backend-plan-local-mismatch-lineage/main.tf rename to command/testdata/backend-plan-local-mismatch-lineage/main.tf diff --git a/internal/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate b/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate rename to command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate diff --git 
a/internal/command/testdata/backend-plan-local-newer/main.tf b/command/testdata/backend-plan-local-newer/main.tf similarity index 100% rename from internal/command/testdata/backend-plan-local-newer/main.tf rename to command/testdata/backend-plan-local-newer/main.tf diff --git a/internal/command/testdata/backend-plan-local-newer/terraform.tfstate b/command/testdata/backend-plan-local-newer/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-plan-local-newer/terraform.tfstate rename to command/testdata/backend-plan-local-newer/terraform.tfstate diff --git a/internal/command/testdata/backend-plan-local/main.tf b/command/testdata/backend-plan-local/main.tf similarity index 100% rename from internal/command/testdata/backend-plan-local/main.tf rename to command/testdata/backend-plan-local/main.tf diff --git a/internal/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate b/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate rename to command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate b/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate similarity index 100% rename from internal/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate rename to command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate diff --git a/internal/command/testdata/backend-unchanged-with-legacy/local-state.tfstate b/command/testdata/backend-unchanged-with-legacy/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-unchanged-with-legacy/local-state.tfstate rename to command/testdata/backend-unchanged-with-legacy/local-state.tfstate diff --git a/internal/command/testdata/backend-unchanged-with-legacy/main.tf 
b/command/testdata/backend-unchanged-with-legacy/main.tf similarity index 100% rename from internal/command/testdata/backend-unchanged-with-legacy/main.tf rename to command/testdata/backend-unchanged-with-legacy/main.tf diff --git a/internal/command/testdata/backend-unchanged/.terraform/terraform.tfstate b/command/testdata/backend-unchanged/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-unchanged/.terraform/terraform.tfstate rename to command/testdata/backend-unchanged/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-unchanged/local-state.tfstate b/command/testdata/backend-unchanged/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-unchanged/local-state.tfstate rename to command/testdata/backend-unchanged/local-state.tfstate diff --git a/internal/command/testdata/backend-unchanged/main.tf b/command/testdata/backend-unchanged/main.tf similarity index 100% rename from internal/command/testdata/backend-unchanged/main.tf rename to command/testdata/backend-unchanged/main.tf diff --git a/internal/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate b/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate rename to command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-unset-with-legacy/local-state-old.tfstate b/command/testdata/backend-unset-with-legacy/local-state-old.tfstate similarity index 100% rename from internal/command/testdata/backend-unset-with-legacy/local-state-old.tfstate rename to command/testdata/backend-unset-with-legacy/local-state-old.tfstate diff --git a/internal/command/testdata/backend-unset-with-legacy/local-state.tfstate b/command/testdata/backend-unset-with-legacy/local-state.tfstate similarity index 100% rename from 
internal/command/testdata/backend-unset-with-legacy/local-state.tfstate rename to command/testdata/backend-unset-with-legacy/local-state.tfstate diff --git a/internal/command/testdata/backend-unset-with-legacy/main.tf b/command/testdata/backend-unset-with-legacy/main.tf similarity index 100% rename from internal/command/testdata/backend-unset-with-legacy/main.tf rename to command/testdata/backend-unset-with-legacy/main.tf diff --git a/internal/command/testdata/backend-unset/.terraform/terraform.tfstate b/command/testdata/backend-unset/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/backend-unset/.terraform/terraform.tfstate rename to command/testdata/backend-unset/.terraform/terraform.tfstate diff --git a/internal/command/testdata/backend-unset/local-state.tfstate b/command/testdata/backend-unset/local-state.tfstate similarity index 100% rename from internal/command/testdata/backend-unset/local-state.tfstate rename to command/testdata/backend-unset/local-state.tfstate diff --git a/internal/command/testdata/backend-unset/main.tf b/command/testdata/backend-unset/main.tf similarity index 100% rename from internal/command/testdata/backend-unset/main.tf rename to command/testdata/backend-unset/main.tf diff --git a/internal/command/testdata/command-check-required-version/main.tf b/command/testdata/command-check-required-version/main.tf similarity index 100% rename from internal/command/testdata/command-check-required-version/main.tf rename to command/testdata/command-check-required-version/main.tf diff --git a/internal/command/testdata/empty-file b/command/testdata/empty-file similarity index 100% rename from internal/command/testdata/empty-file rename to command/testdata/empty-file diff --git a/internal/command/testdata/empty/README b/command/testdata/empty/README similarity index 100% rename from internal/command/testdata/empty/README rename to command/testdata/empty/README diff --git 
a/internal/command/testdata/fmt/general_in.tf b/command/testdata/fmt/general_in.tf similarity index 100% rename from internal/command/testdata/fmt/general_in.tf rename to command/testdata/fmt/general_in.tf diff --git a/internal/command/testdata/fmt/general_out.tf b/command/testdata/fmt/general_out.tf similarity index 100% rename from internal/command/testdata/fmt/general_out.tf rename to command/testdata/fmt/general_out.tf diff --git a/internal/command/testdata/fmt/variable_type_in.tf b/command/testdata/fmt/variable_type_in.tf similarity index 100% rename from internal/command/testdata/fmt/variable_type_in.tf rename to command/testdata/fmt/variable_type_in.tf diff --git a/internal/command/testdata/fmt/variable_type_out.tf b/command/testdata/fmt/variable_type_out.tf similarity index 100% rename from internal/command/testdata/fmt/variable_type_out.tf rename to command/testdata/fmt/variable_type_out.tf diff --git a/internal/command/testdata/get/foo/main.tf b/command/testdata/get/foo/main.tf similarity index 100% rename from internal/command/testdata/get/foo/main.tf rename to command/testdata/get/foo/main.tf diff --git a/internal/command/testdata/get/main.tf b/command/testdata/get/main.tf similarity index 100% rename from internal/command/testdata/get/main.tf rename to command/testdata/get/main.tf diff --git a/internal/command/testdata/graph/main.tf b/command/testdata/graph/main.tf similarity index 100% rename from internal/command/testdata/graph/main.tf rename to command/testdata/graph/main.tf diff --git a/internal/command/testdata/import-missing-resource-config/main.tf b/command/testdata/import-missing-resource-config/main.tf similarity index 100% rename from internal/command/testdata/import-missing-resource-config/main.tf rename to command/testdata/import-missing-resource-config/main.tf diff --git a/internal/command/testdata/import-module-input-variable/child/main.tf b/command/testdata/import-module-input-variable/child/main.tf similarity index 100% rename from 
internal/command/testdata/import-module-input-variable/child/main.tf rename to command/testdata/import-module-input-variable/child/main.tf diff --git a/internal/command/testdata/import-module-input-variable/main.tf b/command/testdata/import-module-input-variable/main.tf similarity index 100% rename from internal/command/testdata/import-module-input-variable/main.tf rename to command/testdata/import-module-input-variable/main.tf diff --git a/internal/command/testdata/import-module-input-variable/terraform.tfvars b/command/testdata/import-module-input-variable/terraform.tfvars similarity index 100% rename from internal/command/testdata/import-module-input-variable/terraform.tfvars rename to command/testdata/import-module-input-variable/terraform.tfvars diff --git a/internal/command/testdata/import-module-var-file/child/main.tf b/command/testdata/import-module-var-file/child/main.tf similarity index 100% rename from internal/command/testdata/import-module-var-file/child/main.tf rename to command/testdata/import-module-var-file/child/main.tf diff --git a/internal/command/testdata/import-module-var-file/main.tf b/command/testdata/import-module-var-file/main.tf similarity index 100% rename from internal/command/testdata/import-module-var-file/main.tf rename to command/testdata/import-module-var-file/main.tf diff --git a/internal/command/testdata/import-module-var-file/terraform.tfvars b/command/testdata/import-module-var-file/terraform.tfvars similarity index 100% rename from internal/command/testdata/import-module-var-file/terraform.tfvars rename to command/testdata/import-module-var-file/terraform.tfvars diff --git a/internal/command/testdata/import-provider-aliased/main.tf b/command/testdata/import-provider-aliased/main.tf similarity index 100% rename from internal/command/testdata/import-provider-aliased/main.tf rename to command/testdata/import-provider-aliased/main.tf diff --git a/internal/command/testdata/import-provider-datasource/main.tf 
b/command/testdata/import-provider-datasource/main.tf similarity index 100% rename from internal/command/testdata/import-provider-datasource/main.tf rename to command/testdata/import-provider-datasource/main.tf diff --git a/internal/command/testdata/import-provider-implicit/main.tf b/command/testdata/import-provider-implicit/main.tf similarity index 100% rename from internal/command/testdata/import-provider-implicit/main.tf rename to command/testdata/import-provider-implicit/main.tf diff --git a/internal/command/testdata/import-provider-invalid/main.tf b/command/testdata/import-provider-invalid/main.tf similarity index 100% rename from internal/command/testdata/import-provider-invalid/main.tf rename to command/testdata/import-provider-invalid/main.tf diff --git a/internal/command/testdata/import-provider-remote-state/main.tf b/command/testdata/import-provider-remote-state/main.tf similarity index 100% rename from internal/command/testdata/import-provider-remote-state/main.tf rename to command/testdata/import-provider-remote-state/main.tf diff --git a/internal/command/testdata/import-provider-var-default/main.tf b/command/testdata/import-provider-var-default/main.tf similarity index 100% rename from internal/command/testdata/import-provider-var-default/main.tf rename to command/testdata/import-provider-var-default/main.tf diff --git a/internal/command/testdata/import-provider-var-default/terraform.tfvars b/command/testdata/import-provider-var-default/terraform.tfvars similarity index 100% rename from internal/command/testdata/import-provider-var-default/terraform.tfvars rename to command/testdata/import-provider-var-default/terraform.tfvars diff --git a/internal/command/testdata/import-provider-var-file/blah.tfvars b/command/testdata/import-provider-var-file/blah.tfvars similarity index 100% rename from internal/command/testdata/import-provider-var-file/blah.tfvars rename to command/testdata/import-provider-var-file/blah.tfvars diff --git 
a/internal/command/testdata/import-provider-var-file/main.tf b/command/testdata/import-provider-var-file/main.tf similarity index 100% rename from internal/command/testdata/import-provider-var-file/main.tf rename to command/testdata/import-provider-var-file/main.tf diff --git a/internal/command/testdata/import-provider-var/main.tf b/command/testdata/import-provider-var/main.tf similarity index 100% rename from internal/command/testdata/import-provider-var/main.tf rename to command/testdata/import-provider-var/main.tf diff --git a/internal/command/testdata/import-provider/main.tf b/command/testdata/import-provider/main.tf similarity index 100% rename from internal/command/testdata/import-provider/main.tf rename to command/testdata/import-provider/main.tf diff --git a/internal/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate b/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate rename to command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate diff --git a/internal/command/testdata/init-backend-config-file-change-migrate-existing/input.config b/command/testdata/init-backend-config-file-change-migrate-existing/input.config similarity index 100% rename from internal/command/testdata/init-backend-config-file-change-migrate-existing/input.config rename to command/testdata/init-backend-config-file-change-migrate-existing/input.config diff --git a/internal/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate b/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate similarity index 100% rename from internal/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate rename to 
command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate diff --git a/internal/command/testdata/init-backend-config-file-change-migrate-existing/main.tf b/command/testdata/init-backend-config-file-change-migrate-existing/main.tf similarity index 100% rename from internal/command/testdata/init-backend-config-file-change-migrate-existing/main.tf rename to command/testdata/init-backend-config-file-change-migrate-existing/main.tf diff --git a/internal/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate b/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate rename to command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate diff --git a/internal/command/testdata/init-backend-config-file-change/input.config b/command/testdata/init-backend-config-file-change/input.config similarity index 100% rename from internal/command/testdata/init-backend-config-file-change/input.config rename to command/testdata/init-backend-config-file-change/input.config diff --git a/internal/command/testdata/init-backend-config-file-change/main.tf b/command/testdata/init-backend-config-file-change/main.tf similarity index 100% rename from internal/command/testdata/init-backend-config-file-change/main.tf rename to command/testdata/init-backend-config-file-change/main.tf diff --git a/internal/command/testdata/init-backend-config-file/backend.config b/command/testdata/init-backend-config-file/backend.config similarity index 100% rename from internal/command/testdata/init-backend-config-file/backend.config rename to command/testdata/init-backend-config-file/backend.config diff --git a/internal/command/testdata/init-backend-config-file/input.config b/command/testdata/init-backend-config-file/input.config similarity index 100% rename from 
internal/command/testdata/init-backend-config-file/input.config rename to command/testdata/init-backend-config-file/input.config diff --git a/internal/command/testdata/init-backend-config-file/invalid.config b/command/testdata/init-backend-config-file/invalid.config similarity index 100% rename from internal/command/testdata/init-backend-config-file/invalid.config rename to command/testdata/init-backend-config-file/invalid.config diff --git a/internal/command/testdata/init-backend-config-file/main.tf b/command/testdata/init-backend-config-file/main.tf similarity index 100% rename from internal/command/testdata/init-backend-config-file/main.tf rename to command/testdata/init-backend-config-file/main.tf diff --git a/internal/command/testdata/init-backend-config-kv/main.tf b/command/testdata/init-backend-config-kv/main.tf similarity index 100% rename from internal/command/testdata/init-backend-config-kv/main.tf rename to command/testdata/init-backend-config-kv/main.tf diff --git a/internal/command/testdata/init-backend-empty/main.tf b/command/testdata/init-backend-empty/main.tf similarity index 100% rename from internal/command/testdata/init-backend-empty/main.tf rename to command/testdata/init-backend-empty/main.tf diff --git a/internal/command/testdata/init-backend-http/main.tf b/command/testdata/init-backend-http/main.tf similarity index 100% rename from internal/command/testdata/init-backend-http/main.tf rename to command/testdata/init-backend-http/main.tf diff --git a/internal/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate b/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate rename to command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate diff --git a/internal/command/testdata/init-backend-migrate-while-locked/input.config 
b/command/testdata/init-backend-migrate-while-locked/input.config similarity index 100% rename from internal/command/testdata/init-backend-migrate-while-locked/input.config rename to command/testdata/init-backend-migrate-while-locked/input.config diff --git a/internal/command/testdata/init-backend-migrate-while-locked/main.tf b/command/testdata/init-backend-migrate-while-locked/main.tf similarity index 100% rename from internal/command/testdata/init-backend-migrate-while-locked/main.tf rename to command/testdata/init-backend-migrate-while-locked/main.tf diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment b/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment rename to command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate b/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate rename to command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf b/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf rename to command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate 
b/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate rename to command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate b/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate rename to command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment b/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment rename to command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate b/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate rename to command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate diff --git a/internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf b/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf similarity index 100% rename from 
internal/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf rename to command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf diff --git a/internal/command/testdata/init-backend/main.tf b/command/testdata/init-backend/main.tf similarity index 100% rename from internal/command/testdata/init-backend/main.tf rename to command/testdata/init-backend/main.tf diff --git a/internal/command/testdata/init-check-required-version-first-module/main.tf b/command/testdata/init-check-required-version-first-module/main.tf similarity index 100% rename from internal/command/testdata/init-check-required-version-first-module/main.tf rename to command/testdata/init-check-required-version-first-module/main.tf diff --git a/internal/command/testdata/init-check-required-version-first-module/mod/main.tf b/command/testdata/init-check-required-version-first-module/mod/main.tf similarity index 100% rename from internal/command/testdata/init-check-required-version-first-module/mod/main.tf rename to command/testdata/init-check-required-version-first-module/mod/main.tf diff --git a/internal/command/testdata/init-check-required-version-first/main.tf b/command/testdata/init-check-required-version-first/main.tf similarity index 100% rename from internal/command/testdata/init-check-required-version-first/main.tf rename to command/testdata/init-check-required-version-first/main.tf diff --git a/internal/command/testdata/init-check-required-version/main.tf b/command/testdata/init-check-required-version/main.tf similarity index 100% rename from internal/command/testdata/init-check-required-version/main.tf rename to command/testdata/init-check-required-version/main.tf diff --git a/internal/command/testdata/init-cloud-simple/init-cloud-simple.tf b/command/testdata/init-cloud-simple/init-cloud-simple.tf similarity index 100% rename from internal/command/testdata/init-cloud-simple/init-cloud-simple.tf rename to 
command/testdata/init-cloud-simple/init-cloud-simple.tf diff --git a/internal/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf b/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf rename to command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf diff --git a/internal/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json b/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json similarity index 100% rename from internal/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json rename to command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json diff --git a/internal/command/testdata/init-get-provider-detected-legacy/child/main.tf b/command/testdata/init-get-provider-detected-legacy/child/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-detected-legacy/child/main.tf rename to command/testdata/init-get-provider-detected-legacy/child/main.tf diff --git a/internal/command/testdata/init-get-provider-detected-legacy/main.tf b/command/testdata/init-get-provider-detected-legacy/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-detected-legacy/main.tf rename to command/testdata/init-get-provider-detected-legacy/main.tf diff --git a/internal/command/testdata/init-get-provider-invalid-package/main.tf b/command/testdata/init-get-provider-invalid-package/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-invalid-package/main.tf rename to command/testdata/init-get-provider-invalid-package/main.tf diff --git 
a/internal/command/testdata/init-get-provider-legacy-from-state/main.tf b/command/testdata/init-get-provider-legacy-from-state/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-legacy-from-state/main.tf rename to command/testdata/init-get-provider-legacy-from-state/main.tf diff --git a/internal/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate b/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate similarity index 100% rename from internal/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate rename to command/testdata/init-get-provider-legacy-from-state/terraform.tfstate diff --git a/internal/command/testdata/init-get-provider-source/main.tf b/command/testdata/init-get-provider-source/main.tf similarity index 100% rename from internal/command/testdata/init-get-provider-source/main.tf rename to command/testdata/init-get-provider-source/main.tf diff --git a/internal/command/testdata/init-get-providers/main.tf b/command/testdata/init-get-providers/main.tf similarity index 100% rename from internal/command/testdata/init-get-providers/main.tf rename to command/testdata/init-get-providers/main.tf diff --git a/internal/command/testdata/init-get/foo/main.tf b/command/testdata/init-get/foo/main.tf similarity index 100% rename from internal/command/testdata/init-get/foo/main.tf rename to command/testdata/init-get/foo/main.tf diff --git a/internal/command/testdata/init-get/main.tf b/command/testdata/init-get/main.tf similarity index 100% rename from internal/command/testdata/init-get/main.tf rename to command/testdata/init-get/main.tf diff --git a/internal/command/testdata/init-internal-invalid/main.tf b/command/testdata/init-internal-invalid/main.tf similarity index 100% rename from internal/command/testdata/init-internal-invalid/main.tf rename to command/testdata/init-internal-invalid/main.tf diff --git a/internal/command/testdata/init-internal/main.tf 
b/command/testdata/init-internal/main.tf similarity index 100% rename from internal/command/testdata/init-internal/main.tf rename to command/testdata/init-internal/main.tf diff --git a/internal/command/testdata/init-legacy-provider-cache/.terraform/plugins/example.com/test/b/1.1.0/os_arch/terraform-provider-b b/command/testdata/init-legacy-provider-cache/.terraform/plugins/example.com/test/b/1.1.0/os_arch/terraform-provider-b similarity index 100% rename from internal/command/testdata/init-legacy-provider-cache/.terraform/plugins/example.com/test/b/1.1.0/os_arch/terraform-provider-b rename to command/testdata/init-legacy-provider-cache/.terraform/plugins/example.com/test/b/1.1.0/os_arch/terraform-provider-b diff --git a/internal/command/testdata/init-legacy-provider-cache/.terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/os_arch/terraform-provider-c b/command/testdata/init-legacy-provider-cache/.terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/os_arch/terraform-provider-c similarity index 100% rename from internal/command/testdata/init-legacy-provider-cache/.terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/os_arch/terraform-provider-c rename to command/testdata/init-legacy-provider-cache/.terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/os_arch/terraform-provider-c diff --git a/internal/command/testdata/init-legacy-provider-cache/versions.tf b/command/testdata/init-legacy-provider-cache/versions.tf similarity index 100% rename from internal/command/testdata/init-legacy-provider-cache/versions.tf rename to command/testdata/init-legacy-provider-cache/versions.tf diff --git a/internal/command/testdata/init-legacy-rc/main.tf b/command/testdata/init-legacy-rc/main.tf similarity index 100% rename from internal/command/testdata/init-legacy-rc/main.tf rename to command/testdata/init-legacy-rc/main.tf diff --git a/internal/command/testdata/init-provider-lock-file-readonly-add/main.tf 
b/command/testdata/init-provider-lock-file-readonly-add/main.tf similarity index 100% rename from internal/command/testdata/init-provider-lock-file-readonly-add/main.tf rename to command/testdata/init-provider-lock-file-readonly-add/main.tf diff --git a/internal/command/testdata/init-provider-lock-file/main.tf b/command/testdata/init-provider-lock-file/main.tf similarity index 100% rename from internal/command/testdata/init-provider-lock-file/main.tf rename to command/testdata/init-provider-lock-file/main.tf diff --git a/internal/command/testdata/init-provider-now-unused/main.tf b/command/testdata/init-provider-now-unused/main.tf similarity index 100% rename from internal/command/testdata/init-provider-now-unused/main.tf rename to command/testdata/init-provider-now-unused/main.tf diff --git a/internal/command/testdata/init-providers-lock/main.tf b/command/testdata/init-providers-lock/main.tf similarity index 100% rename from internal/command/testdata/init-providers-lock/main.tf rename to command/testdata/init-providers-lock/main.tf diff --git a/internal/command/testdata/init-registry-module/main.tf b/command/testdata/init-registry-module/main.tf similarity index 100% rename from internal/command/testdata/init-registry-module/main.tf rename to command/testdata/init-registry-module/main.tf diff --git a/internal/command/testdata/init-required-providers/main.tf b/command/testdata/init-required-providers/main.tf similarity index 100% rename from internal/command/testdata/init-required-providers/main.tf rename to command/testdata/init-required-providers/main.tf diff --git a/internal/command/testdata/init-syntax-invalid-backend-invalid/main.tf b/command/testdata/init-syntax-invalid-backend-invalid/main.tf similarity index 100% rename from internal/command/testdata/init-syntax-invalid-backend-invalid/main.tf rename to command/testdata/init-syntax-invalid-backend-invalid/main.tf diff --git a/internal/command/testdata/init-syntax-invalid-no-backend/main.tf 
b/command/testdata/init-syntax-invalid-no-backend/main.tf similarity index 100% rename from internal/command/testdata/init-syntax-invalid-no-backend/main.tf rename to command/testdata/init-syntax-invalid-no-backend/main.tf diff --git a/internal/command/testdata/init-syntax-invalid-with-backend/main.tf b/command/testdata/init-syntax-invalid-with-backend/main.tf similarity index 100% rename from internal/command/testdata/init-syntax-invalid-with-backend/main.tf rename to command/testdata/init-syntax-invalid-with-backend/main.tf diff --git a/internal/command/testdata/init/hello.tf b/command/testdata/init/hello.tf similarity index 100% rename from internal/command/testdata/init/hello.tf rename to command/testdata/init/hello.tf diff --git a/internal/command/testdata/inmem-backend/main.tf b/command/testdata/inmem-backend/main.tf similarity index 100% rename from internal/command/testdata/inmem-backend/main.tf rename to command/testdata/inmem-backend/main.tf diff --git a/command/testdata/login-oauth-server/main.go b/command/testdata/login-oauth-server/main.go new file mode 100644 index 000000000000..6fd4ef2759df --- /dev/null +++ b/command/testdata/login-oauth-server/main.go @@ -0,0 +1,72 @@ +//go:build ignore +// +build ignore + +// This file is a helper for those doing _manual_ testing of "terraform login" +// and/or "terraform logout" and want to start up a test OAuth server in a +// separate process for convenience: +// +// go run ./command/testdata/login-oauth-server/main.go :8080 +// +// This is _not_ the main way to use this oauthserver package. 
For automated +// test code, import it as a normal Go package instead: +// +// import oauthserver "github.com/hashicorp/terraform/command/testdata/login-oauth-server" + +package main + +import ( + "fmt" + "net" + "net/http" + "os" + + oauthserver "github.com/hashicorp/terraform/command/testdata/login-oauth-server" +) + +func main() { + if len(os.Args) < 2 { + fmt.Fprintln(os.Stderr, "Usage: go run ./command/testdata/login-oauth-server/main.go ") + os.Exit(1) + } + + host, port, err := net.SplitHostPort(os.Args[1]) + if err != nil { + fmt.Fprintln(os.Stderr, "Invalid address: %s", err) + os.Exit(1) + } + + if host == "" { + host = "127.0.0.1" + } + addr := fmt.Sprintf("%s:%s", host, port) + + fmt.Printf("Will listen on %s...\n", addr) + fmt.Printf( + configExampleFmt, + fmt.Sprintf("http://%s:%s/authz", host, port), + fmt.Sprintf("http://%s:%s/token", host, port), + fmt.Sprintf("http://%s:%s/revoke", host, port), + ) + + server := &http.Server{ + Addr: addr, + Handler: oauthserver.Handler, + } + err = server.ListenAndServe() + fmt.Fprintln(os.Stderr, err.Error()) +} + +const configExampleFmt = ` +host "login-test.example.com" { + services = { + "login.v1" = { + authz = %q + token = %q + client = "placeholder" + grant_types = ["code", "password"] + } + "logout.v1" = %q + } +} + +` diff --git a/internal/command/testdata/login-oauth-server/oauthserver.go b/command/testdata/login-oauth-server/oauthserver.go similarity index 100% rename from internal/command/testdata/login-oauth-server/oauthserver.go rename to command/testdata/login-oauth-server/oauthserver.go diff --git a/internal/command/testdata/login-tfe-server/tfeserver.go b/command/testdata/login-tfe-server/tfeserver.go similarity index 100% rename from internal/command/testdata/login-tfe-server/tfeserver.go rename to command/testdata/login-tfe-server/tfeserver.go diff --git a/internal/command/testdata/modules/.terraform/modules/modules.json b/command/testdata/modules/.terraform/modules/modules.json similarity 
index 100% rename from internal/command/testdata/modules/.terraform/modules/modules.json rename to command/testdata/modules/.terraform/modules/modules.json diff --git a/internal/command/testdata/modules/child/main.tf b/command/testdata/modules/child/main.tf similarity index 100% rename from internal/command/testdata/modules/child/main.tf rename to command/testdata/modules/child/main.tf diff --git a/internal/command/testdata/modules/main.tf b/command/testdata/modules/main.tf similarity index 100% rename from internal/command/testdata/modules/main.tf rename to command/testdata/modules/main.tf diff --git a/internal/command/testdata/modules/terraform.tfstate b/command/testdata/modules/terraform.tfstate similarity index 100% rename from internal/command/testdata/modules/terraform.tfstate rename to command/testdata/modules/terraform.tfstate diff --git a/internal/command/testdata/parallelism/main.tf b/command/testdata/parallelism/main.tf similarity index 100% rename from internal/command/testdata/parallelism/main.tf rename to command/testdata/parallelism/main.tf diff --git a/internal/command/testdata/plan-emptydiff/main.tf b/command/testdata/plan-emptydiff/main.tf similarity index 100% rename from internal/command/testdata/plan-emptydiff/main.tf rename to command/testdata/plan-emptydiff/main.tf diff --git a/internal/command/testdata/plan-fail-condition/main.tf b/command/testdata/plan-fail-condition/main.tf similarity index 100% rename from internal/command/testdata/plan-fail-condition/main.tf rename to command/testdata/plan-fail-condition/main.tf diff --git a/internal/command/testdata/plan-invalid/main.tf b/command/testdata/plan-invalid/main.tf similarity index 100% rename from internal/command/testdata/plan-invalid/main.tf rename to command/testdata/plan-invalid/main.tf diff --git a/internal/command/testdata/plan-out-backend-legacy/main.tf b/command/testdata/plan-out-backend-legacy/main.tf similarity index 100% rename from 
internal/command/testdata/plan-out-backend-legacy/main.tf rename to command/testdata/plan-out-backend-legacy/main.tf diff --git a/internal/command/testdata/plan-out-backend/main.tf b/command/testdata/plan-out-backend/main.tf similarity index 100% rename from internal/command/testdata/plan-out-backend/main.tf rename to command/testdata/plan-out-backend/main.tf diff --git a/internal/command/testdata/plan-provider-input/main.tf b/command/testdata/plan-provider-input/main.tf similarity index 100% rename from internal/command/testdata/plan-provider-input/main.tf rename to command/testdata/plan-provider-input/main.tf diff --git a/internal/command/testdata/plan-replace/main.tf b/command/testdata/plan-replace/main.tf similarity index 100% rename from internal/command/testdata/plan-replace/main.tf rename to command/testdata/plan-replace/main.tf diff --git a/internal/command/testdata/plan-vars/main.tf b/command/testdata/plan-vars/main.tf similarity index 100% rename from internal/command/testdata/plan-vars/main.tf rename to command/testdata/plan-vars/main.tf diff --git a/internal/command/testdata/plan/main.tf b/command/testdata/plan/main.tf similarity index 100% rename from internal/command/testdata/plan/main.tf rename to command/testdata/plan/main.tf diff --git a/internal/command/testdata/plan/output.jsonlog b/command/testdata/plan/output.jsonlog similarity index 100% rename from internal/command/testdata/plan/output.jsonlog rename to command/testdata/plan/output.jsonlog diff --git a/internal/command/testdata/providers-lock/append/.terraform.lock.hcl b/command/testdata/providers-lock/append/.terraform.lock.hcl similarity index 100% rename from internal/command/testdata/providers-lock/append/.terraform.lock.hcl rename to command/testdata/providers-lock/append/.terraform.lock.hcl diff --git a/internal/command/testdata/providers-lock/append/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test 
b/command/testdata/providers-lock/append/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test similarity index 100% rename from internal/command/testdata/providers-lock/append/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test rename to command/testdata/providers-lock/append/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test diff --git a/internal/command/testdata/providers-lock/append/main.tf b/command/testdata/providers-lock/append/main.tf similarity index 100% rename from internal/command/testdata/providers-lock/append/main.tf rename to command/testdata/providers-lock/append/main.tf diff --git a/internal/command/testdata/providers-lock/basic/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test b/command/testdata/providers-lock/basic/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test similarity index 100% rename from internal/command/testdata/providers-lock/basic/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test rename to command/testdata/providers-lock/basic/fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch/terraform-provider-test diff --git a/internal/command/testdata/providers-lock/basic/main.tf b/command/testdata/providers-lock/basic/main.tf similarity index 100% rename from internal/command/testdata/providers-lock/basic/main.tf rename to command/testdata/providers-lock/basic/main.tf diff --git a/internal/command/testdata/providers-schema/basic/output.json b/command/testdata/providers-schema/basic/output.json similarity index 100% rename from internal/command/testdata/providers-schema/basic/output.json rename to command/testdata/providers-schema/basic/output.json diff --git a/internal/command/testdata/providers-schema/basic/provider.tf b/command/testdata/providers-schema/basic/provider.tf similarity index 100% rename from 
internal/command/testdata/providers-schema/basic/provider.tf rename to command/testdata/providers-schema/basic/provider.tf diff --git a/internal/command/testdata/providers-schema/empty/main.tf b/command/testdata/providers-schema/empty/main.tf similarity index 100% rename from internal/command/testdata/providers-schema/empty/main.tf rename to command/testdata/providers-schema/empty/main.tf diff --git a/internal/command/testdata/providers-schema/empty/output.json b/command/testdata/providers-schema/empty/output.json similarity index 100% rename from internal/command/testdata/providers-schema/empty/output.json rename to command/testdata/providers-schema/empty/output.json diff --git a/internal/command/testdata/providers-schema/required/output.json b/command/testdata/providers-schema/required/output.json similarity index 100% rename from internal/command/testdata/providers-schema/required/output.json rename to command/testdata/providers-schema/required/output.json diff --git a/internal/command/testdata/providers-schema/required/provider.tf b/command/testdata/providers-schema/required/provider.tf similarity index 100% rename from internal/command/testdata/providers-schema/required/provider.tf rename to command/testdata/providers-schema/required/provider.tf diff --git a/internal/command/testdata/providers/basic/main.tf b/command/testdata/providers/basic/main.tf similarity index 100% rename from internal/command/testdata/providers/basic/main.tf rename to command/testdata/providers/basic/main.tf diff --git a/internal/command/testdata/providers/modules/child/main.tf b/command/testdata/providers/modules/child/main.tf similarity index 100% rename from internal/command/testdata/providers/modules/child/main.tf rename to command/testdata/providers/modules/child/main.tf diff --git a/internal/command/testdata/providers/modules/main.tf b/command/testdata/providers/modules/main.tf similarity index 100% rename from internal/command/testdata/providers/modules/main.tf rename to 
command/testdata/providers/modules/main.tf diff --git a/internal/command/testdata/providers/state/main.tf b/command/testdata/providers/state/main.tf similarity index 100% rename from internal/command/testdata/providers/state/main.tf rename to command/testdata/providers/state/main.tf diff --git a/internal/command/testdata/providers/state/terraform.tfstate b/command/testdata/providers/state/terraform.tfstate similarity index 100% rename from internal/command/testdata/providers/state/terraform.tfstate rename to command/testdata/providers/state/terraform.tfstate diff --git a/internal/command/testdata/push-backend-new/main.tf b/command/testdata/push-backend-new/main.tf similarity index 100% rename from internal/command/testdata/push-backend-new/main.tf rename to command/testdata/push-backend-new/main.tf diff --git a/internal/command/testdata/push-input-partial/main.tf b/command/testdata/push-input-partial/main.tf similarity index 100% rename from internal/command/testdata/push-input-partial/main.tf rename to command/testdata/push-input-partial/main.tf diff --git a/internal/command/testdata/push-input/main.tf b/command/testdata/push-input/main.tf similarity index 100% rename from internal/command/testdata/push-input/main.tf rename to command/testdata/push-input/main.tf diff --git a/internal/command/testdata/push-no-remote/main.tf b/command/testdata/push-no-remote/main.tf similarity index 100% rename from internal/command/testdata/push-no-remote/main.tf rename to command/testdata/push-no-remote/main.tf diff --git a/internal/command/testdata/push-no-upload/child/main.tf b/command/testdata/push-no-upload/child/main.tf similarity index 100% rename from internal/command/testdata/push-no-upload/child/main.tf rename to command/testdata/push-no-upload/child/main.tf diff --git a/internal/command/testdata/push-no-upload/main.tf b/command/testdata/push-no-upload/main.tf similarity index 100% rename from internal/command/testdata/push-no-upload/main.tf rename to 
command/testdata/push-no-upload/main.tf diff --git a/internal/command/testdata/push-tfvars/main.tf b/command/testdata/push-tfvars/main.tf similarity index 100% rename from internal/command/testdata/push-tfvars/main.tf rename to command/testdata/push-tfvars/main.tf diff --git a/internal/command/testdata/push-tfvars/terraform.tfvars b/command/testdata/push-tfvars/terraform.tfvars similarity index 100% rename from internal/command/testdata/push-tfvars/terraform.tfvars rename to command/testdata/push-tfvars/terraform.tfvars diff --git a/internal/command/testdata/push/main.tf b/command/testdata/push/main.tf similarity index 100% rename from internal/command/testdata/push/main.tf rename to command/testdata/push/main.tf diff --git a/internal/command/testdata/refresh-empty/main.tf b/command/testdata/refresh-empty/main.tf similarity index 100% rename from internal/command/testdata/refresh-empty/main.tf rename to command/testdata/refresh-empty/main.tf diff --git a/internal/command/testdata/refresh-output/main.tf b/command/testdata/refresh-output/main.tf similarity index 100% rename from internal/command/testdata/refresh-output/main.tf rename to command/testdata/refresh-output/main.tf diff --git a/internal/command/testdata/refresh-targeted/main.tf b/command/testdata/refresh-targeted/main.tf similarity index 100% rename from internal/command/testdata/refresh-targeted/main.tf rename to command/testdata/refresh-targeted/main.tf diff --git a/internal/command/testdata/refresh-unset-var/main.tf b/command/testdata/refresh-unset-var/main.tf similarity index 100% rename from internal/command/testdata/refresh-unset-var/main.tf rename to command/testdata/refresh-unset-var/main.tf diff --git a/internal/command/testdata/refresh-var/main.tf b/command/testdata/refresh-var/main.tf similarity index 100% rename from internal/command/testdata/refresh-var/main.tf rename to command/testdata/refresh-var/main.tf diff --git a/internal/command/testdata/refresh/main.tf 
b/command/testdata/refresh/main.tf similarity index 100% rename from internal/command/testdata/refresh/main.tf rename to command/testdata/refresh/main.tf diff --git a/internal/command/testdata/show-corrupt-statefile/terraform.tfstate b/command/testdata/show-corrupt-statefile/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-corrupt-statefile/terraform.tfstate rename to command/testdata/show-corrupt-statefile/terraform.tfstate diff --git a/internal/command/testdata/show-json-sensitive/main.tf b/command/testdata/show-json-sensitive/main.tf similarity index 100% rename from internal/command/testdata/show-json-sensitive/main.tf rename to command/testdata/show-json-sensitive/main.tf diff --git a/internal/command/testdata/show-json-sensitive/output.json b/command/testdata/show-json-sensitive/output.json similarity index 100% rename from internal/command/testdata/show-json-sensitive/output.json rename to command/testdata/show-json-sensitive/output.json diff --git a/internal/command/testdata/show-json-state/basic/output.json b/command/testdata/show-json-state/basic/output.json similarity index 100% rename from internal/command/testdata/show-json-state/basic/output.json rename to command/testdata/show-json-state/basic/output.json diff --git a/internal/command/testdata/show-json-state/basic/terraform.tfstate b/command/testdata/show-json-state/basic/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json-state/basic/terraform.tfstate rename to command/testdata/show-json-state/basic/terraform.tfstate diff --git a/internal/command/testdata/show-json-state/empty/output.json b/command/testdata/show-json-state/empty/output.json similarity index 100% rename from internal/command/testdata/show-json-state/empty/output.json rename to command/testdata/show-json-state/empty/output.json diff --git a/internal/command/testdata/show-json-state/empty/terraform.tfstate b/command/testdata/show-json-state/empty/terraform.tfstate 
similarity index 100% rename from internal/command/testdata/show-json-state/empty/terraform.tfstate rename to command/testdata/show-json-state/empty/terraform.tfstate diff --git a/internal/command/testdata/show-json-state/modules/bar/main.tf b/command/testdata/show-json-state/modules/bar/main.tf similarity index 100% rename from internal/command/testdata/show-json-state/modules/bar/main.tf rename to command/testdata/show-json-state/modules/bar/main.tf diff --git a/internal/command/testdata/show-json-state/modules/foo/main.tf b/command/testdata/show-json-state/modules/foo/main.tf similarity index 100% rename from internal/command/testdata/show-json-state/modules/foo/main.tf rename to command/testdata/show-json-state/modules/foo/main.tf diff --git a/internal/command/testdata/show-json-state/modules/main.tf b/command/testdata/show-json-state/modules/main.tf similarity index 100% rename from internal/command/testdata/show-json-state/modules/main.tf rename to command/testdata/show-json-state/modules/main.tf diff --git a/internal/command/testdata/show-json-state/modules/output.json b/command/testdata/show-json-state/modules/output.json similarity index 100% rename from internal/command/testdata/show-json-state/modules/output.json rename to command/testdata/show-json-state/modules/output.json diff --git a/internal/command/testdata/show-json-state/modules/terraform.tfstate b/command/testdata/show-json-state/modules/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json-state/modules/terraform.tfstate rename to command/testdata/show-json-state/modules/terraform.tfstate diff --git a/internal/command/testdata/show-json-state/no-state/output.json b/command/testdata/show-json-state/no-state/output.json similarity index 100% rename from internal/command/testdata/show-json-state/no-state/output.json rename to command/testdata/show-json-state/no-state/output.json diff --git a/internal/command/testdata/show-json-state/sensitive-variables/output.json 
b/command/testdata/show-json-state/sensitive-variables/output.json similarity index 100% rename from internal/command/testdata/show-json-state/sensitive-variables/output.json rename to command/testdata/show-json-state/sensitive-variables/output.json diff --git a/internal/command/testdata/show-json-state/sensitive-variables/terraform.tfstate b/command/testdata/show-json-state/sensitive-variables/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json-state/sensitive-variables/terraform.tfstate rename to command/testdata/show-json-state/sensitive-variables/terraform.tfstate diff --git a/internal/command/testdata/show-json/basic-create/main.tf b/command/testdata/show-json/basic-create/main.tf similarity index 100% rename from internal/command/testdata/show-json/basic-create/main.tf rename to command/testdata/show-json/basic-create/main.tf diff --git a/internal/command/testdata/show-json/basic-create/output.json b/command/testdata/show-json/basic-create/output.json similarity index 100% rename from internal/command/testdata/show-json/basic-create/output.json rename to command/testdata/show-json/basic-create/output.json diff --git a/internal/command/testdata/show-json/basic-delete/main.tf b/command/testdata/show-json/basic-delete/main.tf similarity index 100% rename from internal/command/testdata/show-json/basic-delete/main.tf rename to command/testdata/show-json/basic-delete/main.tf diff --git a/internal/command/testdata/show-json/basic-delete/output.json b/command/testdata/show-json/basic-delete/output.json similarity index 100% rename from internal/command/testdata/show-json/basic-delete/output.json rename to command/testdata/show-json/basic-delete/output.json diff --git a/internal/command/testdata/show-json/basic-delete/terraform.tfstate b/command/testdata/show-json/basic-delete/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/basic-delete/terraform.tfstate rename to 
command/testdata/show-json/basic-delete/terraform.tfstate diff --git a/internal/command/testdata/show-json/basic-update/main.tf b/command/testdata/show-json/basic-update/main.tf similarity index 100% rename from internal/command/testdata/show-json/basic-update/main.tf rename to command/testdata/show-json/basic-update/main.tf diff --git a/internal/command/testdata/show-json/basic-update/output.json b/command/testdata/show-json/basic-update/output.json similarity index 100% rename from internal/command/testdata/show-json/basic-update/output.json rename to command/testdata/show-json/basic-update/output.json diff --git a/internal/command/testdata/show-json/basic-update/terraform.tfstate b/command/testdata/show-json/basic-update/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/basic-update/terraform.tfstate rename to command/testdata/show-json/basic-update/terraform.tfstate diff --git a/internal/command/testdata/show-json/conditions/for-refresh.tfstate b/command/testdata/show-json/conditions/for-refresh.tfstate similarity index 100% rename from internal/command/testdata/show-json/conditions/for-refresh.tfstate rename to command/testdata/show-json/conditions/for-refresh.tfstate diff --git a/internal/command/testdata/show-json/conditions/main.tf b/command/testdata/show-json/conditions/main.tf similarity index 100% rename from internal/command/testdata/show-json/conditions/main.tf rename to command/testdata/show-json/conditions/main.tf diff --git a/internal/command/testdata/show-json/conditions/output-refresh-only.json b/command/testdata/show-json/conditions/output-refresh-only.json similarity index 100% rename from internal/command/testdata/show-json/conditions/output-refresh-only.json rename to command/testdata/show-json/conditions/output-refresh-only.json diff --git a/internal/command/testdata/show-json/conditions/output.json b/command/testdata/show-json/conditions/output.json similarity index 100% rename from 
internal/command/testdata/show-json/conditions/output.json rename to command/testdata/show-json/conditions/output.json diff --git a/internal/command/testdata/show-json/drift/main.tf b/command/testdata/show-json/drift/main.tf similarity index 100% rename from internal/command/testdata/show-json/drift/main.tf rename to command/testdata/show-json/drift/main.tf diff --git a/internal/command/testdata/show-json/drift/output.json b/command/testdata/show-json/drift/output.json similarity index 100% rename from internal/command/testdata/show-json/drift/output.json rename to command/testdata/show-json/drift/output.json diff --git a/internal/command/testdata/show-json/drift/terraform.tfstate b/command/testdata/show-json/drift/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/drift/terraform.tfstate rename to command/testdata/show-json/drift/terraform.tfstate diff --git a/internal/command/testdata/show-json/module-depends-on/foo/main.tf b/command/testdata/show-json/module-depends-on/foo/main.tf similarity index 100% rename from internal/command/testdata/show-json/module-depends-on/foo/main.tf rename to command/testdata/show-json/module-depends-on/foo/main.tf diff --git a/internal/command/testdata/show-json/module-depends-on/main.tf b/command/testdata/show-json/module-depends-on/main.tf similarity index 100% rename from internal/command/testdata/show-json/module-depends-on/main.tf rename to command/testdata/show-json/module-depends-on/main.tf diff --git a/internal/command/testdata/show-json/module-depends-on/output.json b/command/testdata/show-json/module-depends-on/output.json similarity index 100% rename from internal/command/testdata/show-json/module-depends-on/output.json rename to command/testdata/show-json/module-depends-on/output.json diff --git a/internal/command/testdata/show-json/modules/bar/main.tf b/command/testdata/show-json/modules/bar/main.tf similarity index 100% rename from 
internal/command/testdata/show-json/modules/bar/main.tf rename to command/testdata/show-json/modules/bar/main.tf diff --git a/internal/command/testdata/show-json/modules/foo/main.tf b/command/testdata/show-json/modules/foo/main.tf similarity index 100% rename from internal/command/testdata/show-json/modules/foo/main.tf rename to command/testdata/show-json/modules/foo/main.tf diff --git a/internal/command/testdata/show-json/modules/main.tf b/command/testdata/show-json/modules/main.tf similarity index 100% rename from internal/command/testdata/show-json/modules/main.tf rename to command/testdata/show-json/modules/main.tf diff --git a/internal/command/testdata/show-json/modules/output.json b/command/testdata/show-json/modules/output.json similarity index 100% rename from internal/command/testdata/show-json/modules/output.json rename to command/testdata/show-json/modules/output.json diff --git a/internal/command/testdata/show-json/moved-drift/main.tf b/command/testdata/show-json/moved-drift/main.tf similarity index 100% rename from internal/command/testdata/show-json/moved-drift/main.tf rename to command/testdata/show-json/moved-drift/main.tf diff --git a/internal/command/testdata/show-json/moved-drift/output.json b/command/testdata/show-json/moved-drift/output.json similarity index 100% rename from internal/command/testdata/show-json/moved-drift/output.json rename to command/testdata/show-json/moved-drift/output.json diff --git a/internal/command/testdata/show-json/moved-drift/terraform.tfstate b/command/testdata/show-json/moved-drift/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/moved-drift/terraform.tfstate rename to command/testdata/show-json/moved-drift/terraform.tfstate diff --git a/internal/command/testdata/show-json/moved/main.tf b/command/testdata/show-json/moved/main.tf similarity index 100% rename from internal/command/testdata/show-json/moved/main.tf rename to command/testdata/show-json/moved/main.tf diff --git 
a/internal/command/testdata/show-json/moved/output.json b/command/testdata/show-json/moved/output.json similarity index 100% rename from internal/command/testdata/show-json/moved/output.json rename to command/testdata/show-json/moved/output.json diff --git a/internal/command/testdata/show-json/moved/terraform.tfstate b/command/testdata/show-json/moved/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/moved/terraform.tfstate rename to command/testdata/show-json/moved/terraform.tfstate diff --git a/internal/command/testdata/show-json/multi-resource-update/main.tf b/command/testdata/show-json/multi-resource-update/main.tf similarity index 100% rename from internal/command/testdata/show-json/multi-resource-update/main.tf rename to command/testdata/show-json/multi-resource-update/main.tf diff --git a/internal/command/testdata/show-json/multi-resource-update/output.json b/command/testdata/show-json/multi-resource-update/output.json similarity index 100% rename from internal/command/testdata/show-json/multi-resource-update/output.json rename to command/testdata/show-json/multi-resource-update/output.json diff --git a/internal/command/testdata/show-json/multi-resource-update/terraform.tfstate b/command/testdata/show-json/multi-resource-update/terraform.tfstate similarity index 100% rename from internal/command/testdata/show-json/multi-resource-update/terraform.tfstate rename to command/testdata/show-json/multi-resource-update/terraform.tfstate diff --git a/internal/command/testdata/show-json/nested-module-error/main.tf b/command/testdata/show-json/nested-module-error/main.tf similarity index 100% rename from internal/command/testdata/show-json/nested-module-error/main.tf rename to command/testdata/show-json/nested-module-error/main.tf diff --git a/internal/command/testdata/show-json/nested-module-error/modules/main.tf b/command/testdata/show-json/nested-module-error/modules/main.tf similarity index 100% rename from 
internal/command/testdata/show-json/nested-module-error/modules/main.tf rename to command/testdata/show-json/nested-module-error/modules/main.tf diff --git a/internal/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf b/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf similarity index 100% rename from internal/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf rename to command/testdata/show-json/nested-module-error/modules/more-modules/main.tf diff --git a/internal/command/testdata/show-json/nested-modules/main.tf b/command/testdata/show-json/nested-modules/main.tf similarity index 100% rename from internal/command/testdata/show-json/nested-modules/main.tf rename to command/testdata/show-json/nested-modules/main.tf diff --git a/internal/command/testdata/show-json/nested-modules/modules/main.tf b/command/testdata/show-json/nested-modules/modules/main.tf similarity index 100% rename from internal/command/testdata/show-json/nested-modules/modules/main.tf rename to command/testdata/show-json/nested-modules/modules/main.tf diff --git a/internal/command/testdata/show-json/nested-modules/modules/more-modules/main.tf b/command/testdata/show-json/nested-modules/modules/more-modules/main.tf similarity index 100% rename from internal/command/testdata/show-json/nested-modules/modules/more-modules/main.tf rename to command/testdata/show-json/nested-modules/modules/more-modules/main.tf diff --git a/internal/command/testdata/show-json/nested-modules/output.json b/command/testdata/show-json/nested-modules/output.json similarity index 100% rename from internal/command/testdata/show-json/nested-modules/output.json rename to command/testdata/show-json/nested-modules/output.json diff --git a/internal/command/testdata/show-json/provider-aliasing-conflict/child/main.tf b/command/testdata/show-json/provider-aliasing-conflict/child/main.tf similarity index 100% rename from 
internal/command/testdata/show-json/provider-aliasing-conflict/child/main.tf rename to command/testdata/show-json/provider-aliasing-conflict/child/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing-conflict/main.tf b/command/testdata/show-json/provider-aliasing-conflict/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-conflict/main.tf rename to command/testdata/show-json/provider-aliasing-conflict/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing-conflict/output.json b/command/testdata/show-json/provider-aliasing-conflict/output.json similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-conflict/output.json rename to command/testdata/show-json/provider-aliasing-conflict/output.json diff --git a/internal/command/testdata/show-json/provider-aliasing-default/child/main.tf b/command/testdata/show-json/provider-aliasing-default/child/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-default/child/main.tf rename to command/testdata/show-json/provider-aliasing-default/child/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf b/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf rename to command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf b/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf rename to command/testdata/show-json/provider-aliasing-default/child/nested/main.tf diff --git 
a/internal/command/testdata/show-json/provider-aliasing-default/main.tf b/command/testdata/show-json/provider-aliasing-default/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-default/main.tf rename to command/testdata/show-json/provider-aliasing-default/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing-default/output.json b/command/testdata/show-json/provider-aliasing-default/output.json similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing-default/output.json rename to command/testdata/show-json/provider-aliasing-default/output.json diff --git a/internal/command/testdata/show-json/provider-aliasing/child/main.tf b/command/testdata/show-json/provider-aliasing/child/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing/child/main.tf rename to command/testdata/show-json/provider-aliasing/child/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing/child/nested/main.tf b/command/testdata/show-json/provider-aliasing/child/nested/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing/child/nested/main.tf rename to command/testdata/show-json/provider-aliasing/child/nested/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing/main.tf b/command/testdata/show-json/provider-aliasing/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing/main.tf rename to command/testdata/show-json/provider-aliasing/main.tf diff --git a/internal/command/testdata/show-json/provider-aliasing/output.json b/command/testdata/show-json/provider-aliasing/output.json similarity index 100% rename from internal/command/testdata/show-json/provider-aliasing/output.json rename to command/testdata/show-json/provider-aliasing/output.json diff --git a/internal/command/testdata/show-json/provider-version-no-config/main.tf 
b/command/testdata/show-json/provider-version-no-config/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-version-no-config/main.tf rename to command/testdata/show-json/provider-version-no-config/main.tf diff --git a/internal/command/testdata/show-json/provider-version-no-config/output.json b/command/testdata/show-json/provider-version-no-config/output.json similarity index 100% rename from internal/command/testdata/show-json/provider-version-no-config/output.json rename to command/testdata/show-json/provider-version-no-config/output.json diff --git a/internal/command/testdata/show-json/provider-version/main.tf b/command/testdata/show-json/provider-version/main.tf similarity index 100% rename from internal/command/testdata/show-json/provider-version/main.tf rename to command/testdata/show-json/provider-version/main.tf diff --git a/internal/command/testdata/show-json/provider-version/output.json b/command/testdata/show-json/provider-version/output.json similarity index 100% rename from internal/command/testdata/show-json/provider-version/output.json rename to command/testdata/show-json/provider-version/output.json diff --git a/internal/command/testdata/show-json/requires-replace/main.tf b/command/testdata/show-json/requires-replace/main.tf similarity index 100% rename from internal/command/testdata/show-json/requires-replace/main.tf rename to command/testdata/show-json/requires-replace/main.tf diff --git a/internal/command/testdata/show-json/requires-replace/output.json b/command/testdata/show-json/requires-replace/output.json similarity index 100% rename from internal/command/testdata/show-json/requires-replace/output.json rename to command/testdata/show-json/requires-replace/output.json diff --git a/internal/command/testdata/show-json/requires-replace/terraform.tfstate b/command/testdata/show-json/requires-replace/terraform.tfstate similarity index 100% rename from 
internal/command/testdata/show-json/requires-replace/terraform.tfstate rename to command/testdata/show-json/requires-replace/terraform.tfstate diff --git a/internal/command/testdata/show-json/sensitive-values/main.tf b/command/testdata/show-json/sensitive-values/main.tf similarity index 100% rename from internal/command/testdata/show-json/sensitive-values/main.tf rename to command/testdata/show-json/sensitive-values/main.tf diff --git a/internal/command/testdata/show-json/sensitive-values/output.json b/command/testdata/show-json/sensitive-values/output.json similarity index 100% rename from internal/command/testdata/show-json/sensitive-values/output.json rename to command/testdata/show-json/sensitive-values/output.json diff --git a/internal/command/testdata/show-json/unknown-output/main.tf b/command/testdata/show-json/unknown-output/main.tf similarity index 100% rename from internal/command/testdata/show-json/unknown-output/main.tf rename to command/testdata/show-json/unknown-output/main.tf diff --git a/internal/command/testdata/show-json/unknown-output/output.json b/command/testdata/show-json/unknown-output/output.json similarity index 100% rename from internal/command/testdata/show-json/unknown-output/output.json rename to command/testdata/show-json/unknown-output/output.json diff --git a/internal/command/testdata/show/main.tf b/command/testdata/show/main.tf similarity index 100% rename from internal/command/testdata/show/main.tf rename to command/testdata/show/main.tf diff --git a/internal/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate b/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate rename to command/testdata/state-list-backend-custom/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-list-backend-custom/local-state.tfstate 
b/command/testdata/state-list-backend-custom/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-list-backend-custom/local-state.tfstate rename to command/testdata/state-list-backend-custom/local-state.tfstate diff --git a/internal/command/testdata/state-list-backend-custom/main.tf b/command/testdata/state-list-backend-custom/main.tf similarity index 100% rename from internal/command/testdata/state-list-backend-custom/main.tf rename to command/testdata/state-list-backend-custom/main.tf diff --git a/internal/command/testdata/state-list-backend-default/.terraform/terraform.tfstate b/command/testdata/state-list-backend-default/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-list-backend-default/.terraform/terraform.tfstate rename to command/testdata/state-list-backend-default/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-list-backend-default/main.tf b/command/testdata/state-list-backend-default/main.tf similarity index 100% rename from internal/command/testdata/state-list-backend-default/main.tf rename to command/testdata/state-list-backend-default/main.tf diff --git a/internal/command/testdata/state-list-backend-default/terraform.tfstate b/command/testdata/state-list-backend-default/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-list-backend-default/terraform.tfstate rename to command/testdata/state-list-backend-default/terraform.tfstate diff --git a/internal/command/testdata/state-list-nested-modules/terraform.tfstate b/command/testdata/state-list-nested-modules/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-list-nested-modules/terraform.tfstate rename to command/testdata/state-list-nested-modules/terraform.tfstate diff --git a/internal/command/testdata/state-pull-backend/.terraform/terraform.tfstate b/command/testdata/state-pull-backend/.terraform/terraform.tfstate similarity index 
100% rename from internal/command/testdata/state-pull-backend/.terraform/terraform.tfstate rename to command/testdata/state-pull-backend/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-pull-backend/local-state.tfstate b/command/testdata/state-pull-backend/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-pull-backend/local-state.tfstate rename to command/testdata/state-pull-backend/local-state.tfstate diff --git a/internal/command/testdata/state-pull-backend/main.tf b/command/testdata/state-pull-backend/main.tf similarity index 100% rename from internal/command/testdata/state-pull-backend/main.tf rename to command/testdata/state-pull-backend/main.tf diff --git a/internal/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate b/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate rename to command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-push-bad-lineage/local-state.tfstate b/command/testdata/state-push-bad-lineage/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-push-bad-lineage/local-state.tfstate rename to command/testdata/state-push-bad-lineage/local-state.tfstate diff --git a/internal/command/testdata/state-push-bad-lineage/main.tf b/command/testdata/state-push-bad-lineage/main.tf similarity index 100% rename from internal/command/testdata/state-push-bad-lineage/main.tf rename to command/testdata/state-push-bad-lineage/main.tf diff --git a/internal/command/testdata/state-push-bad-lineage/replace.tfstate b/command/testdata/state-push-bad-lineage/replace.tfstate similarity index 100% rename from internal/command/testdata/state-push-bad-lineage/replace.tfstate rename to command/testdata/state-push-bad-lineage/replace.tfstate diff --git 
a/internal/command/testdata/state-push-good/.terraform/terraform.tfstate b/command/testdata/state-push-good/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-push-good/.terraform/terraform.tfstate rename to command/testdata/state-push-good/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-push-good/main.tf b/command/testdata/state-push-good/main.tf similarity index 100% rename from internal/command/testdata/state-push-good/main.tf rename to command/testdata/state-push-good/main.tf diff --git a/internal/command/testdata/state-push-good/replace.tfstate b/command/testdata/state-push-good/replace.tfstate similarity index 100% rename from internal/command/testdata/state-push-good/replace.tfstate rename to command/testdata/state-push-good/replace.tfstate diff --git a/internal/command/testdata/state-push-replace-match/.terraform/terraform.tfstate b/command/testdata/state-push-replace-match/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-push-replace-match/.terraform/terraform.tfstate rename to command/testdata/state-push-replace-match/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-push-replace-match/local-state.tfstate b/command/testdata/state-push-replace-match/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-push-replace-match/local-state.tfstate rename to command/testdata/state-push-replace-match/local-state.tfstate diff --git a/internal/command/testdata/state-push-replace-match/main.tf b/command/testdata/state-push-replace-match/main.tf similarity index 100% rename from internal/command/testdata/state-push-replace-match/main.tf rename to command/testdata/state-push-replace-match/main.tf diff --git a/internal/command/testdata/state-push-replace-match/replace.tfstate b/command/testdata/state-push-replace-match/replace.tfstate similarity index 100% rename from 
internal/command/testdata/state-push-replace-match/replace.tfstate rename to command/testdata/state-push-replace-match/replace.tfstate diff --git a/internal/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate b/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate rename to command/testdata/state-push-serial-newer/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-push-serial-newer/local-state.tfstate b/command/testdata/state-push-serial-newer/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-newer/local-state.tfstate rename to command/testdata/state-push-serial-newer/local-state.tfstate diff --git a/internal/command/testdata/state-push-serial-newer/main.tf b/command/testdata/state-push-serial-newer/main.tf similarity index 100% rename from internal/command/testdata/state-push-serial-newer/main.tf rename to command/testdata/state-push-serial-newer/main.tf diff --git a/internal/command/testdata/state-push-serial-newer/replace.tfstate b/command/testdata/state-push-serial-newer/replace.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-newer/replace.tfstate rename to command/testdata/state-push-serial-newer/replace.tfstate diff --git a/internal/command/testdata/state-push-serial-older/.terraform/terraform.tfstate b/command/testdata/state-push-serial-older/.terraform/terraform.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-older/.terraform/terraform.tfstate rename to command/testdata/state-push-serial-older/.terraform/terraform.tfstate diff --git a/internal/command/testdata/state-push-serial-older/local-state.tfstate b/command/testdata/state-push-serial-older/local-state.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-older/local-state.tfstate 
rename to command/testdata/state-push-serial-older/local-state.tfstate diff --git a/internal/command/testdata/state-push-serial-older/main.tf b/command/testdata/state-push-serial-older/main.tf similarity index 100% rename from internal/command/testdata/state-push-serial-older/main.tf rename to command/testdata/state-push-serial-older/main.tf diff --git a/internal/command/testdata/state-push-serial-older/replace.tfstate b/command/testdata/state-push-serial-older/replace.tfstate similarity index 100% rename from internal/command/testdata/state-push-serial-older/replace.tfstate rename to command/testdata/state-push-serial-older/replace.tfstate diff --git a/internal/command/testdata/statelocker.go b/command/testdata/statelocker.go similarity index 89% rename from internal/command/testdata/statelocker.go rename to command/testdata/statelocker.go index a31708605ae8..98e13a23a45a 100644 --- a/internal/command/testdata/statelocker.go +++ b/command/testdata/statelocker.go @@ -11,8 +11,8 @@ import ( "syscall" "time" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/states/statemgr" ) func main() { diff --git a/internal/command/testdata/test-fails/test-fails.tf b/command/testdata/test-fails/test-fails.tf similarity index 100% rename from internal/command/testdata/test-fails/test-fails.tf rename to command/testdata/test-fails/test-fails.tf diff --git a/internal/command/testdata/test-fails/tests/hello/hello.tf b/command/testdata/test-fails/tests/hello/hello.tf similarity index 100% rename from internal/command/testdata/test-fails/tests/hello/hello.tf rename to command/testdata/test-fails/tests/hello/hello.tf diff --git a/internal/command/testdata/test-passes/test-passes.tf b/command/testdata/test-passes/test-passes.tf similarity index 100% rename from internal/command/testdata/test-passes/test-passes.tf rename to 
command/testdata/test-passes/test-passes.tf diff --git a/internal/command/testdata/test-passes/tests/hello/hello.tf b/command/testdata/test-passes/tests/hello/hello.tf similarity index 100% rename from internal/command/testdata/test-passes/tests/hello/hello.tf rename to command/testdata/test-passes/tests/hello/hello.tf diff --git a/internal/command/testdata/validate-invalid/incorrectmodulename/main.tf b/command/testdata/validate-invalid/incorrectmodulename/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/incorrectmodulename/main.tf rename to command/testdata/validate-invalid/incorrectmodulename/main.tf diff --git a/internal/command/testdata/validate-invalid/incorrectmodulename/output.json b/command/testdata/validate-invalid/incorrectmodulename/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/incorrectmodulename/output.json rename to command/testdata/validate-invalid/incorrectmodulename/output.json diff --git a/internal/command/testdata/validate-invalid/interpolation/main.tf b/command/testdata/validate-invalid/interpolation/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/interpolation/main.tf rename to command/testdata/validate-invalid/interpolation/main.tf diff --git a/internal/command/testdata/validate-invalid/interpolation/output.json b/command/testdata/validate-invalid/interpolation/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/interpolation/output.json rename to command/testdata/validate-invalid/interpolation/output.json diff --git a/internal/command/testdata/validate-invalid/main.tf b/command/testdata/validate-invalid/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/main.tf rename to command/testdata/validate-invalid/main.tf diff --git a/internal/command/testdata/validate-invalid/missing_defined_var/main.tf b/command/testdata/validate-invalid/missing_defined_var/main.tf 
similarity index 100% rename from internal/command/testdata/validate-invalid/missing_defined_var/main.tf rename to command/testdata/validate-invalid/missing_defined_var/main.tf diff --git a/internal/command/testdata/validate-invalid/missing_defined_var/output.json b/command/testdata/validate-invalid/missing_defined_var/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/missing_defined_var/output.json rename to command/testdata/validate-invalid/missing_defined_var/output.json diff --git a/internal/command/testdata/validate-invalid/missing_quote/main.tf b/command/testdata/validate-invalid/missing_quote/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/missing_quote/main.tf rename to command/testdata/validate-invalid/missing_quote/main.tf diff --git a/internal/command/testdata/validate-invalid/missing_quote/output.json b/command/testdata/validate-invalid/missing_quote/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/missing_quote/output.json rename to command/testdata/validate-invalid/missing_quote/output.json diff --git a/internal/command/testdata/validate-invalid/missing_var/main.tf b/command/testdata/validate-invalid/missing_var/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/missing_var/main.tf rename to command/testdata/validate-invalid/missing_var/main.tf diff --git a/internal/command/testdata/validate-invalid/missing_var/output.json b/command/testdata/validate-invalid/missing_var/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/missing_var/output.json rename to command/testdata/validate-invalid/missing_var/output.json diff --git a/internal/command/testdata/validate-invalid/multiple_modules/main.tf b/command/testdata/validate-invalid/multiple_modules/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_modules/main.tf rename to 
command/testdata/validate-invalid/multiple_modules/main.tf diff --git a/internal/command/testdata/validate-invalid/multiple_modules/output.json b/command/testdata/validate-invalid/multiple_modules/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_modules/output.json rename to command/testdata/validate-invalid/multiple_modules/output.json diff --git a/internal/command/testdata/validate-invalid/multiple_providers/main.tf b/command/testdata/validate-invalid/multiple_providers/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_providers/main.tf rename to command/testdata/validate-invalid/multiple_providers/main.tf diff --git a/internal/command/testdata/validate-invalid/multiple_providers/output.json b/command/testdata/validate-invalid/multiple_providers/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_providers/output.json rename to command/testdata/validate-invalid/multiple_providers/output.json diff --git a/internal/command/testdata/validate-invalid/multiple_resources/main.tf b/command/testdata/validate-invalid/multiple_resources/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_resources/main.tf rename to command/testdata/validate-invalid/multiple_resources/main.tf diff --git a/internal/command/testdata/validate-invalid/multiple_resources/output.json b/command/testdata/validate-invalid/multiple_resources/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/multiple_resources/output.json rename to command/testdata/validate-invalid/multiple_resources/output.json diff --git a/internal/command/testdata/validate-invalid/output.json b/command/testdata/validate-invalid/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/output.json rename to command/testdata/validate-invalid/output.json diff --git 
a/internal/command/testdata/validate-invalid/outputs/main.tf b/command/testdata/validate-invalid/outputs/main.tf similarity index 100% rename from internal/command/testdata/validate-invalid/outputs/main.tf rename to command/testdata/validate-invalid/outputs/main.tf diff --git a/internal/command/testdata/validate-invalid/outputs/output.json b/command/testdata/validate-invalid/outputs/output.json similarity index 100% rename from internal/command/testdata/validate-invalid/outputs/output.json rename to command/testdata/validate-invalid/outputs/output.json diff --git a/internal/command/testdata/validate-valid/main.tf b/command/testdata/validate-valid/main.tf similarity index 100% rename from internal/command/testdata/validate-valid/main.tf rename to command/testdata/validate-valid/main.tf diff --git a/internal/command/testdata/validate-valid/output.json b/command/testdata/validate-valid/output.json similarity index 100% rename from internal/command/testdata/validate-valid/output.json rename to command/testdata/validate-valid/output.json diff --git a/internal/command/testdata/validate-valid/with-tfvars-file/main.tf b/command/testdata/validate-valid/with-tfvars-file/main.tf similarity index 100% rename from internal/command/testdata/validate-valid/with-tfvars-file/main.tf rename to command/testdata/validate-valid/with-tfvars-file/main.tf diff --git a/internal/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars b/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars similarity index 100% rename from internal/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars rename to command/testdata/validate-valid/with-tfvars-file/terraform.tfvars diff --git a/internal/command/testdata/variables/main.tf b/command/testdata/variables/main.tf similarity index 100% rename from internal/command/testdata/variables/main.tf rename to command/testdata/variables/main.tf diff --git a/command/ui_input.go b/command/ui_input.go new file mode 100644 index 
000000000000..5492a0ab5cf3 --- /dev/null +++ b/command/ui_input.go @@ -0,0 +1,191 @@ +package command + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "os" + "os/signal" + "strings" + "sync" + "sync/atomic" + "unicode" + + "github.com/bgentry/speakeasy" + "github.com/hashicorp/terraform/terraform" + "github.com/mattn/go-isatty" + "github.com/mitchellh/colorstring" +) + +var defaultInputReader io.Reader +var defaultInputWriter io.Writer +var testInputResponse []string +var testInputResponseMap map[string]string + +// UIInput is an implementation of terraform.UIInput that asks the CLI +// for input stdin. +type UIInput struct { + // Colorize will color the output. + Colorize *colorstring.Colorize + + // Reader and Writer for IO. If these aren't set, they will default to + // Stdin and Stdout respectively. + Reader io.Reader + Writer io.Writer + + listening int32 + result chan string + err chan string + + interrupted bool + l sync.Mutex + once sync.Once +} + +func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + i.once.Do(i.init) + + r := i.Reader + w := i.Writer + if r == nil { + r = defaultInputReader + } + if w == nil { + w = defaultInputWriter + } + if r == nil { + r = os.Stdin + } + if w == nil { + w = os.Stdout + } + + // Make sure we only ask for input once at a time. Terraform + // should enforce this, but it doesn't hurt to verify. + i.l.Lock() + defer i.l.Unlock() + + // If we're interrupted, then don't ask for input + if i.interrupted { + return "", errors.New("interrupted") + } + + // If we have test results, return those. testInputResponse is the + // "old" way of doing it and we should remove that. + if testInputResponse != nil { + v := testInputResponse[0] + testInputResponse = testInputResponse[1:] + return v, nil + } + + // testInputResponseMap is the new way for test responses, based on + // the query ID. 
+ if testInputResponseMap != nil { + v, ok := testInputResponseMap[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + + delete(testInputResponseMap, opts.Id) + return v, nil + } + + log.Printf("[DEBUG] command: asking for input: %q", opts.Query) + + // Listen for interrupts so we can cancel the input ask + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Build the output format for asking + var buf bytes.Buffer + buf.WriteString("[reset]") + buf.WriteString(fmt.Sprintf("[bold]%s[reset]\n", opts.Query)) + if opts.Description != "" { + s := bufio.NewScanner(strings.NewReader(opts.Description)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + buf.WriteString("\n") + } + if opts.Default != "" { + buf.WriteString(" [bold]Default:[reset] ") + buf.WriteString(opts.Default) + buf.WriteString("\n") + } + buf.WriteString(" [bold]Enter a value:[reset] ") + + // Ask the user for their input + if _, err := fmt.Fprint(w, i.Colorize.Color(buf.String())); err != nil { + return "", err + } + + // Listen for the input in a goroutine. This will allow us to + // interrupt this if we are interrupted (SIGINT). + go func() { + if !atomic.CompareAndSwapInt32(&i.listening, 0, 1) { + return // We are already listening for input. 
+ } + defer atomic.CompareAndSwapInt32(&i.listening, 1, 0) + + var line string + var err error + if opts.Secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + buf := bufio.NewReader(r) + line, err = buf.ReadString('\n') + } + if err != nil { + log.Printf("[ERR] UIInput scan err: %s", err) + i.err <- string(err.Error()) + } else { + i.result <- strings.TrimRightFunc(line, unicode.IsSpace) + } + }() + + select { + case err := <-i.err: + return "", errors.New(err) + + case line := <-i.result: + fmt.Fprint(w, "\n") + + if line == "" { + line = opts.Default + } + + return line, nil + case <-ctx.Done(): + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(w) + + return "", ctx.Err() + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(w) + + // Mark that we were interrupted so future Ask calls fail. + i.interrupted = true + + return "", errors.New("interrupted") + } +} + +func (i *UIInput) init() { + i.result = make(chan string) + i.err = make(chan string) + + if i.Colorize == nil { + i.Colorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } + } +} diff --git a/command/ui_input_test.go b/command/ui_input_test.go new file mode 100644 index 000000000000..7bda3962d309 --- /dev/null +++ b/command/ui_input_test.go @@ -0,0 +1,119 @@ +package command + +import ( + "bytes" + "context" + "fmt" + "io" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/terraform/terraform" +) + +func TestUIInput_impl(t *testing.T) { + var _ terraform.UIInput = new(UIInput) +} + +func TestUIInputInput(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBufferString("foo\n"), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &terraform.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo" { + t.Fatalf("unexpected input: %s", v) + } +} + +func 
TestUIInputInput_canceled(t *testing.T) { + r, w := io.Pipe() + i := &UIInput{ + Reader: r, + Writer: bytes.NewBuffer(nil), + } + + // Make a context that can be canceled. + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + // Cancel the context after 2 seconds. + time.Sleep(2 * time.Second) + cancel() + }() + + // Get input until the context is canceled. + v, err := i.Input(ctx, &terraform.InputOpts{}) + if err != context.Canceled { + t.Fatalf("expected a context.Canceled error, got: %v", err) + } + + // As the context was canceled v should be empty. + if v != "" { + t.Fatalf("unexpected input: %s", v) + } + + // As the context was canceled we should still be listening. + listening := atomic.LoadInt32(&i.listening) + if listening != 1 { + t.Fatalf("expected listening to be 1, got: %d", listening) + } + + go func() { + // Fake input is given after 1 second. + time.Sleep(time.Second) + fmt.Fprint(w, "foo\n") + w.Close() + }() + + v, err = i.Input(context.Background(), &terraform.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo" { + t.Fatalf("unexpected input: %s", v) + } +} + +func TestUIInputInput_spaces(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBufferString("foo bar\n"), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &terraform.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo bar" { + t.Fatalf("unexpected input: %s", v) + } +} + +func TestUIInputInput_Error(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBuffer(nil), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &terraform.InputOpts{}) + if err == nil { + t.Fatalf("Error is not 'nil'") + } + + if err.Error() != "EOF" { + t.Fatalf("unexpected error: %v", err) + } + + if v != "" { + t.Fatalf("input must be empty") + } +} diff --git a/internal/command/unlock.go b/command/unlock.go similarity index 95% rename from 
internal/command/unlock.go rename to command/unlock.go index 1cb915e065a9..f2ce6b144db6 100644 --- a/internal/command/unlock.go +++ b/command/unlock.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) diff --git a/internal/command/unlock_test.go b/command/unlock_test.go similarity index 94% rename from internal/command/unlock_test.go rename to command/unlock_test.go index 9822b38426c7..4ba9a9b53144 100644 --- a/internal/command/unlock_test.go +++ b/command/unlock_test.go @@ -4,10 +4,10 @@ import ( "os" "testing" - "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" + "github.com/hashicorp/terraform/backend/remote-state/inmem" "github.com/mitchellh/cli" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) // Since we can't unlock a local state file, just test that calling unlock diff --git a/internal/command/untaint.go b/command/untaint.go similarity index 94% rename from internal/command/untaint.go rename to command/untaint.go index d02a794cfeab..1860ca0fba8d 100644 --- a/internal/command/untaint.go +++ b/command/untaint.go @@ -4,13 +4,13 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + 
"github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // UntaintCommand is a cli.Command implementation that manually untaints diff --git a/internal/command/untaint_test.go b/command/untaint_test.go similarity index 99% rename from internal/command/untaint_test.go rename to command/untaint_test.go index cc193125e474..e1aa0777107c 100644 --- a/internal/command/untaint_test.go +++ b/command/untaint_test.go @@ -6,8 +6,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" ) diff --git a/command/validate.go b/command/validate.go new file mode 100644 index 000000000000..fccc201fe325 --- /dev/null +++ b/command/validate.go @@ -0,0 +1,130 @@ +package command + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// ValidateCommand is a Command implementation that validates the terraform files +type ValidateCommand struct { + Meta +} + +func (c *ValidateCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseValidate(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("validate") + return 1 + } + + view := views.NewValidate(args.ViewType, c.View) + + // After this point, we must only produce JSON output if JSON mode is + // enabled, so all errors should be accumulated into diags and we'll + // print out a 
suitable result at the end, depending on the format + // selection. All returns from this point on must be tail-calls into + // view.Results in order to produce the expected output. + + dir, err := filepath.Abs(args.Path) + if err != nil { + diags = diags.Append(fmt.Errorf("unable to locate module: %s", err)) + return view.Results(diags) + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(fmt.Errorf("error loading plugin path: %s", err)) + return view.Results(diags) + } + + validateDiags := c.validate(dir) + diags = diags.Append(validateDiags) + + // Validating with dev overrides in effect means that the result might + // not be valid for a stable release, so we'll warn about that in case + // the user is trying to use "terraform validate" as a sort of pre-flight + // check before submitting a change. + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) + + return view.Results(diags) +} + +func (c *ValidateCommand) validate(dir string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg, cfgDiags := c.loadConfig(dir) + diags = diags.Append(cfgDiags) + + if diags.HasErrors() { + return diags + } + + opts, err := c.contextOpts() + if err != nil { + diags = diags.Append(err) + return diags + } + + tfCtx, ctxDiags := terraform.NewContext(opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return diags + } + + validateDiags := tfCtx.Validate(cfg) + diags = diags.Append(validateDiags) + return diags +} + +func (c *ValidateCommand) Synopsis() string { + return "Check whether the configuration is valid" +} + +func (c *ValidateCommand) Help() string { + helpText := ` +Usage: terraform [global options] validate [options] + + Validate the configuration files in a directory, referring only to the + configuration and not accessing any remote services such as remote state, + provider APIs, etc. 
+ + Validate runs checks that verify whether a configuration is syntactically + valid and internally consistent, regardless of any provided variables or + existing state. It is thus primarily useful for general verification of + reusable modules, including correctness of attribute names and value types. + + It is safe to run this command automatically, for example as a post-save + check in a text editor or as a test step for a re-usable module in a CI + system. + + Validation requires an initialized working directory with any referenced + plugins and modules installed. To initialize a working directory for + validation without accessing any configured remote backend, use: + terraform init -backend=false + + To verify configuration in the context of a particular run (a particular + target workspace, input variable values, etc), use the 'terraform plan' + command instead, which includes an implied validation check. + +Options: + + -json Produce output in a machine-readable JSON format, suitable for + use in text editor integrations and other automated systems. + Always disables color. + + -no-color If specified, output won't contain any color. 
+` + return strings.TrimSpace(helpText) +} diff --git a/command/validate_test.go b/command/validate_test.go new file mode 100644 index 000000000000..6015f48c1ab0 --- /dev/null +++ b/command/validate_test.go @@ -0,0 +1,266 @@ +package command + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terminal" +) + +func setupTest(t *testing.T, fixturepath string, args ...string) (*terminal.TestOutput, int) { + view, done := testView(t) + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + } + c := &ValidateCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args = append(args, "-no-color") + args = append(args, testFixturePath(fixturepath)) + + code := c.Run(args) + return done(t), code +} + +func TestValidateCommand(t *testing.T) { + if output, code := setupTest(t, "validate-valid"); code != 0 { + t.Fatalf("unexpected non-successful exit code %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateCommandWithTfvarsFile(t *testing.T) { + // Create a temporary working directory that is empty because this test + // requires scanning the current working directory by validate command. 
+ td := t.TempDir() + testCopyDir(t, testFixturePath("validate-valid/with-tfvars-file"), td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ValidateCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateFailingCommand(t *testing.T) { + if output, code := setupTest(t, "validate-invalid"); code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateFailingCommandMissingQuote(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_quote") + + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Invalid reference" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestValidateFailingCommandMissingVariable(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_var") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Reference to undeclared input variable" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameProviderMutipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_providers") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Duplicate provider configuration" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameModuleMultipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_modules") + if code != 1 { + t.Fatalf("Should have failed: 
%d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Duplicate module call" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameResourceMultipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_resources") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := `Error: Duplicate resource "aws_instance" configuration` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestOutputWithoutValueShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/outputs") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := `The argument "value" is required, but no definition was found.` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } + wantError = `An argument named "values" is not expected here. 
Did you mean "value"?` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestModuleWithIncorrectNameShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/incorrectmodulename") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := `Error: Invalid module instance name` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } + wantError = `Error: Variables not allowed` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestWronglyUsedInterpolationShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/interpolation") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := `Error: Variables not allowed` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } + wantError = `A single static variable reference is required` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestMissingDefinedVar(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_defined_var") + // This is allowed because validate tests only that variables are referenced + // correctly, not that they all have defined values. 
+ if code != 0 { + t.Fatalf("Should have passed: %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidate_json(t *testing.T) { + tests := []struct { + path string + valid bool + }{ + {"validate-valid", true}, + {"validate-invalid", false}, + {"validate-invalid/missing_quote", false}, + {"validate-invalid/missing_var", false}, + {"validate-invalid/multiple_providers", false}, + {"validate-invalid/multiple_modules", false}, + {"validate-invalid/multiple_resources", false}, + {"validate-invalid/outputs", false}, + {"validate-invalid/incorrectmodulename", false}, + {"validate-invalid/interpolation", false}, + {"validate-invalid/missing_defined_var", true}, + } + + for _, tc := range tests { + t.Run(tc.path, func(t *testing.T) { + var want, got map[string]interface{} + + wantFile, err := os.Open(path.Join(testFixturePath(tc.path), "output.json")) + if err != nil { + t.Fatalf("failed to open output file: %s", err) + } + defer wantFile.Close() + wantBytes, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + err = json.Unmarshal([]byte(wantBytes), &want) + if err != nil { + t.Fatalf("failed to unmarshal expected JSON: %s", err) + } + + output, code := setupTest(t, tc.path, "-json") + + gotString := output.Stdout() + err = json.Unmarshal([]byte(gotString), &got) + if err != nil { + t.Fatalf("failed to unmarshal actual JSON: %s", err) + } + + if !cmp.Equal(got, want) { + t.Errorf("wrong output:\n %v\n", cmp.Diff(got, want)) + t.Errorf("raw output:\n%s\n", gotString) + } + + if tc.valid && code != 0 { + t.Errorf("wrong exit code: want 0, got %d", code) + } else if !tc.valid && code != 1 { + t.Errorf("wrong exit code: want 1, got %d", code) + } + + if errorOutput := output.Stderr(); errorOutput != "" { + t.Errorf("unexpected error output:\n%s", errorOutput) + } + }) + } +} diff --git a/internal/command/version.go b/command/version.go similarity index 96% rename from internal/command/version.go rename to 
command/version.go index 7fef59202daa..b940b4b26356 100644 --- a/internal/command/version.go +++ b/command/version.go @@ -7,9 +7,9 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" ) // VersionCommand is a Command implementation prints the version. diff --git a/command/version_test.go b/command/version_test.go new file mode 100644 index 000000000000..b9b153cbb40e --- /dev/null +++ b/command/version_test.go @@ -0,0 +1,228 @@ +package command + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" + "github.com/mitchellh/cli" +) + +func TestVersionCommand_implements(t *testing.T) { + var _ cli.Command = &VersionCommand{} +} + +func TestVersion(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + // We'll create a fixed dependency lock file in our working directory + // so we can verify that the version command shows the information + // from it. 
+ locks := depsfile.NewLocks() + locks.SetProvider( + addrs.NewDefaultProvider("test2"), + getproviders.MustParseVersion("1.2.3"), + nil, + nil, + ) + locks.SetProvider( + addrs.NewDefaultProvider("test1"), + getproviders.MustParseVersion("7.8.9-beta.2"), + nil, + nil, + ) + + ui := cli.NewMockUi() + c := &VersionCommand{ + Meta: Meta{ + Ui: ui, + }, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if err := c.replaceLockedDependencies(locks); err != nil { + t.Fatal(err) + } + if code := c.Run([]string{}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "Terraform v4.5.6-foo\non aros_riscv64\n+ provider registry.terraform.io/hashicorp/test1 v7.8.9-beta.2\n+ provider registry.terraform.io/hashicorp/test2 v1.2.3" + if actual != expected { + t.Fatalf("wrong output\ngot:\n%s\nwant:\n%s", actual, expected) + } + +} + +func TestVersion_flags(t *testing.T) { + ui := new(cli.MockUi) + m := Meta{ + Ui: ui, + } + + // `terraform version` + c := &VersionCommand{ + Meta: m, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + + if code := c.Run([]string{"-v", "-version"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "Terraform v4.5.6-foo\non aros_riscv64" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestVersion_outdated(t *testing.T) { + ui := new(cli.MockUi) + m := Meta{ + Ui: ui, + } + + c := &VersionCommand{ + Meta: m, + Version: "4.5.6", + CheckFunc: mockVersionCheckFunc(true, "4.5.7"), + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + + if code := c.Run([]string{}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := 
strings.TrimSpace(ui.OutputWriter.String()) + expected := "Terraform v4.5.6\non aros_riscv64\n\nYour version of Terraform is out of date! The latest version\nis 4.5.7. You can update by downloading from https://www.terraform.io/downloads.html" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestVersion_json(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + ui := cli.NewMockUi() + meta := Meta{ + Ui: ui, + } + + // `terraform version -json` without prerelease + c := &VersionCommand{ + Meta: meta, + Version: "4.5.6", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := strings.TrimSpace(` +{ + "terraform_version": "4.5.6", + "platform": "aros_riscv64", + "provider_selections": {}, + "terraform_outdated": false +} +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } + + // flush the output from the mock ui + ui.OutputWriter.Reset() + + // Now we'll create a fixed dependency lock file in our working directory + // so we can verify that the version command shows the information + // from it. 
+ locks := depsfile.NewLocks() + locks.SetProvider( + addrs.NewDefaultProvider("test2"), + getproviders.MustParseVersion("1.2.3"), + nil, + nil, + ) + locks.SetProvider( + addrs.NewDefaultProvider("test1"), + getproviders.MustParseVersion("7.8.9-beta.2"), + nil, + nil, + ) + + // `terraform version -json` with prerelease and provider dependencies + c = &VersionCommand{ + Meta: meta, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if err := c.replaceLockedDependencies(locks); err != nil { + t.Fatal(err) + } + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual = strings.TrimSpace(ui.OutputWriter.String()) + expected = strings.TrimSpace(` +{ + "terraform_version": "4.5.6-foo", + "platform": "aros_riscv64", + "provider_selections": { + "registry.terraform.io/hashicorp/test1": "7.8.9-beta.2", + "registry.terraform.io/hashicorp/test2": "1.2.3" + }, + "terraform_outdated": false +} +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } + +} + +func TestVersion_jsonoutdated(t *testing.T) { + ui := new(cli.MockUi) + m := Meta{ + Ui: ui, + } + + c := &VersionCommand{ + Meta: m, + Version: "4.5.6", + CheckFunc: mockVersionCheckFunc(true, "4.5.7"), + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "{\n \"terraform_version\": \"4.5.6\",\n \"platform\": \"aros_riscv64\",\n \"provider_selections\": {},\n \"terraform_outdated\": true\n}" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func mockVersionCheckFunc(outdated bool, latest string) VersionCheckFunc { + return func() (VersionCheckInfo, error) { + return VersionCheckInfo{ + Outdated: outdated, + Latest: 
latest, + // Alerts is not used by version command + }, nil + } +} diff --git a/command/views/apply.go b/command/views/apply.go new file mode 100644 index 000000000000..988924f99948 --- /dev/null +++ b/command/views/apply.go @@ -0,0 +1,162 @@ +package views + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// The Apply view is used for the apply command. +type Apply interface { + ResourceCount(stateOutPath string) + Outputs(outputValues map[string]*states.OutputValue) + + Operation() Operation + Hooks() []terraform.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewApply returns an initialized Apply implementation for the given ViewType. +func NewApply(vt arguments.ViewType, destroy bool, view *View) Apply { + switch vt { + case arguments.ViewJSON: + return &ApplyJSON{ + view: NewJSONView(view), + destroy: destroy, + countHook: &countHook{}, + } + case arguments.ViewHuman: + return &ApplyHuman{ + view: view, + destroy: destroy, + inAutomation: view.RunningInAutomation(), + countHook: &countHook{}, + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The ApplyHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. +type ApplyHuman struct { + view *View + + destroy bool + inAutomation bool + + countHook *countHook +} + +var _ Apply = (*ApplyHuman)(nil) + +func (v *ApplyHuman) ResourceCount(stateOutPath string) { + if v.destroy { + v.view.streams.Printf( + v.view.colorize.Color("[reset][bold][green]\nDestroy complete! Resources: %d destroyed.\n"), + v.countHook.Removed, + ) + } else { + v.view.streams.Printf( + v.view.colorize.Color("[reset][bold][green]\nApply complete! 
Resources: %d added, %d changed, %d destroyed.\n"), + v.countHook.Added, + v.countHook.Changed, + v.countHook.Removed, + ) + } + if (v.countHook.Added > 0 || v.countHook.Changed > 0) && stateOutPath != "" { + v.view.streams.Printf("\n%s\n\n", format.WordWrap(stateOutPathPostApply, v.view.outputColumns())) + v.view.streams.Printf("State path: %s\n", stateOutPath) + } +} + +func (v *ApplyHuman) Outputs(outputValues map[string]*states.OutputValue) { + if len(outputValues) > 0 { + v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) + NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) + } +} + +func (v *ApplyHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *ApplyHuman) Hooks() []terraform.Hook { + return []terraform.Hook{ + v.countHook, + NewUiHook(v.view), + } +} + +func (v *ApplyHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *ApplyHuman) HelpPrompt() { + command := "apply" + if v.destroy { + command = "destroy" + } + v.view.HelpPrompt(command) +} + +const stateOutPathPostApply = "The state of your infrastructure has been saved to the path below. This state is required to modify and destroy your infrastructure, so keep it safe. To inspect the complete state use the `terraform show` command." + +// The ApplyJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. 
+type ApplyJSON struct { + view *JSONView + + destroy bool + + countHook *countHook +} + +var _ Apply = (*ApplyJSON)(nil) + +func (v *ApplyJSON) ResourceCount(stateOutPath string) { + operation := json.OperationApplied + if v.destroy { + operation = json.OperationDestroyed + } + v.view.ChangeSummary(&json.ChangeSummary{ + Add: v.countHook.Added, + Change: v.countHook.Changed, + Remove: v.countHook.Removed, + Operation: operation, + }) +} + +func (v *ApplyJSON) Outputs(outputValues map[string]*states.OutputValue) { + outputs, diags := json.OutputsFromMap(outputValues) + if diags.HasErrors() { + v.Diagnostics(diags) + } else { + v.view.Outputs(outputs) + } +} + +func (v *ApplyJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *ApplyJSON) Hooks() []terraform.Hook { + return []terraform.Hook{ + v.countHook, + newJSONHook(v.view), + } +} + +func (v *ApplyJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *ApplyJSON) HelpPrompt() { +} diff --git a/command/views/apply_test.go b/command/views/apply_test.go new file mode 100644 index 000000000000..f4b9265bd066 --- /dev/null +++ b/command/views/apply_test.go @@ -0,0 +1,255 @@ +package views + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/zclconf/go-cty/cty" +) + +// This test is mostly because I am paranoid about having two consecutive +// boolean arguments. 
+func TestApply_new(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)) + hv, ok := v.(*ApplyHuman) + if !ok { + t.Fatalf("unexpected return type %t", v) + } + + if hv.destroy != false { + t.Fatalf("unexpected destroy value") + } + + if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value") + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. +func TestApplyHuman_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "foo": {Value: cty.StringVal("secret")}, + }) + + got := done(t).Stdout() + for _, want := range []string{"Outputs:", `foo = "secret"`} { + if !strings.Contains(got, want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + } +} + +// Outputs should do nothing if there are no outputs to render. +func TestApplyHuman_outputsEmpty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{}) + + got := done(t).Stdout() + if got != "" { + t.Errorf("output should be empty, but got: %q", got) + } +} + +// Ensure that the correct view type and in-automation settings propagate to the +// Operation view. +func TestApplyHuman_operation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)).Operation() + if hv, ok := v.(*OperationHuman); !ok { + t.Fatalf("unexpected return type %t", v) + } else if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value on Operation view") + } +} + +// This view is used for both apply and destroy commands, so the help output +// needs to cover both. 
+func TestApplyHuman_help(t *testing.T) { + testCases := map[string]bool{ + "apply": false, + "destroy": true, + } + + for name, destroy := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, destroy, NewView(streams)) + v.HelpPrompt() + got := done(t).Stderr() + if !strings.Contains(got, name) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, name) + } + }) + } +} + +// Hooks and ResourceCount are tangled up and easiest to test together. +func TestApply_resourceCount(t *testing.T) { + testCases := map[string]struct { + destroy bool + want string + }{ + "apply": { + false, + "Apply complete! Resources: 1 added, 2 changed, 3 destroyed.", + }, + "destroy": { + true, + "Destroy complete! Resources: 3 destroyed.", + }, + } + + // For compatibility reasons, these tests should hold true for both human + // and JSON output modes + views := []arguments.ViewType{arguments.ViewHuman, arguments.ViewJSON} + + for name, tc := range testCases { + for _, viewType := range views { + t.Run(fmt.Sprintf("%s (%s view)", name, viewType), func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(viewType, tc.destroy, NewView(streams)) + hooks := v.Hooks() + + var count *countHook + for _, hook := range hooks { + if ch, ok := hook.(*countHook); ok { + count = ch + } + } + if count == nil { + t.Fatalf("expected Hooks to include a countHook: %#v", hooks) + } + + count.Added = 1 + count.Changed = 2 + count.Removed = 3 + + v.ResourceCount("") + + got := done(t).Stdout() + if !strings.Contains(got, tc.want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) + } + }) + } + } +} + +func TestApplyHuman_resourceCountStatePath(t *testing.T) { + testCases := map[string]struct { + added int + changed int + removed int + statePath string + wantContains bool + }{ + "default state path": { + added: 1, + changed: 2, + removed: 3, + statePath: "", + wantContains: false, + }, + 
"only removed": { + added: 0, + changed: 0, + removed: 5, + statePath: "foo.tfstate", + wantContains: false, + }, + "added": { + added: 5, + changed: 0, + removed: 0, + statePath: "foo.tfstate", + wantContains: true, + }, + "changed": { + added: 0, + changed: 5, + removed: 0, + statePath: "foo.tfstate", + wantContains: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + hooks := v.Hooks() + + var count *countHook + for _, hook := range hooks { + if ch, ok := hook.(*countHook); ok { + count = ch + } + } + if count == nil { + t.Fatalf("expected Hooks to include a countHook: %#v", hooks) + } + + count.Added = tc.added + count.Changed = tc.changed + count.Removed = tc.removed + + v.ResourceCount(tc.statePath) + + got := done(t).Stdout() + want := "State path: " + tc.statePath + contains := strings.Contains(got, want) + if contains && !tc.wantContains { + t.Errorf("wrong result\ngot: %q\nshould not contain: %q", got, want) + } else if !contains && tc.wantContains { + t.Errorf("wrong result\ngot: %q\nshould contain: %q", got, want) + } + }) + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. 
+func TestApplyJSON_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewJSON, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "boop_count": {Value: cty.NumberIntVal(92)}, + "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Outputs: 2", + "@module": "terraform.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop_count": map[string]interface{}{ + "sensitive": false, + "value": float64(92), + "type": "number", + }, + "password": map[string]interface{}{ + "sensitive": true, + "type": "string", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} diff --git a/internal/command/views/hook_count.go b/command/views/hook_count.go similarity index 91% rename from internal/command/views/hook_count.go rename to command/views/hook_count.go index 054c9da38d42..25e1a5e7ddf1 100644 --- a/internal/command/views/hook_count.go +++ b/command/views/hook_count.go @@ -5,10 +5,10 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) // countHook is a hook that counts the number of resources diff --git a/internal/command/views/hook_count_test.go b/command/views/hook_count_test.go similarity index 96% rename from internal/command/views/hook_count_test.go rename to command/views/hook_count_test.go index 3cf51d4dd42d..b5f670736c65 100644 --- a/internal/command/views/hook_count_test.go +++ b/command/views/hook_count_test.go @@ -6,12 +6,12 @@ import ( "github.com/zclconf/go-cty/cty" - 
"github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) func TestCountHook_impl(t *testing.T) { diff --git a/internal/command/views/hook_json.go b/command/views/hook_json.go similarity index 93% rename from internal/command/views/hook_json.go rename to command/views/hook_json.go index 38e24de39d6c..18ff86ada86f 100644 --- a/internal/command/views/hook_json.go +++ b/command/views/hook_json.go @@ -9,12 +9,12 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) // How long to wait between sending heartbeat/progress messages diff --git a/internal/command/views/hook_json_test.go b/command/views/hook_json_test.go similarity index 97% rename from internal/command/views/hook_json_test.go rename to command/views/hook_json_test.go index cb1cbc920bd7..cd451559f825 100644 --- a/internal/command/views/hook_json_test.go +++ b/command/views/hook_json_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/internal/addrs" - 
"github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/command/views/hook_ui.go b/command/views/hook_ui.go similarity index 96% rename from internal/command/views/hook_ui.go rename to command/views/hook_ui.go index 2c5c0f5704fc..c10b7878f485 100644 --- a/internal/command/views/hook_ui.go +++ b/command/views/hook_ui.go @@ -11,12 +11,12 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) const defaultPeriodicUiTimer = 10 * time.Second diff --git a/internal/command/views/hook_ui_test.go b/command/views/hook_ui_test.go similarity index 97% rename from internal/command/views/hook_ui_test.go rename to command/views/hook_ui_test.go index bbde3b686643..1720e2af7501 100644 --- a/internal/command/views/hook_ui_test.go +++ b/command/views/hook_ui_test.go @@ -10,13 +10,13 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/plans" - 
"github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" ) // Test the PreApply hook for creating a new resource diff --git a/command/views/json/change.go b/command/views/json/change.go new file mode 100644 index 000000000000..128c81f8a82c --- /dev/null +++ b/command/views/json/change.go @@ -0,0 +1,122 @@ +package json + +import ( + "fmt" + + "github.com/hashicorp/terraform/plans" +) + +func NewResourceInstanceChange(change *plans.ResourceInstanceChangeSrc) *ResourceInstanceChange { + c := &ResourceInstanceChange{ + Resource: newResourceAddr(change.Addr), + Action: changeAction(change.Action), + Reason: changeReason(change.ActionReason), + } + if !change.Addr.Equal(change.PrevRunAddr) { + if c.Action == ActionNoOp { + c.Action = ActionMove + } + pr := newResourceAddr(change.PrevRunAddr) + c.PreviousResource = &pr + } + + return c +} + +type ResourceInstanceChange struct { + Resource ResourceAddr `json:"resource"` + PreviousResource *ResourceAddr `json:"previous_resource,omitempty"` + Action ChangeAction `json:"action"` + Reason ChangeReason `json:"reason,omitempty"` +} + +func (c *ResourceInstanceChange) String() string { + return fmt.Sprintf("%s: Plan to %s", c.Resource.Addr, c.Action) +} + +type ChangeAction string + +const ( + ActionNoOp ChangeAction = "noop" + ActionMove ChangeAction = "move" + ActionCreate ChangeAction = "create" + ActionRead ChangeAction = "read" + ActionUpdate ChangeAction = "update" + ActionReplace ChangeAction = "replace" + ActionDelete ChangeAction = "delete" +) + +func 
changeAction(action plans.Action) ChangeAction { + switch action { + case plans.NoOp: + return ActionNoOp + case plans.Create: + return ActionCreate + case plans.Read: + return ActionRead + case plans.Update: + return ActionUpdate + case plans.DeleteThenCreate, plans.CreateThenDelete: + return ActionReplace + case plans.Delete: + return ActionDelete + default: + return ActionNoOp + } +} + +type ChangeReason string + +const ( + ReasonNone ChangeReason = "" + ReasonTainted ChangeReason = "tainted" + ReasonRequested ChangeReason = "requested" + ReasonReplaceTriggeredBy ChangeReason = "replace_triggered_by" + ReasonCannotUpdate ChangeReason = "cannot_update" + ReasonUnknown ChangeReason = "unknown" + + ReasonDeleteBecauseNoResourceConfig ChangeReason = "delete_because_no_resource_config" + ReasonDeleteBecauseWrongRepetition ChangeReason = "delete_because_wrong_repetition" + ReasonDeleteBecauseCountIndex ChangeReason = "delete_because_count_index" + ReasonDeleteBecauseEachKey ChangeReason = "delete_because_each_key" + ReasonDeleteBecauseNoModule ChangeReason = "delete_because_no_module" + ReasonDeleteBecauseNoMoveTarget ChangeReason = "delete_because_no_move_target" + ReasonReadBecauseConfigUnknown ChangeReason = "read_because_config_unknown" + ReasonReadBecauseDependencyPending ChangeReason = "read_because_dependency_pending" +) + +func changeReason(reason plans.ResourceInstanceChangeActionReason) ChangeReason { + switch reason { + case plans.ResourceInstanceChangeNoReason: + return ReasonNone + case plans.ResourceInstanceReplaceBecauseTainted: + return ReasonTainted + case plans.ResourceInstanceReplaceByRequest: + return ReasonRequested + case plans.ResourceInstanceReplaceBecauseCannotUpdate: + return ReasonCannotUpdate + case plans.ResourceInstanceReplaceByTriggers: + return ReasonReplaceTriggeredBy + case plans.ResourceInstanceDeleteBecauseNoResourceConfig: + return ReasonDeleteBecauseNoResourceConfig + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + 
return ReasonDeleteBecauseWrongRepetition + case plans.ResourceInstanceDeleteBecauseCountIndex: + return ReasonDeleteBecauseCountIndex + case plans.ResourceInstanceDeleteBecauseEachKey: + return ReasonDeleteBecauseEachKey + case plans.ResourceInstanceDeleteBecauseNoModule: + return ReasonDeleteBecauseNoModule + case plans.ResourceInstanceReadBecauseConfigUnknown: + return ReasonReadBecauseConfigUnknown + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + return ReasonDeleteBecauseNoMoveTarget + case plans.ResourceInstanceReadBecauseDependencyPending: + return ReasonReadBecauseDependencyPending + default: + // This should never happen, but there's no good way to guarantee + // exhaustive handling of the enum, so a generic fall back is better + // than a misleading result or a panic + return ReasonUnknown + } +} diff --git a/internal/command/views/json/change_summary.go b/command/views/json/change_summary.go similarity index 100% rename from internal/command/views/json/change_summary.go rename to command/views/json/change_summary.go diff --git a/command/views/json/diagnostic.go b/command/views/json/diagnostic.go new file mode 100644 index 000000000000..551def16ba6a --- /dev/null +++ b/command/views/json/diagnostic.go @@ -0,0 +1,490 @@ +package json + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcled" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// These severities map to the tfdiags.Severity values, plus an explicit +// unknown in case that enum grows without us noticing here. +const ( + DiagnosticSeverityUnknown = "unknown" + DiagnosticSeverityError = "error" + DiagnosticSeverityWarning = "warning" +) + +// Diagnostic represents any tfdiags.Diagnostic value. 
The simplest form has +// just a severity, single line summary, and optional detail. If there is more +// information about the source of the diagnostic, this is represented in the +// range field. +type Diagnostic struct { + Severity string `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail"` + Address string `json:"address,omitempty"` + Range *DiagnosticRange `json:"range,omitempty"` + Snippet *DiagnosticSnippet `json:"snippet,omitempty"` +} + +// Pos represents a position in the source code. +type Pos struct { + // Line is a one-based count for the line in the indicated file. + Line int `json:"line"` + + // Column is a one-based count of Unicode characters from the start of the line. + Column int `json:"column"` + + // Byte is a zero-based offset into the indicated file. + Byte int `json:"byte"` +} + +// DiagnosticRange represents the filename and position of the diagnostic +// subject. This defines the range of the source to be highlighted in the +// output. Note that the snippet may include additional surrounding source code +// if the diagnostic has a context range. +// +// The Start position is inclusive, and the End position is exclusive. Exact +// positions are intended for highlighting for human interpretation only and +// are subject to change. +type DiagnosticRange struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` +} + +// DiagnosticSnippet represents source code information about the diagnostic. +// It is possible for a diagnostic to have a source (and therefore a range) but +// no source code can be found. In this case, the range field will be present and +// the snippet field will not. +type DiagnosticSnippet struct { + // Context is derived from HCL's hcled.ContextString output. This gives a + // high-level summary of the root context of the diagnostic: for example, + // the resource block in which an expression causes an error. 
+ Context *string `json:"context"` + + // Code is a possibly-multi-line string of Terraform configuration, which + // includes both the diagnostic source and any relevant context as defined + // by the diagnostic. + Code string `json:"code"` + + // StartLine is the line number in the source file for the first line of + // the snippet code block. This is not necessarily the same as the value of + // Range.Start.Line, as it is possible to have zero or more lines of + // context source code before the diagnostic range starts. + StartLine int `json:"start_line"` + + // HighlightStartOffset is the character offset into Code at which the + // diagnostic source range starts, which ought to be highlighted as such by + // the consumer of this data. + HighlightStartOffset int `json:"highlight_start_offset"` + + // HighlightEndOffset is the character offset into Code at which the + // diagnostic source range ends. + HighlightEndOffset int `json:"highlight_end_offset"` + + // Values is a sorted slice of expression values which may be useful in + // understanding the source of an error in a complex expression. + Values []DiagnosticExpressionValue `json:"values"` + + // FunctionCall is information about a function call whose failure is + // being reported by this diagnostic, if any. + FunctionCall *DiagnosticFunctionCall `json:"function_call,omitempty"` +} + +// DiagnosticExpressionValue represents an HCL traversal string (e.g. +// "var.foo") and a statement about its value while the expression was +// evaluated (e.g. "is a string", "will be known only after apply"). These are +// intended to help the consumer diagnose why an expression caused a diagnostic +// to be emitted. +type DiagnosticExpressionValue struct { + Traversal string `json:"traversal"` + Statement string `json:"statement"` +} + +// DiagnosticFunctionCall represents a function call whose information is +// being included as part of a diagnostic snippet. 
+type DiagnosticFunctionCall struct { + // CalledAs is the full name that was used to call this function, + // potentially including namespace prefixes if the function does not belong + // to the default function namespace. + CalledAs string `json:"called_as"` + + // Signature is a description of the signature of the function that was + // called, if any. Might be omitted if we're reporting that a call failed + // because the given function name isn't known, for example. + Signature *Function `json:"signature,omitempty"` +} + +// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources, +// and returns a Diagnostic struct. +func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string][]byte) *Diagnostic { + var sev string + switch diag.Severity() { + case tfdiags.Error: + sev = DiagnosticSeverityError + case tfdiags.Warning: + sev = DiagnosticSeverityWarning + default: + sev = DiagnosticSeverityUnknown + } + + desc := diag.Description() + + diagnostic := &Diagnostic{ + Severity: sev, + Summary: desc.Summary, + Detail: desc.Detail, + Address: desc.Address, + } + + sourceRefs := diag.Source() + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + + // Some diagnostic sources fail to set the end of the subject range. + if highlightRange.End == (hcl.Pos{}) { + highlightRange.End = highlightRange.Start + } + + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. + snippetRange = hcl.RangeOver(snippetRange, highlightRange) + + // Empty ranges result in odd diagnostic output, so extend the end to + // ensure there's at least one byte in the snippet or highlight. 
+ if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + diagnostic.Range = &DiagnosticRange{ + Filename: highlightRange.Filename, + Start: Pos{ + Line: highlightRange.Start.Line, + Column: highlightRange.Start.Column, + Byte: highlightRange.Start.Byte, + }, + End: Pos{ + Line: highlightRange.End.Line, + Column: highlightRange.End.Column, + Byte: highlightRange.End.Byte, + }, + } + + var src []byte + if sources != nil { + src = sources[highlightRange.Filename] + } + + // If we have a source file for the diagnostic, we can emit a code + // snippet. + if src != nil { + diagnostic.Snippet = &DiagnosticSnippet{ + StartLine: snippetRange.Start.Line, + + // Ensure that the default Values struct is an empty array, as this + // makes consuming the JSON structure easier in most languages. + Values: []DiagnosticExpressionValue{}, + } + + file, offset := parseRange(src, highlightRange) + + // Some diagnostics may have a useful top-level context to add to + // the code snippet output. + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + diagnostic.Snippet.Context = &contextStr + } + + // Build the string of the code snippet, tracking at which byte of + // the file the snippet starts. + var codeStartByte int + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + var code strings.Builder + for sc.Scan() { + lineRange := sc.Range() + if lineRange.Overlaps(snippetRange) { + if codeStartByte == 0 && code.Len() == 0 { + codeStartByte = lineRange.Start.Byte + } + code.Write(lineRange.SliceBytes(src)) + code.WriteRune('\n') + } + } + codeStr := strings.TrimSuffix(code.String(), "\n") + diagnostic.Snippet.Code = codeStr + + // Calculate the start and end byte of the highlight range relative + // to the code snippet string. 
+ start := highlightRange.Start.Byte - codeStartByte + end := start + (highlightRange.End.Byte - highlightRange.Start.Byte) + + // We can end up with some quirky results here in edge cases like + // when a source range starts or ends at a newline character, + // so we'll cap the results at the bounds of the highlight range + // so that consumers of this data don't need to contend with + // out-of-bounds errors themselves. + if start < 0 { + start = 0 + } else if start > len(codeStr) { + start = len(codeStr) + } + if end < 0 { + end = 0 + } else if end > len(codeStr) { + end = len(codeStr) + } + + diagnostic.Snippet.HighlightStartOffset = start + diagnostic.Snippet.HighlightEndOffset = end + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + values := make([]DiagnosticExpressionValue, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag) + includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. 
+ traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + value := DiagnosticExpressionValue{ + Traversal: traversalStr, + } + switch { + case val.HasMark(marks.Sensitive): + // We only mention a sensitive value if the diagnostic + // we're rendering is explicitly marked as being + // caused by sensitive values, because otherwise + // readers tend to be misled into thinking the error + // is caused by the sensitive value even when it isn't. + if !includeSensitive { + continue Traversals + } + // Even when we do mention one, we keep it vague + // in order to minimize the chance of giving away + // whatever was sensitive about it. + value.Statement = "has a sensitive value" + case !val.IsKnown(): + // We'll avoid saying anything about unknown or + // "known after apply" unless the diagnostic is + // explicitly marked as being caused by unknown + // values, because otherwise readers tend to be + // misled into thinking the error is caused by the + // unknown value even when it isn't. 
+ if ty := val.Type(); ty != cty.DynamicPseudoType { + if includeUnknown { + value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName()) + } else { + value.Statement = fmt.Sprintf("is a %s", ty.FriendlyName()) + } + } else { + if !includeUnknown { + continue Traversals + } + value.Statement = "will be known only after apply" + } + default: + value.Statement = fmt.Sprintf("is %s", compactValueStr(val)) + } + values = append(values, value) + seen[traversalStr] = struct{}{} + } + } + sort.Slice(values, func(i, j int) bool { + return values[i].Traversal < values[j].Traversal + }) + diagnostic.Snippet.Values = values + + if callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag); callInfo != nil && callInfo.CalledFunctionName() != "" { + calledAs := callInfo.CalledFunctionName() + baseName := calledAs + if idx := strings.LastIndex(baseName, "::"); idx >= 0 { + baseName = baseName[idx+2:] + } + callInfo := &DiagnosticFunctionCall{ + CalledAs: calledAs, + } + if f, ok := ctx.Functions[calledAs]; ok { + callInfo.Signature = DescribeFunction(baseName, f) + } + diagnostic.Snippet.FunctionCall = callInfo + } + + } + + } + } + + return diagnostic +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. + parser := hclparse.NewParser() + var file *hcl.File + + // Ignore diagnostics here as there is nothing we can do with them. 
+ if strings.HasSuffix(filename, ".json") { + file, _ = parser.ParseJSON(src, filename) + } else { + file, _ = parser.ParseHCL(src, filename) + } + + return file, offset +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + if val.HasMark(marks.Sensitive) { + // We check this in here just to make sure, but note that the caller + // of compactValueStr ought to have already checked this and skipped + // calling into compactValueStr anyway, so this shouldn't actually + // be reachable. + return "(sensitive value)" + } + + // WARNING: We've only checked that the value isn't sensitive _shallowly_ + // here, and so we must never show any element values from complex types + // in here. However, it's fine to show map keys and attribute names because + // those are never sensitive in isolation: the entire value would be + // sensitive in that case. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. 
+ return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. + return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. 
+ + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} diff --git a/command/views/json/diagnostic_test.go b/command/views/json/diagnostic_test.go new file mode 100644 index 000000000000..fdc7750dbf17 --- /dev/null +++ b/command/views/json/diagnostic_test.go @@ -0,0 +1,951 @@ +package json + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestNewDiagnostic(t *testing.T) { + // Common HCL for diags with source ranges. 
This does not have any real + // semantic errors, but we can synthesize fake HCL errors which will + // exercise the diagnostic rendering code using this + sources := map[string][]byte{ + "test.tf": []byte(`resource "test_resource" "test" { + foo = var.boop["hello!"] + bar = { + baz = maybe + } +} +`), + "short.tf": []byte("bad source code"), + "odd-comment.tf": []byte("foo\n\n#\n"), + "values.tf": []byte(`[ + var.a, + var.b, + var.c, + var.d, + var.e, + var.f, + var.g, + var.h, + var.i, + var.j, + var.k, +] +`), + } + testCases := map[string]struct { + diag interface{} // allow various kinds of diags + want *Diagnostic + }{ + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Oh no", + "Something is broken", + ), + &Diagnostic{ + Severity: "warning", + Summary: "Oh no", + Detail: "Something is broken", + }, + }, + "error with source code unavailable": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad news", + Detail: "It went wrong", + Subject: &hcl.Range{ + Filename: "modules/oops/missing.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 2, Column: 12, Byte: 33}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Bad news", + Detail: "It went wrong", + Range: &DiagnosticRange{ + Filename: "modules/oops/missing.tf", + Start: Pos{ + Line: 1, + Column: 6, + Byte: 5, + }, + End: Pos{ + Line: 2, + Column: 12, + Byte: 33, + }, + }, + }, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Tiny explosion", + Detail: "Unexpected detonation while parsing", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + End: hcl.Pos{Line: 1, Column: 25, Byte: 24}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Tiny explosion", + Detail: "Unexpected detonation while parsing", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 1, + Column: 10, + Byte: 9, + }, + End: Pos{ + Line: 1, + Column: 25, + Byte: 
24, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: `resource "test_resource" "test" {`, + StartLine: 1, + HighlightStartOffset: 9, + HighlightEndOffset: 24, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with source code subject but no context": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nonsense input", + Detail: "What you wrote makes no sense", + Subject: &hcl.Range{ + Filename: "short.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 4}, + End: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Nonsense input", + Detail: "What you wrote makes no sense", + Range: &DiagnosticRange{ + Filename: "short.tf", + Start: Pos{ + Line: 1, + Column: 5, + Byte: 4, + }, + End: Pos{ + Line: 1, + Column: 10, + Byte: 9, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: nil, + Code: (`bad source code`), + StartLine: (1), + HighlightStartOffset: (4), + HighlightEndOffset: (9), + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with multi-line snippet": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "In this house we respect booleans", + Detail: "True or false, there is no maybe", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 11, Byte: 81}, + End: hcl.Pos{Line: 4, Column: 16, Byte: 86}, + }, + Context: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 3, Byte: 63}, + End: hcl.Pos{Line: 5, Column: 4, Byte: 90}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "In this house we respect booleans", + Detail: "True or false, there is no maybe", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 4, + Column: 11, + Byte: 81, + }, + End: Pos{ + Line: 4, + Column: 16, + Byte: 86, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: " bar = {\n baz = maybe\n }", + StartLine: 3, + HighlightStartOffset: 
20, + HighlightEndOffset: 25, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with empty highlight range at end of source code": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "You forgot something", + Detail: "Please finish your thought", + Subject: &hcl.Range{ + Filename: "short.tf", + Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + End: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "You forgot something", + Detail: "Please finish your thought", + Range: &DiagnosticRange{ + Filename: "short.tf", + Start: Pos{ + Line: 1, + Column: 16, + Byte: 15, + }, + End: Pos{ + Line: 1, + Column: 17, + Byte: 16, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: ("bad source code"), + StartLine: (1), + HighlightStartOffset: (15), + HighlightEndOffset: (15), + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with unset highlight end position": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "There is no end", + Detail: "But there is a beginning", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + End: hcl.Pos{Line: 0, Column: 0, Byte: 0}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "There is no end", + Detail: "But there is a beginning", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 1, + Column: 16, + Byte: 15, + }, + End: Pos{ + Line: 1, + Column: 17, + Byte: 16, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: `resource "test_resource" "test" {`, + StartLine: 1, + HighlightStartOffset: 15, + HighlightEndOffset: 16, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error whose range starts at a newline": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid newline", + Detail: "How awkward!", + Subject: &hcl.Range{ + Filename: "odd-comment.tf", + Start: hcl.Pos{Line: 2, Column: 5, Byte: 4}, + End: hcl.Pos{Line: 3, 
Column: 1, Byte: 6}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Invalid newline", + Detail: "How awkward!", + Range: &DiagnosticRange{ + Filename: "odd-comment.tf", + Start: Pos{ + Line: 2, + Column: 5, + Byte: 4, + }, + End: Pos{ + Line: 3, + Column: 1, + Byte: 6, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: `#`, + StartLine: 2, + Values: []DiagnosticExpressionValue{}, + + // Due to the range starting at a newline on a blank + // line, we end up stripping off the initial newline + // to produce only a one-line snippet. That would + // therefore cause the start offset to naturally be + // -1, just before the Code we returned, but then we + // force it to zero so that the result will still be + // in range for a byte-oriented slice of Code. + HighlightStartOffset: 0, + HighlightEndOffset: 1, + }, + }, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh"), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + 
HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `is "bleurgh"`, + }, + }, + }, + }, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `has a sensitive value`, + }, + }, + }, + }, + }, + "error with source code subject and expression referring to sensitive value when not caused by sensitive values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, 
+ Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + // The sensitive value is filtered out because this is + // not a sensitive-value-related diagnostic message. 
+ }, + }, + }, + }, + "error with source code subject and expression referring to a collection containing a sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop`, + Statement: `is map of string with 1 element`, + }, + }, + }, + }, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": 
cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.String), + }), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `is a string, known only after apply`, + }, + }, + }, + }, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.DynamicPseudoType), + }), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = 
var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `will be known only after apply`, + }, + }, + }, + }, + }, + "error with source code subject and unknown expression of unknown type when not caused by unknown values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.DynamicPseudoType), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + // The unknown value is filtered out because this is + // not an unknown-value-related diagnostic message. 
+ }, + }, + }, + }, + "error with source code subject with multiple expression values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Catastrophic failure", + Detail: "Basically, everything went wrong", + Subject: &hcl.Range{ + Filename: "values.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 13, Column: 2, Byte: 102}, + }, + Expression: hcltest.MockExprList([]hcl.Expression{ + hcltest.MockExprTraversalSrc("var.a"), + hcltest.MockExprTraversalSrc("var.b"), + hcltest.MockExprTraversalSrc("var.c"), + hcltest.MockExprTraversalSrc("var.d"), + hcltest.MockExprTraversalSrc("var.e"), + hcltest.MockExprTraversalSrc("var.f"), + hcltest.MockExprTraversalSrc("var.g"), + hcltest.MockExprTraversalSrc("var.h"), + hcltest.MockExprTraversalSrc("var.i"), + hcltest.MockExprTraversalSrc("var.j"), + hcltest.MockExprTraversalSrc("var.k"), + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + "b": cty.NumberFloatVal(123.45), + "c": cty.NullVal(cty.String), + "d": cty.StringVal("secret").Mark(marks.Sensitive), + "e": cty.False, + "f": cty.ListValEmpty(cty.String), + "g": cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }), + "h": cty.ListVal([]cty.Value{ + cty.StringVal("boop"), + cty.StringVal("beep"), + cty.StringVal("blorp"), + }), + "i": cty.EmptyObjectVal, + "j": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "k": cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + "b": cty.False, + }), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Catastrophic failure", + Detail: "Basically, everything went wrong", + Range: &DiagnosticRange{ + Filename: "values.tf", + Start: Pos{ + Line: 1, + Column: 1, + Byte: 0, + }, + End: Pos{ + Line: 13, + Column: 2, + Byte: 102, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: `[ + var.a, + var.b, + var.c, + var.d, + var.e, 
+ var.f, + var.g, + var.h, + var.i, + var.j, + var.k, +]`, + StartLine: (1), + HighlightStartOffset: (0), + HighlightEndOffset: (102), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.a`, + Statement: `is true`, + }, + { + Traversal: `var.b`, + Statement: `is 123.45`, + }, + { + Traversal: `var.c`, + Statement: `is null`, + }, + { + Traversal: `var.d`, + Statement: `has a sensitive value`, + }, + { + Traversal: `var.e`, + Statement: `is false`, + }, + { + Traversal: `var.f`, + Statement: `is empty list of string`, + }, + { + Traversal: `var.g`, + Statement: `is map of string with 1 element`, + }, + { + Traversal: `var.h`, + Statement: `is list of string with 3 elements`, + }, + { + Traversal: `var.i`, + Statement: `is object with no attributes`, + }, + { + Traversal: `var.j`, + Statement: `is object with 1 attribute "foo"`, + }, + { + Traversal: `var.k`, + Statement: `is object with 2 attributes`, + }, + }, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Convert the diag into a tfdiags.Diagnostic + var diags tfdiags.Diagnostics + diags = diags.Append(tc.diag) + + got := NewDiagnostic(diags[0], sources) + if !cmp.Equal(tc.want, got) { + t.Fatalf("wrong result\n:%s", cmp.Diff(tc.want, got)) + } + }) + + t.Run(fmt.Sprintf("golden test for %s", name), func(t *testing.T) { + // Convert the diag into a tfdiags.Diagnostic + var diags tfdiags.Diagnostics + diags = diags.Append(tc.diag) + + got := NewDiagnostic(diags[0], sources) + + // Render the diagnostic to indented JSON + gotBytes, err := json.MarshalIndent(got, "", " ") + if err != nil { + t.Fatal(err) + } + + // Compare against the golden reference + filename := path.Join( + "testdata", + "diagnostic", + fmt.Sprintf("%s.json", strings.ReplaceAll(name, " ", "-")), + ) + + // Generate golden reference by uncommenting the next two lines: + // gotBytes = append(gotBytes, '\n') + // os.WriteFile(filename, gotBytes, 0644) + + wantFile, err := os.Open(filename) + 
if err != nil { + t.Fatalf("failed to open golden file: %s", err) + } + defer wantFile.Close() + wantBytes, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + + // Don't care about leading or trailing whitespace + gotString := strings.TrimSpace(string(gotBytes)) + wantString := strings.TrimSpace(string(wantBytes)) + + if !cmp.Equal(wantString, gotString) { + t.Fatalf("wrong result\n:%s", cmp.Diff(wantString, gotString)) + } + }) + } +} + +// Helper function to make constructing literal Diagnostics easier. There +// are fields which are pointer-to-string to ensure that the rendered JSON +// results in `null` for an empty value, rather than `""`. +func strPtr(s string) *string { return &s } + +// diagnosticCausedByUnknown is a testing helper for exercising our logic +// for selectively showing unknown values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by unknown values. +type diagnosticCausedByUnknown bool + +var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) + +func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { + return bool(e) +} + +// diagnosticCausedBySensitive is a testing helper for exercising our logic +// for selectively showing sensitive values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by sensitive values. 
+type diagnosticCausedBySensitive bool + +var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) + +func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { + return bool(e) +} diff --git a/internal/command/views/json/function.go b/command/views/json/function.go similarity index 100% rename from internal/command/views/json/function.go rename to command/views/json/function.go diff --git a/internal/command/views/json/function_test.go b/command/views/json/function_test.go similarity index 100% rename from internal/command/views/json/function_test.go rename to command/views/json/function_test.go diff --git a/command/views/json/hook.go b/command/views/json/hook.go new file mode 100644 index 000000000000..c619e07ae67f --- /dev/null +++ b/command/views/json/hook.go @@ -0,0 +1,376 @@ +package json + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" +) + +type Hook interface { + HookType() MessageType + String() string +} + +// ApplyStart: triggered by PreApply hook +type applyStart struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` + actionVerb string +} + +var _ Hook = (*applyStart)(nil) + +func (h *applyStart) HookType() MessageType { + return MessageApplyStart +} + +func (h *applyStart) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: %s...%s", h.Resource.Addr, h.actionVerb, id) +} + +func NewApplyStart(addr addrs.AbsResourceInstance, action plans.Action, idKey string, idValue string) Hook { + hook := &applyStart{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + IDKey: idKey, + IDValue: idValue, + actionVerb: startActionVerb(action), + } + + return hook +} + +// ApplyProgress: currently triggered by a timer started on 
PreApply. In +// future, this might also be triggered by provider progress reporting. +type applyProgress struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + Elapsed float64 `json:"elapsed_seconds"` + actionVerb string + elapsed time.Duration +} + +var _ Hook = (*applyProgress)(nil) + +func (h *applyProgress) HookType() MessageType { + return MessageApplyProgress +} + +func (h *applyProgress) String() string { + return fmt.Sprintf("%s: Still %s... [%s elapsed]", h.Resource.Addr, h.actionVerb, h.elapsed) +} + +func NewApplyProgress(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { + return &applyProgress{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + Elapsed: elapsed.Seconds(), + actionVerb: progressActionVerb(action), + elapsed: elapsed, + } +} + +// ApplyComplete: triggered by PostApply hook +type applyComplete struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` + Elapsed float64 `json:"elapsed_seconds"` + actionNoun string + elapsed time.Duration +} + +var _ Hook = (*applyComplete)(nil) + +func (h *applyComplete) HookType() MessageType { + return MessageApplyComplete +} + +func (h *applyComplete) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: %s complete after %s%s", h.Resource.Addr, h.actionNoun, h.elapsed, id) +} + +func NewApplyComplete(addr addrs.AbsResourceInstance, action plans.Action, idKey, idValue string, elapsed time.Duration) Hook { + return &applyComplete{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + IDKey: idKey, + IDValue: idValue, + Elapsed: elapsed.Seconds(), + actionNoun: actionNoun(action), + elapsed: elapsed, + } +} + +// ApplyErrored: triggered by PostApply hook on failure. 
This will be followed +// by diagnostics when the apply finishes. +type applyErrored struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + Elapsed float64 `json:"elapsed_seconds"` + actionNoun string + elapsed time.Duration +} + +var _ Hook = (*applyErrored)(nil) + +func (h *applyErrored) HookType() MessageType { + return MessageApplyErrored +} + +func (h *applyErrored) String() string { + return fmt.Sprintf("%s: %s errored after %s", h.Resource.Addr, h.actionNoun, h.elapsed) +} + +func NewApplyErrored(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { + return &applyErrored{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + Elapsed: elapsed.Seconds(), + actionNoun: actionNoun(action), + elapsed: elapsed, + } +} + +// ProvisionStart: triggered by PreProvisionInstanceStep hook +type provisionStart struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionStart)(nil) + +func (h *provisionStart) HookType() MessageType { + return MessageProvisionStart +} + +func (h *provisionStart) String() string { + return fmt.Sprintf("%s: Provisioning with '%s'...", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionStart(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionStart{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// ProvisionProgress: triggered by ProvisionOutput hook +type provisionProgress struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` + Output string `json:"output"` +} + +var _ Hook = (*provisionProgress)(nil) + +func (h *provisionProgress) HookType() MessageType { + return MessageProvisionProgress +} + +func (h *provisionProgress) String() string { + return fmt.Sprintf("%s: (%s): %s", h.Resource.Addr, h.Provisioner, h.Output) +} + +func NewProvisionProgress(addr addrs.AbsResourceInstance, provisioner string, output 
string) Hook { + return &provisionProgress{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + Output: output, + } +} + +// ProvisionComplete: triggered by PostProvisionInstanceStep hook +type provisionComplete struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionComplete)(nil) + +func (h *provisionComplete) HookType() MessageType { + return MessageProvisionComplete +} + +func (h *provisionComplete) String() string { + return fmt.Sprintf("%s: (%s) Provisioning complete", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionComplete(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionComplete{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// ProvisionErrored: triggered by PostProvisionInstanceStep hook on failure. +// This will be followed by diagnostics when the apply finishes. +type provisionErrored struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionErrored)(nil) + +func (h *provisionErrored) HookType() MessageType { + return MessageProvisionErrored +} + +func (h *provisionErrored) String() string { + return fmt.Sprintf("%s: (%s) Provisioning errored", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionErrored(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionErrored{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// RefreshStart: triggered by PreRefresh hook +type refreshStart struct { + Resource ResourceAddr `json:"resource"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` +} + +var _ Hook = (*refreshStart)(nil) + +func (h *refreshStart) HookType() MessageType { + return MessageRefreshStart +} + +func (h *refreshStart) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return 
fmt.Sprintf("%s: Refreshing state...%s", h.Resource.Addr, id) +} + +func NewRefreshStart(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { + return &refreshStart{ + Resource: newResourceAddr(addr), + IDKey: idKey, + IDValue: idValue, + } +} + +// RefreshComplete: triggered by PostRefresh hook +type refreshComplete struct { + Resource ResourceAddr `json:"resource"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` +} + +var _ Hook = (*refreshComplete)(nil) + +func (h *refreshComplete) HookType() MessageType { + return MessageRefreshComplete +} + +func (h *refreshComplete) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: Refresh complete%s", h.Resource.Addr, id) +} + +func NewRefreshComplete(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { + return &refreshComplete{ + Resource: newResourceAddr(addr), + IDKey: idKey, + IDValue: idValue, + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// present-tense verb for the applyStart hook message. +func startActionVerb(action plans.Action) string { + switch action { + case plans.Create: + return "Creating" + case plans.Update: + return "Modifying" + case plans.Delete: + return "Destroying" + case plans.Read: + return "Refreshing" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "Replacing" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "Applying". + fallthrough + default: + return "Applying" + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// present-tense verb for the applyProgress hook message. This will be +// prefixed with "Still ", so it is lower-case. 
+func progressActionVerb(action plans.Action) string { + switch action { + case plans.Create: + return "creating" + case plans.Update: + return "modifying" + case plans.Delete: + return "destroying" + case plans.Read: + return "refreshing" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "replacing" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "applying". + fallthrough + default: + return "applying" + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// noun for the applyComplete and applyErrored hook messages. This will be +// combined into a phrase like "Creation complete after 1m4s". +func actionNoun(action plans.Action) string { + switch action { + case plans.Create: + return "Creation" + case plans.Update: + return "Modifications" + case plans.Delete: + return "Destruction" + case plans.Read: + return "Refresh" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "Replacement" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "Apply". 
+ fallthrough + default: + return "Apply" + } +} diff --git a/internal/command/views/json/message_types.go b/command/views/json/message_types.go similarity index 100% rename from internal/command/views/json/message_types.go rename to command/views/json/message_types.go diff --git a/command/views/json/output.go b/command/views/json/output.go new file mode 100644 index 000000000000..65bfc883471c --- /dev/null +++ b/command/views/json/output.go @@ -0,0 +1,75 @@ +package json + +import ( + "encoding/json" + "fmt" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +type Output struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` + Action ChangeAction `json:"action,omitempty"` +} + +type Outputs map[string]Output + +func OutputsFromMap(outputValues map[string]*states.OutputValue) (Outputs, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + outputs := make(map[string]Output, len(outputValues)) + + for name, ov := range outputValues { + unmarked, _ := ov.Value.UnmarkDeep() + value, err := ctyjson.Marshal(unmarked, unmarked.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Error serializing output %q", name), + fmt.Sprintf("Error: %s", err), + )) + return nil, diags + } + valueType, err := ctyjson.MarshalType(unmarked.Type()) + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + var redactedValue json.RawMessage + if !ov.Sensitive { + redactedValue = json.RawMessage(value) + } + + outputs[name] = Output{ + Sensitive: ov.Sensitive, + Type: json.RawMessage(valueType), + Value: redactedValue, + } + } + + return outputs, nil +} + +func OutputsFromChanges(changes []*plans.OutputChangeSrc) Outputs { + outputs := make(map[string]Output, len(changes)) + + for _, change := range changes { + 
outputs[change.Addr.OutputValue.Name] = Output{ + Sensitive: change.Sensitive, + Action: changeAction(change.Action), + } + } + + return outputs +} + +func (o Outputs) String() string { + return fmt.Sprintf("Outputs: %d", len(o)) +} diff --git a/command/views/json/output_test.go b/command/views/json/output_test.go new file mode 100644 index 000000000000..22acc42fb1fc --- /dev/null +++ b/command/views/json/output_test.go @@ -0,0 +1,180 @@ +package json + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +func TestOutputsFromMap(t *testing.T) { + got, diags := OutputsFromMap(map[string]*states.OutputValue{ + // Normal non-sensitive output + "boop": { + Value: cty.NumberIntVal(1234), + }, + // Sensitive string output + "beep": { + Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), + Sensitive: true, + }, + // Sensitive object output which is marked at the leaf + "blorp": { + Value: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("oh, hi").Mark(marks.Sensitive), + }), + }), + }), + Sensitive: true, + }, + // Null value + "honk": { + Value: cty.NullVal(cty.Map(cty.Bool)), + }, + }) + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + + want := Outputs{ + "boop": { + Sensitive: false, + Type: json.RawMessage(`"number"`), + Value: json.RawMessage(`1234`), + }, + "beep": { + Sensitive: true, + Type: json.RawMessage(`"string"`), + }, + "blorp": { + Sensitive: true, + Type: json.RawMessage(`["object",{"a":["object",{"b":["object",{"c":"string"}]}]}]`), + }, + "honk": { + Sensitive: false, + Type: json.RawMessage(`["map","bool"]`), + Value: json.RawMessage(`null`), + }, + } + + if !cmp.Equal(want, got) { + t.Fatalf("unexpected result\n%s", 
cmp.Diff(want, got)) + } +} + +func TestOutputsFromChanges(t *testing.T) { + root := addrs.RootModuleInstance + num, err := plans.NewDynamicValue(cty.NumberIntVal(1234), cty.Number) + if err != nil { + t.Fatalf("unexpected error creating dynamic value: %v", err) + } + str, err := plans.NewDynamicValue(cty.StringVal("1234"), cty.String) + if err != nil { + t.Fatalf("unexpected error creating dynamic value: %v", err) + } + + got := OutputsFromChanges([]*plans.OutputChangeSrc{ + // Unchanged output "boop", value 1234 + { + Addr: root.OutputValue("boop"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: num, + After: num, + }, + Sensitive: false, + }, + // New output "beep", value 1234 + { + Addr: root.OutputValue("beep"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: num, + }, + Sensitive: false, + }, + // Deleted output "blorp", prior value 1234 + { + Addr: root.OutputValue("blorp"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: num, + After: nil, + }, + Sensitive: false, + }, + // Updated output "honk", prior value 1234, new value "1234" + { + Addr: root.OutputValue("honk"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: num, + After: str, + }, + Sensitive: false, + }, + // New sensitive output "secret", value "1234" + { + Addr: root.OutputValue("secret"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: str, + }, + Sensitive: true, + }, + }) + + want := Outputs{ + "boop": { + Action: "noop", + Sensitive: false, + }, + "beep": { + Action: "create", + Sensitive: false, + }, + "blorp": { + Action: "delete", + Sensitive: false, + }, + "honk": { + Action: "update", + Sensitive: false, + }, + "secret": { + Action: "create", + Sensitive: true, + }, + } + + if !cmp.Equal(want, got) { + t.Fatalf("unexpected result\n%s", cmp.Diff(want, got)) + } +} + +func TestOutputs_String(t *testing.T) { + outputs := Outputs{ + "boop": { + Sensitive: false, + Type: 
json.RawMessage(`"number"`), + Value: json.RawMessage(`1234`), + }, + "beep": { + Sensitive: true, + Type: json.RawMessage(`"string"`), + Value: json.RawMessage(`"horse-battery"`), + }, + } + if got, want := outputs.String(), "Outputs: 2"; got != want { + t.Fatalf("unexpected value\n got: %q\nwant: %q", got, want) + } +} diff --git a/internal/command/views/json/resource_addr.go b/command/views/json/resource_addr.go similarity index 95% rename from internal/command/views/json/resource_addr.go rename to command/views/json/resource_addr.go index 27ff502a2ce1..414ce33d8652 100644 --- a/internal/command/views/json/resource_addr.go +++ b/command/views/json/resource_addr.go @@ -4,7 +4,7 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) type ResourceAddr struct { diff --git a/internal/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json b/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json rename to command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json b/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json rename to command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json b/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json similarity index 100% rename from 
internal/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json rename to command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json rename to 
command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json 
b/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-subject.json b/command/views/json/testdata/diagnostic/error-with-source-code-subject.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-subject.json rename to command/views/json/testdata/diagnostic/error-with-source-code-subject.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json b/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json rename 
to command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json diff --git a/internal/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json b/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json rename to command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json diff --git a/internal/command/views/json/testdata/diagnostic/sourceless-warning.json b/command/views/json/testdata/diagnostic/sourceless-warning.json similarity index 100% rename from internal/command/views/json/testdata/diagnostic/sourceless-warning.json rename to command/views/json/testdata/diagnostic/sourceless-warning.json diff --git a/internal/command/views/json_view.go b/command/views/json_view.go similarity index 96% rename from internal/command/views/json_view.go rename to command/views/json_view.go index a1493bc4def6..54cdbc06e544 100644 --- a/internal/command/views/json_view.go +++ b/command/views/json_view.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/tfdiags" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/internal/command/views/json_view_test.go b/command/views/json_view_test.go similarity index 97% rename from internal/command/views/json_view_test.go rename to command/views/json_view_test.go index 6bb5c4913241..225077bfe620 100644 --- a/internal/command/views/json_view_test.go +++ b/command/views/json_view_test.go @@ -8,11 +8,11 @@ import ( "time" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - 
"github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/tfdiags" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/internal/command/views/operation.go b/command/views/operation.go similarity index 92% rename from internal/command/views/operation.go rename to command/views/operation.go index a0a8a96f9342..f63f43c8b95c 100644 --- a/internal/command/views/operation.go +++ b/command/views/operation.go @@ -5,17 +5,17 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) type Operation interface { diff 
--git a/internal/command/views/operation_test.go b/command/views/operation_test.go similarity index 98% rename from internal/command/views/operation_test.go rename to command/views/operation_test.go index 39da8959037b..46c8a3b9bbda 100644 --- a/internal/command/views/operation_test.go +++ b/command/views/operation_test.go @@ -6,14 +6,14 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/lang/globalref" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/lang/globalref" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" "github.com/zclconf/go-cty/cty" ) diff --git a/command/views/output.go b/command/views/output.go new file mode 100644 index 000000000000..e71734e2ccf3 --- /dev/null +++ b/command/views/output.go @@ -0,0 +1,285 @@ +package views + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// The Output view renders either one or all outputs, depending on whether or +// not the name argument is empty. 
+type Output interface { + Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics + Diagnostics(diags tfdiags.Diagnostics) +} + +// NewOutput returns an initialized Output implementation for the given ViewType. +func NewOutput(vt arguments.ViewType, view *View) Output { + switch vt { + case arguments.ViewJSON: + return &OutputJSON{view: view} + case arguments.ViewRaw: + return &OutputRaw{view: view} + case arguments.ViewHuman: + return &OutputHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The OutputHuman implementation renders outputs in a format equivalent to HCL +// source. This uses the same formatting logic as in the console REPL. +type OutputHuman struct { + view *View +} + +var _ Output = (*OutputHuman)(nil) + +func (v *OutputHuman) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + result := repl.FormatValue(output.Value, 0) + v.view.streams.Println(result) + return nil + } + + outputBuf := new(bytes.Buffer) + if len(outputs) > 0 { + // Output the outputs in alphabetical order + keyLen := 0 + ks := make([]string, 0, len(outputs)) + for key := range outputs { + ks = append(ks, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(ks) + + for _, k := range ks { + v := outputs[k] + if v.Sensitive { + outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) + continue + } + + result := repl.FormatValue(v.Value, 0) + outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) + } + } + + v.view.streams.Println(strings.TrimSpace(outputBuf.String())) + + return nil +} + +func (v *OutputHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The OutputRaw implementation renders 
single string, number, or boolean +// output values directly and without quotes or other formatting. This is +// intended for use in shell scripting or other environments where the exact +// type of an output value is not important. +type OutputRaw struct { + view *View +} + +var _ Output = (*OutputRaw)(nil) + +func (v *OutputRaw) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name == "" { + diags = diags.Append(fmt.Errorf("Raw output format is only supported for single outputs")) + return diags + } + + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + + strV, err := convert.Convert(output.Value, cty.String) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The -raw option only supports strings, numbers, and boolean values, but output value %q is %s.\n\nUse the -json option for machine-readable representations of output values that have complex types.", + name, output.Value.Type().FriendlyName(), + ), + )) + return diags + } + if strV.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q is null, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + if !strV.IsKnown() { + // Since we're working with values from the state it would be very + // odd to end up in here, but we'll handle it anyway to avoid a + // panic in case our rules somehow change in future. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q won't be known until after a successful terraform apply, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + // If we get out here then we should have a valid string to print. + // We're writing it using Print here so that a shell caller will get + // exactly the value and no extra whitespace (including trailing newline). + v.view.streams.Print(strV.AsString()) + return nil +} + +func (v *OutputRaw) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The OutputJSON implementation renders outputs as JSON values. When rendering +// a single output, only the value is displayed. When rendering all outputs, +// the result is a JSON object with keys matching the output names and object +// values including type and sensitivity metadata. +type OutputJSON struct { + view *View +} + +var _ Output = (*OutputJSON)(nil) + +func (v *OutputJSON) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + value := output.Value + + jsonOutput, err := ctyjson.Marshal(value, value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + + v.view.streams.Println(string(jsonOutput)) + + return nil + } + + // Due to a historical accident, the switch from state version 2 to + // 3 caused our JSON output here to be the full metadata about the + // outputs rather than just the output values themselves as we'd + // show in the single value case. We must now maintain that behavior + // for compatibility, so this is an emulation of the JSON + // serialization of outputs used in state format version 3. 
+ type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` + } + outputMetas := map[string]OutputMeta{} + + for n, os := range outputs { + jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + jsonType, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + outputMetas[n] = OutputMeta{ + Sensitive: os.Sensitive, + Type: json.RawMessage(jsonType), + Value: json.RawMessage(jsonVal), + } + } + + jsonOutputs, err := json.MarshalIndent(outputMetas, "", " ") + if err != nil { + diags = diags.Append(err) + return diags + } + + v.view.streams.Println(string(jsonOutputs)) + + return nil +} + +func (v *OutputJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// For text and raw output modes, an empty map of outputs is considered a +// separate and higher priority failure mode than an output not being present +// in a non-empty map. This warning diagnostic explains how this might have +// happened. +func noOutputsWarning() tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Warning, + "No outputs found", + "The state file either has no outputs defined, or all the defined "+ + "outputs are empty. Please define an output in your configuration "+ + "with the `output` keyword and run `terraform refresh` for it to "+ + "become available. If you are using interpolation, please verify "+ + "the interpolated value is not empty. You can use the "+ + "`terraform console` command to assist.", + ) +} + +// Attempting to display a missing output results in this failure, which +// includes suggestions on how to rectify the problem. 
+func missingOutputError(name string) tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Output %q not found", name), + "The output variable requested could not be found in the state "+ + "file. If you recently added this to your configuration, be "+ + "sure to run `terraform apply`, since the state won't be updated "+ + "with new output variables until that command is run.", + ) +} diff --git a/command/views/output_test.go b/command/views/output_test.go new file mode 100644 index 000000000000..a57c59949fdd --- /dev/null +++ b/command/views/output_test.go @@ -0,0 +1,363 @@ +package views + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terminal" + "github.com/zclconf/go-cty/cty" +) + +// Test various single output values for human-readable UI. Note that since +// OutputHuman defers to repl.FormatValue to render a single value, most of the +// test coverage should be in that package. 
+func TestOutputHuman_single(t *testing.T) { + testCases := map[string]struct { + value cty.Value + want string + wantErr bool + }{ + "string": { + value: cty.StringVal("hello"), + want: "\"hello\"\n", + }, + "list of maps": { + value: cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + "key2": cty.StringVal("value2"), + }), + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + }), + want: `tolist([ + tomap({ + "key" = "value" + "key2" = "value2" + }), + tomap({ + "key" = "value" + }), +]) +`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewHuman, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": {Value: tc.value}, + } + diags := v.Output("foo", outputs) + + if diags.HasErrors() { + if !tc.wantErr { + t.Fatalf("unexpected diagnostics: %s", diags) + } + } else if tc.wantErr { + t.Fatalf("succeeded, but want error") + } + + if got, want := done(t).Stdout(), tc.want; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Sensitive output values are rendered to the console intentionally when +// requesting a single output. 
+func TestOutput_sensitive(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "json": arguments.ViewJSON, + "raw": arguments.ViewRaw, + } + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": { + Value: cty.StringVal("secret"), + Sensitive: true, + }, + } + diags := v.Output("foo", outputs) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + // Test for substring match here because we don't care about exact + // output format in this test, just the presence of the sensitive + // value. + if got, want := done(t).Stdout(), "secret"; !strings.Contains(got, want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Showing all outputs is supported by human and JSON output format. +func TestOutput_all(t *testing.T) { + outputs := map[string]*states.OutputValue{ + "foo": { + Value: cty.StringVal("secret"), + Sensitive: true, + }, + "bar": { + Value: cty.ListVal([]cty.Value{cty.True, cty.False, cty.True}), + }, + "baz": { + Value: cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NumberIntVal(5), + "beep": cty.StringVal("true"), + }), + }, + } + + testCases := map[string]struct { + vt arguments.ViewType + want string + }{ + "human": { + arguments.ViewHuman, + `bar = tolist([ + true, + false, + true, +]) +baz = { + "beep" = "true" + "boop" = 5 +} +foo = +`, + }, + "json": { + arguments.ViewJSON, + `{ + "bar": { + "sensitive": false, + "type": [ + "list", + "bool" + ], + "value": [ + true, + false, + true + ] + }, + "baz": { + "sensitive": false, + "type": [ + "object", + { + "beep": "string", + "boop": "number" + } + ], + "value": { + "beep": "true", + "boop": 5 + } + }, + "foo": { + "sensitive": true, + "type": "string", + "value": "secret" + } +} +`, + }, + } + + for name, tc := range testCases { + t.Run(name, 
func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(tc.vt, NewView(streams)) + diags := v.Output("", outputs) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + if got := done(t).Stdout(); got != tc.want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) + } + }) + } +} + +// JSON output format supports empty outputs by rendering an empty object +// without diagnostics. +func TestOutputJSON_empty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewJSON, NewView(streams)) + + diags := v.Output("", map[string]*states.OutputValue{}) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + if got, want := done(t).Stdout(), "{}\n"; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } +} + +// Human and raw formats render a warning if there are no outputs. +func TestOutput_emptyWarning(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "raw": arguments.ViewRaw, + } + + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + diags := v.Output("", map[string]*states.OutputValue{}) + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + + if len(diags) != 1 { + t.Fatalf("expected 1 diagnostic, got %d", len(diags)) + } + + if diags.HasErrors() { + t.Fatalf("unexpected error diagnostics: %s", diags) + } + + if got, want := diags[0].Description().Summary, "No outputs found"; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } + }) + } +} + +// Raw output is a simple unquoted output format designed for shell scripts, +// which relies on the cty.AsString() implementation. This test covers +// formatting for supported value types. 
+func TestOutputRaw(t *testing.T) { + values := map[string]cty.Value{ + "str": cty.StringVal("bar"), + "multistr": cty.StringVal("bar\nbaz"), + "num": cty.NumberIntVal(2), + "bool": cty.True, + "obj": cty.EmptyObjectVal, + "null": cty.NullVal(cty.String), + "unknown": cty.UnknownVal(cty.String), + } + + tests := map[string]struct { + WantOutput string + WantErr bool + }{ + "str": {WantOutput: "bar"}, + "multistr": {WantOutput: "bar\nbaz"}, + "num": {WantOutput: "2"}, + "bool": {WantOutput: "true"}, + "obj": {WantErr: true}, + "null": {WantErr: true}, + "unknown": {WantErr: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewRaw, NewView(streams)) + + value := values[name] + outputs := map[string]*states.OutputValue{ + name: {Value: value}, + } + diags := v.Output(name, outputs) + + if diags.HasErrors() { + if !test.WantErr { + t.Fatalf("unexpected diagnostics: %s", diags) + } + } else if test.WantErr { + t.Fatalf("succeeded, but want error") + } + + if got, want := done(t).Stdout(), test.WantOutput; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Raw cannot render all outputs. 
+func TestOutputRaw_all(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewRaw, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": {Value: cty.StringVal("secret")}, + "bar": {Value: cty.True}, + } + diags := v.Output("", outputs) + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + + if !diags.HasErrors() { + t.Fatalf("expected diagnostics, got %s", diags) + } + + if got, want := diags.Err().Error(), "Raw output format is only supported for single outputs"; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } +} + +// All outputs render an error if a specific output is requested which is +// missing from the map of outputs. +func TestOutput_missing(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "json": arguments.ViewJSON, + "raw": arguments.ViewRaw, + } + + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + diags := v.Output("foo", map[string]*states.OutputValue{ + "bar": {Value: cty.StringVal("boop")}, + }) + + if len(diags) != 1 { + t.Fatalf("expected 1 diagnostic, got %d", len(diags)) + } + + if !diags.HasErrors() { + t.Fatalf("expected error diagnostics, got %s", diags) + } + + if got, want := diags[0].Description().Summary, `Output "foo" not found`; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} diff --git a/command/views/plan.go b/command/views/plan.go new file mode 100644 index 000000000000..56678f383375 --- /dev/null +++ b/command/views/plan.go @@ -0,0 +1,88 @@ +package views + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/terraform" + 
"github.com/hashicorp/terraform/tfdiags" +) + +// The Plan view is used for the plan command. +type Plan interface { + Operation() Operation + Hooks() []terraform.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewPlan returns an initialized Plan implementation for the given ViewType. +func NewPlan(vt arguments.ViewType, view *View) Plan { + switch vt { + case arguments.ViewJSON: + return &PlanJSON{ + view: NewJSONView(view), + } + case arguments.ViewHuman: + return &PlanHuman{ + view: view, + inAutomation: view.RunningInAutomation(), + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The PlanHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. +type PlanHuman struct { + view *View + + inAutomation bool +} + +var _ Plan = (*PlanHuman)(nil) + +func (v *PlanHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *PlanHuman) Hooks() []terraform.Hook { + return []terraform.Hook{ + NewUiHook(v.view), + } +} + +func (v *PlanHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *PlanHuman) HelpPrompt() { + v.view.HelpPrompt("plan") +} + +// The PlanJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. 
+type PlanJSON struct { + view *JSONView +} + +var _ Plan = (*PlanJSON)(nil) + +func (v *PlanJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *PlanJSON) Hooks() []terraform.Hook { + return []terraform.Hook{ + newJSONHook(v.view), + } +} + +func (v *PlanJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *PlanJSON) HelpPrompt() { +} diff --git a/command/views/plan_test.go b/command/views/plan_test.go new file mode 100644 index 000000000000..1fc56ade258f --- /dev/null +++ b/command/views/plan_test.go @@ -0,0 +1,176 @@ +package views + +import ( + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" +) + +// Ensure that the correct view type and in-automation settings propagate to the +// Operation view. 
+func TestPlanHuman_operation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)).Operation() + if hv, ok := v.(*OperationHuman); !ok { + t.Fatalf("unexpected return type %t", v) + } else if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value on Operation view") + } +} + +// Verify that Hooks includes a UI hook +func TestPlanHuman_hooks(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation((true))) + hooks := v.Hooks() + + var uiHook *UiHook + for _, hook := range hooks { + if ch, ok := hook.(*UiHook); ok { + uiHook = ch + } + } + if uiHook == nil { + t.Fatalf("expected Hooks to include a UiHook: %#v", hooks) + } +} + +// Helper functions to build a trivial test plan, to exercise the plan +// renderer. +func testPlan(t *testing.T) *plans.Plan { + t.Helper() + + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + + changes := plans.NewChanges() + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + + return &plans.Plan{ + Changes: changes, + } +} + +func 
testPlanWithDatasource(t *testing.T) *plans.Plan { + plan := testPlan(t) + + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + dataVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("C6743020-40BD-4591-81E6-CD08494341D3"), + "bar": cty.StringVal("foo"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(dataVal.Type()), dataVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(dataVal, dataVal.Type()) + if err != nil { + t.Fatal(err) + } + + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Read, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + + return plan +} + +func testSchemas() *terraform.Schemas { + provider := testProvider() + return &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), + }, + } +} + +func testProvider() *terraform.MockProvider { + p := new(terraform.MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + + p.GetProviderSchemaResponse = testProviderSchema() + + return p +} + +func testProviderSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{}, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + 
DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} diff --git a/command/views/refresh.go b/command/views/refresh.go new file mode 100644 index 000000000000..39db5a3bf189 --- /dev/null +++ b/command/views/refresh.go @@ -0,0 +1,112 @@ +package views + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// The Refresh view is used for the refresh command. +type Refresh interface { + Outputs(outputValues map[string]*states.OutputValue) + + Operation() Operation + Hooks() []terraform.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewRefresh returns an initialized Refresh implementation for the given ViewType. +func NewRefresh(vt arguments.ViewType, view *View) Refresh { + switch vt { + case arguments.ViewJSON: + return &RefreshJSON{ + view: NewJSONView(view), + } + case arguments.ViewHuman: + return &RefreshHuman{ + view: view, + inAutomation: view.RunningInAutomation(), + countHook: &countHook{}, + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The RefreshHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. 
+type RefreshHuman struct { + view *View + + inAutomation bool + + countHook *countHook +} + +var _ Refresh = (*RefreshHuman)(nil) + +func (v *RefreshHuman) Outputs(outputValues map[string]*states.OutputValue) { + if len(outputValues) > 0 { + v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) + NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) + } +} + +func (v *RefreshHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *RefreshHuman) Hooks() []terraform.Hook { + return []terraform.Hook{ + v.countHook, + NewUiHook(v.view), + } +} + +func (v *RefreshHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *RefreshHuman) HelpPrompt() { + v.view.HelpPrompt("refresh") +} + +// The RefreshJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. +type RefreshJSON struct { + view *JSONView +} + +var _ Refresh = (*RefreshJSON)(nil) + +func (v *RefreshJSON) Outputs(outputValues map[string]*states.OutputValue) { + outputs, diags := json.OutputsFromMap(outputValues) + if diags.HasErrors() { + v.Diagnostics(diags) + } else { + v.view.Outputs(outputs) + } +} + +func (v *RefreshJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *RefreshJSON) Hooks() []terraform.Hook { + return []terraform.Hook{ + newJSONHook(v.view), + } +} + +func (v *RefreshJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *RefreshJSON) HelpPrompt() { +} diff --git a/command/views/refresh_test.go b/command/views/refresh_test.go new file mode 100644 index 000000000000..fc907a69c54f --- /dev/null +++ b/command/views/refresh_test.go @@ -0,0 +1,107 @@ +package views + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/states" + 
t.Fatalf("unexpected return type %T", v)
+func TestRefreshHuman_outputsEmpty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewRefresh(arguments.ViewHuman, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{}) + + got := done(t).Stdout() + if got != "" { + t.Errorf("output should be empty, but got: %q", got) + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. +func TestRefreshJSON_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewRefresh(arguments.ViewJSON, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "boop_count": {Value: cty.NumberIntVal(92)}, + "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Outputs: 2", + "@module": "terraform.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop_count": map[string]interface{}{ + "sensitive": false, + "value": float64(92), + "type": "number", + }, + "password": map[string]interface{}{ + "sensitive": true, + "type": "string", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} diff --git a/command/views/show.go b/command/views/show.go new file mode 100644 index 000000000000..2cc629b7b33f --- /dev/null +++ b/command/views/show.go @@ -0,0 +1,138 @@ +package views + +import ( + "fmt" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/jsonformat" + "github.com/hashicorp/terraform/command/jsonplan" + "github.com/hashicorp/terraform/command/jsonprovider" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +type Show interface { + // Display renders the plan, if it is available. 
If plan is nil, it renders the statefile. + Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int + + // Diagnostics renders early diagnostics, resulting from argument parsing. + Diagnostics(diags tfdiags.Diagnostics) +} + +func NewShow(vt arguments.ViewType, view *View) Show { + switch vt { + case arguments.ViewJSON: + return &ShowJSON{view: view} + case arguments.ViewHuman: + return &ShowHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +type ShowHuman struct { + view *View +} + +var _ Show = (*ShowHuman)(nil) + +func (v *ShowHuman) Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int { + renderer := jsonformat.Renderer{ + Colorize: v.view.colorize, + Streams: v.view.streams, + RunningInAutomation: v.view.runningInAutomation, + } + + if plan != nil { + outputs, changed, drift, attrs, err := jsonplan.MarshalForRenderer(plan, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) + return 1 + } + + jplan := jsonformat.Plan{ + PlanFormatVersion: jsonplan.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + OutputChanges: outputs, + ResourceChanges: changed, + ResourceDrift: drift, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + RelevantAttributes: attrs, + } + + var opts []jsonformat.PlanRendererOpt + if !plan.CanApply() { + opts = append(opts, jsonformat.CanNotApply) + } + if plan.Errored { + opts = append(opts, jsonformat.Errored) + } + + renderer.RenderHumanPlan(jplan, plan.UIMode, opts...) 
+ } else { + if stateFile == nil { + v.view.streams.Println("No state.") + return 0 + } + + root, outputs, err := jsonstate.MarshalForRenderer(stateFile, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal state to json: %s", err) + return 1 + } + + jstate := jsonformat.State{ + StateFormatVersion: jsonstate.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + RootModule: root, + RootModuleOutputs: outputs, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + } + + renderer.RenderHumanState(jstate) + } + return 0 +} + +func (v *ShowHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +type ShowJSON struct { + view *View +} + +var _ Show = (*ShowJSON)(nil) + +func (v *ShowJSON) Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int { + if plan != nil { + jsonPlan, err := jsonplan.Marshal(config, plan, stateFile, schemas) + + if err != nil { + v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) + return 1 + } + v.view.streams.Println(string(jsonPlan)) + } else { + // It is possible that there is neither state nor a plan. + // That's ok, we'll just return an empty object. + jsonState, err := jsonstate.Marshal(stateFile, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal state to json: %s", err) + return 1 + } + v.view.streams.Println(string(jsonState)) + } + return 0 +} + +// Diagnostics should only be called if show cannot be executed. +// In this case, we choose to render human-readable diagnostic output, +// primarily for backwards compatibility. 
+func (v *ShowJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} diff --git a/command/views/show_test.go b/command/views/show_test.go new file mode 100644 index 000000000000..14028c84d3a7 --- /dev/null +++ b/command/views/show_test.go @@ -0,0 +1,184 @@ +package views + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/terraform" + + "github.com/zclconf/go-cty/cty" +) + +func TestShowHuman(t *testing.T) { + testCases := map[string]struct { + plan *plans.Plan + stateFile *statefile.File + schemas *terraform.Schemas + wantExact bool + wantString string + }{ + "plan file": { + testPlan(t), + nil, + testSchemas(), + false, + "# test_resource.foo will be created", + }, + "statefile": { + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: testState(), + }, + testSchemas(), + false, + "# test_resource.foo:", + }, + "empty statefile": { + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: states.NewState(), + }, + testSchemas(), + true, + "The state file is empty. 
No resources are represented.\n", + }, + "nothing": { + nil, + nil, + nil, + true, + "No state.\n", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewShow(arguments.ViewHuman, view) + + code := v.Display(nil, testCase.plan, testCase.stateFile, testCase.schemas) + if code != 0 { + t.Errorf("expected 0 return code, got %d", code) + } + + output := done(t) + got := output.Stdout() + want := testCase.wantString + if (testCase.wantExact && got != want) || (!testCase.wantExact && !strings.Contains(got, want)) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestShowJSON(t *testing.T) { + testCases := map[string]struct { + plan *plans.Plan + stateFile *statefile.File + }{ + "plan file": { + testPlan(t), + nil, + }, + "statefile": { + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: testState(), + }, + }, + "empty statefile": { + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: states.NewState(), + }, + }, + "nothing": { + nil, + nil, + }, + } + + config, _, configCleanup := initwd.MustLoadConfigForTests(t, "./testdata/show") + defer configCleanup() + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewShow(arguments.ViewJSON, view) + + schemas := &terraform.Schemas{ + Providers: map[addrs.Provider]*terraform.ProviderSchema{ + addrs.NewDefaultProvider("test"): { + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + } + + code := v.Display(config, testCase.plan, 
testCase.stateFile, schemas) + + if code != 0 { + t.Errorf("expected 0 return code, got %d", code) + } + + // Make sure the result looks like JSON; we comprehensively test + // the structure of this output in the command package tests. + var result map[string]interface{} + got := done(t).All() + t.Logf("output: %s", got) + if err := json.Unmarshal([]byte(got), &result); err != nil { + t.Fatal(err) + } + }) + } +} + +// testState returns a test State structure. +func testState() *states.State { + return states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // DeepCopy is used here to ensure our synthetic state matches exactly + // with a state that will have been copied during the command + // operation, and all fields have been copied correctly. 
+ }).DeepCopy() +} diff --git a/internal/command/views/state_locker.go b/command/views/state_locker.go similarity index 97% rename from internal/command/views/state_locker.go rename to command/views/state_locker.go index baa465f2933a..1c87600cf269 100644 --- a/internal/command/views/state_locker.go +++ b/command/views/state_locker.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform/internal/command/arguments" + "github.com/hashicorp/terraform/command/arguments" ) // The StateLocker view is used to display locking/unlocking status messages diff --git a/command/views/test.go b/command/views/test.go new file mode 100644 index 000000000000..f448bb968770 --- /dev/null +++ b/command/views/test.go @@ -0,0 +1,373 @@ +package views + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "sort" + "strings" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/moduletest" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/colorstring" +) + +// Test is the view interface for the "terraform test" command. +type Test interface { + // Results presents the given test results. + Results(map[string]*moduletest.Suite) tfdiags.Diagnostics + + // Diagnostics is for reporting warnings or errors that occurred with the + // mechanics of running tests. For this command in particular, some + // errors are considered to be test failures rather than mechanism failures, + // and so those will be reported via Results rather than via Diagnostics. + Diagnostics(tfdiags.Diagnostics) +} + +// NewTest returns an implementation of Test configured to respect the +// settings described in the given arguments. 
+func NewTest(base *View, args arguments.TestOutput) Test { + return &testHuman{ + streams: base.streams, + showDiagnostics: base.Diagnostics, + colorize: base.colorize, + junitXMLFile: args.JUnitXMLFile, + } +} + +type testHuman struct { + // This is the subset of functionality we need from the base view. + streams *terminal.Streams + showDiagnostics func(diags tfdiags.Diagnostics) + colorize *colorstring.Colorize + + // If junitXMLFile is not empty then results will be written to + // the given file path in addition to the usual output. + junitXMLFile string +} + +func (v *testHuman) Results(results map[string]*moduletest.Suite) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // FIXME: Due to how this prototype command evolved concurrently with + // establishing the idea of command views, the handling of JUnit output + // as part of the "human" view rather than as a separate view in its + // own right is a little odd and awkward. We should refactor this + // prior to making "terraform test" a real supported command to make + // it be structured more like the other commands that use the views + // package. 
// executed but that the assertion condition didn't hold.
+ v.eprintRuleHeading("yellow", "Failed", headingExtra) + + case moduletest.Error: + // Error means that the system encountered an unexpected + // error when trying to evaluate the assertion. + v.eprintRuleHeading("red", "Error", headingExtra) + + default: + // We don't do anything for moduletest.Passed or + // moduletest.Skipped. Perhaps in future we'll offer a + // -verbose option to include information about those. + continue + } + failCount++ + + if len(assertion.Message) > 0 { + dispMsg := format.WordWrap(assertion.Message, width) + v.streams.Eprintln(dispMsg) + } + if len(assertion.Diagnostics) > 0 { + // We'll do our own writing of the diagnostics in this + // case, rather than using v.Diagnostics, because we + // specifically want all of these diagnostics to go to + // Stderr along with all of the other output we've + // generated. + for _, diag := range assertion.Diagnostics { + diagStr := format.Diagnostic(diag, nil, v.colorize, width) + v.streams.Eprint(diagStr) + } + } + } + } + } + + if failCount > 0 { + // If we've printed at least one failure then we'll have printed at + // least one horizontal rule across the terminal, and so we'll balance + // that with another horizontal rule. + if width > 1 { + rule := strings.Repeat("─", width-1) + v.streams.Eprintln(v.colorize.Color("[dark_gray]" + rule)) + } + } + + if failCount == 0 { + if len(results) > 0 { + // This is not actually an error, but it's convenient if all of our + // result output goes to the same stream for when this is running in + // automation that might be gathering this output via a pipe. + v.streams.Eprint(v.colorize.Color("[bold][green]Success![reset] All of the test assertions passed.\n\n")) + } else { + v.streams.Eprint(v.colorize.Color("[bold][yellow]No tests defined.[reset] This module doesn't have any test suites to run.\n\n")) + } + } + + // Try to flush any buffering that might be happening. (This isn't always + // successful, depending on what sort of fd Stderr is connected to.) 
// expectations.
+ Skipped *Outcome `xml:"skipped,omitempty"` + Error *Outcome `xml:"error,omitempty"` + Failure *Outcome `xml:"failure,omitempty"` + + Stderr string `xml:"system-out,omitempty"` + } + + // TestSuite represents an individual test suite, of potentially many + // in a JUnit XML document. + type TestSuite struct { + Name string `xml:"name"` + TotalCount int `xml:"tests"` + SkippedCount int `xml:"skipped"` + ErrorCount int `xml:"errors"` + FailureCount int `xml:"failures"` + Cases []*TestCase `xml:"testcase"` + } + + // TestSuites represents the root element of the XML document. + type TestSuites struct { + XMLName struct{} `xml:"testsuites"` + ErrorCount int `xml:"errors"` + FailureCount int `xml:"failures"` + TotalCount int `xml:"tests"` + Suites []*TestSuite `xml:"testsuite"` + } + + xmlSuites := TestSuites{} + suiteNames := make([]string, 0, len(results)) + for suiteName := range results { + suiteNames = append(suiteNames, suiteName) + } + sort.Strings(suiteNames) + for _, suiteName := range suiteNames { + suite := results[suiteName] + + xmlSuite := &TestSuite{ + Name: suiteName, + } + xmlSuites.Suites = append(xmlSuites.Suites, xmlSuite) + + componentNames := make([]string, 0, len(suite.Components)) + for componentName := range suite.Components { + componentNames = append(componentNames, componentName) + } + for _, componentName := range componentNames { + component := suite.Components[componentName] + + assertionNames := make([]string, 0, len(component.Assertions)) + for assertionName := range component.Assertions { + assertionNames = append(assertionNames, assertionName) + } + sort.Strings(assertionNames) + + for _, assertionName := range assertionNames { + assertion := component.Assertions[assertionName] + xmlSuites.TotalCount++ + xmlSuite.TotalCount++ + + xmlCase := &TestCase{ + ComponentName: componentName, + AssertionName: assertionName, + } + xmlSuite.Cases = append(xmlSuite.Cases, xmlCase) + + switch assertion.Outcome { + case moduletest.Pending: + // We 
represent "pending" cases -- cases blocked by + // upstream errors -- as if they were "skipped" in JUnit + // terms, because we didn't actually check them and so + // can't say whether they succeeded or not. + xmlSuite.SkippedCount++ + xmlCase.Skipped = &Outcome{ + Message: assertion.Message, + } + case moduletest.Failed: + xmlSuites.FailureCount++ + xmlSuite.FailureCount++ + xmlCase.Failure = &Outcome{ + Message: assertion.Message, + } + case moduletest.Error: + xmlSuites.ErrorCount++ + xmlSuite.ErrorCount++ + xmlCase.Error = &Outcome{ + Message: assertion.Message, + } + + // We'll also include the diagnostics in the "stderr" + // portion of the output, so they'll hopefully be visible + // in a test log viewer in JUnit-XML-Consuming CI systems. + var buf strings.Builder + for _, diag := range assertion.Diagnostics { + diagStr := format.DiagnosticPlain(diag, nil, 68) + buf.WriteString(diagStr) + } + xmlCase.Stderr = buf.String() + } + + } + } + } + + xmlOut, err := xml.MarshalIndent(&xmlSuites, "", " ") + if err != nil { + // If marshalling fails then that's a bug in the code above, + // because we should always be producing a value that is + // accepted by encoding/xml. 
+ panic(fmt.Sprintf("invalid values to marshal as JUnit XML: %s", err)) + } + + err = ioutil.WriteFile(filename, xmlOut, 0644) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write JUnit XML file", + fmt.Sprintf( + "Could not create %s to record the test results in JUnit XML format: %s.", + filename, + err, + ), + )) + } + + return diags +} + +func (v *testHuman) eprintRuleHeading(color, prefix, extra string) { + const lineCell string = "─" + textLen := len(prefix) + len(": ") + len(extra) + spacingLen := 2 + leftLineLen := 3 + + rightLineLen := 0 + width := v.streams.Stderr.Columns() + if (textLen + spacingLen + leftLineLen) < (width - 1) { + // (we allow an extra column at the end because some terminals can't + // print in the final column without wrapping to the next line) + rightLineLen = width - (textLen + spacingLen + leftLineLen) - 1 + } + + colorCode := "[" + color + "]" + + // We'll prepare what we're going to print in memory first, so that we can + // send it all to stderr in one write in case other programs are also + // concurrently trying to write to the terminal for some reason. 
+ var buf strings.Builder + buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, leftLineLen))) + buf.WriteByte(' ') + buf.WriteString(v.colorize.Color("[bold]" + colorCode + prefix + ":")) + buf.WriteByte(' ') + buf.WriteString(extra) + if rightLineLen > 0 { + buf.WriteByte(' ') + buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, rightLineLen))) + } + v.streams.Eprintln(buf.String()) +} diff --git a/command/views/test_test.go b/command/views/test_test.go new file mode 100644 index 000000000000..144b3c3d24ae --- /dev/null +++ b/command/views/test_test.go @@ -0,0 +1,32 @@ +package views + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/moduletest" + "github.com/hashicorp/terraform/terminal" +) + +func TestTest(t *testing.T) { + streams, close := terminal.StreamsForTesting(t) + baseView := NewView(streams) + view := NewTest(baseView, arguments.TestOutput{ + JUnitXMLFile: "", + }) + + results := map[string]*moduletest.Suite{} + view.Results(results) + + output := close(t) + gotOutput := strings.TrimSpace(output.All()) + wantOutput := `No tests defined. This module doesn't have any test suites to run.` + if gotOutput != wantOutput { + t.Errorf("wrong output\ngot:\n%s\nwant:\n%s", gotOutput, wantOutput) + } + + // TODO: Test more at this layer. For now, the main UI output tests for + // the "terraform test" command are in the command package as part of + // the overall command tests. 
+} diff --git a/internal/command/views/testdata/show/main.tf b/command/views/testdata/show/main.tf similarity index 100% rename from internal/command/views/testdata/show/main.tf rename to command/views/testdata/show/main.tf diff --git a/command/views/validate.go b/command/views/validate.go new file mode 100644 index 000000000000..b04180f3347b --- /dev/null +++ b/command/views/validate.go @@ -0,0 +1,138 @@ +package views + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + viewsjson "github.com/hashicorp/terraform/command/views/json" + "github.com/hashicorp/terraform/tfdiags" +) + +// The Validate is used for the validate command. +type Validate interface { + // Results renders the diagnostics returned from a validation walk, and + // returns a CLI exit code: 0 if there are no errors, 1 otherwise + Results(diags tfdiags.Diagnostics) int + + // Diagnostics renders early diagnostics, resulting from argument parsing. + Diagnostics(diags tfdiags.Diagnostics) +} + +// NewValidate returns an initialized Validate implementation for the given ViewType. +func NewValidate(vt arguments.ViewType, view *View) Validate { + switch vt { + case arguments.ViewJSON: + return &ValidateJSON{view: view} + case arguments.ViewHuman: + return &ValidateHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The ValidateHuman implementation renders diagnostics in a human-readable form, +// along with a success/failure message if Terraform is able to execute the +// validation walk. 
+type ValidateHuman struct { + view *View +} + +var _ Validate = (*ValidateHuman)(nil) + +func (v *ValidateHuman) Results(diags tfdiags.Diagnostics) int { + columns := v.view.outputColumns() + + if len(diags) == 0 { + v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateSuccess), columns)) + } else { + v.Diagnostics(diags) + + if !diags.HasErrors() { + v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateWarnings), columns)) + } + } + + if diags.HasErrors() { + return 1 + } + return 0 +} + +const validateSuccess = "[green][bold]Success![reset] The configuration is valid.\n" + +const validateWarnings = "[green][bold]Success![reset] The configuration is valid, but there were some validation warnings as shown above.\n" + +func (v *ValidateHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The ValidateJSON implementation renders validation results as a JSON object. +// This object includes top-level fields summarizing the result, and an array +// of JSON diagnostic objects. +type ValidateJSON struct { + view *View +} + +var _ Validate = (*ValidateJSON)(nil) + +func (v *ValidateJSON) Results(diags tfdiags.Diagnostics) int { + // FormatVersion represents the version of the json format and will be + // incremented for any change to this format that requires changes to a + // consuming parser. + const FormatVersion = "1.0" + + type Output struct { + FormatVersion string `json:"format_version"` + + // We include some summary information that is actually redundant + // with the detailed diagnostics, but avoids the need for callers + // to re-implement our logic for deciding these. 
+ Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []*viewsjson.Diagnostic `json:"diagnostics"` + } + + output := Output{ + FormatVersion: FormatVersion, + Valid: true, // until proven otherwise + } + configSources := v.view.configSources() + for _, diag := range diags { + output.Diagnostics = append(output.Diagnostics, viewsjson.NewDiagnostic(diag, configSources)) + + switch diag.Severity() { + case tfdiags.Error: + output.ErrorCount++ + output.Valid = false + case tfdiags.Warning: + output.WarningCount++ + } + } + if output.Diagnostics == nil { + // Make sure this always appears as an array in our output, since + // this is easier to consume for dynamically-typed languages. + output.Diagnostics = []*viewsjson.Diagnostic{} + } + + j, err := json.MarshalIndent(&output, "", " ") + if err != nil { + // Should never happen because we fully-control the input here + panic(err) + } + v.view.streams.Println(string(j)) + + if diags.HasErrors() { + return 1 + } + return 0 +} + +// Diagnostics should only be called if the validation walk cannot be executed. +// In this case, we choose to render human-readable diagnostic output, +// primarily for backwards compatibility. 
+func (v *ValidateJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} diff --git a/command/views/validate_test.go b/command/views/validate_test.go new file mode 100644 index 000000000000..44744be7268a --- /dev/null +++ b/command/views/validate_test.go @@ -0,0 +1,133 @@ +package views + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestValidateHuman(t *testing.T) { + testCases := map[string]struct { + diag tfdiags.Diagnostic + wantSuccess bool + wantSubstring string + }{ + "success": { + nil, + true, + "The configuration is valid.", + }, + "warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Your shoelaces are untied", + "Watch out, or you'll trip!", + ), + true, + "The configuration is valid, but there were some validation warnings", + }, + "error": { + tfdiags.Sourceless( + tfdiags.Error, + "Configuration is missing random_pet", + "Every configuration should have a random_pet.", + ), + false, + "Error: Configuration is missing random_pet", + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewValidate(arguments.ViewHuman, view) + + var diags tfdiags.Diagnostics + + if tc.diag != nil { + diags = diags.Append(tc.diag) + } + + ret := v.Results(diags) + + if tc.wantSuccess && ret != 0 { + t.Errorf("expected 0 return code, got %d", ret) + } else if !tc.wantSuccess && ret != 1 { + t.Errorf("expected 1 return code, got %d", ret) + } + + got := done(t).All() + if strings.Contains(got, "Success!") != tc.wantSuccess { + t.Errorf("unexpected output:\n%s", got) + } + if !strings.Contains(got, tc.wantSubstring) { + t.Errorf("expected output to include %q, but was:\n%s", tc.wantSubstring, got) + } + }) + } +} + +func 
TestValidateJSON(t *testing.T) { + testCases := map[string]struct { + diag tfdiags.Diagnostic + wantSuccess bool + }{ + "success": { + nil, + true, + }, + "warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Your shoelaces are untied", + "Watch out, or you'll trip!", + ), + true, + }, + "error": { + tfdiags.Sourceless( + tfdiags.Error, + "Configuration is missing random_pet", + "Every configuration should have a random_pet.", + ), + false, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewValidate(arguments.ViewJSON, view) + + var diags tfdiags.Diagnostics + + if tc.diag != nil { + diags = diags.Append(tc.diag) + } + + ret := v.Results(diags) + + if tc.wantSuccess && ret != 0 { + t.Errorf("expected 0 return code, got %d", ret) + } else if !tc.wantSuccess && ret != 1 { + t.Errorf("expected 1 return code, got %d", ret) + } + + got := done(t).All() + + // Make sure the result looks like JSON; we comprehensively test + // the structure of this output in the command package tests. 
+ var result map[string]interface{} + + if err := json.Unmarshal([]byte(got), &result); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/internal/command/views/view.go b/command/views/view.go similarity index 95% rename from internal/command/views/view.go rename to command/views/view.go index 206ead7fd5c1..c70eb208da78 100644 --- a/internal/command/views/view.go +++ b/command/views/view.go @@ -1,10 +1,10 @@ package views import ( - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/terminal" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/colorstring" ) diff --git a/command/webbrowser/mock.go b/command/webbrowser/mock.go new file mode 100644 index 000000000000..ef411ba1e579 --- /dev/null +++ b/command/webbrowser/mock.go @@ -0,0 +1,155 @@ +package webbrowser + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "sync" + + "github.com/hashicorp/terraform/httpclient" +) + +// NewMockLauncher creates and returns a mock implementation of Launcher, +// with some special behavior designed for use in unit tests. +// +// See the documentation of MockLauncher itself for more information. +func NewMockLauncher(ctx context.Context) *MockLauncher { + client := httpclient.New() + return &MockLauncher{ + Client: client, + Context: ctx, + } +} + +// MockLauncher is a mock implementation of Launcher that has some special +// behavior designed for use in unit tests. +// +// When OpenURL is called, MockLauncher will make an HTTP request to the given +// URL rather than interacting with a "real" browser. 
+// +// In normal situations it will then return with no further action, but if +// the response to the given URL is either a standard HTTP redirect response +// or includes the custom HTTP header X-Redirect-To then MockLauncher will +// send a follow-up request to that target URL, and continue in this manner +// until it reaches a URL that is not a redirect. (The X-Redirect-To header +// is there so that a server can potentially offer a normal HTML page to +// an actual browser while also giving a next-hop hint for MockLauncher.) +// +// Since MockLauncher is not a full programmable user-agent implementation +// it can't be used for testing of real-world web applications, but it can +// be used for testing against specialized test servers that are written +// with MockLauncher in mind and know how to drive the request flow through +// whatever steps are required to complete the desired test. +// +// All of the actions taken by MockLauncher happen asynchronously in the +// background, to simulate the concurrency of a separate web browser. +// Test code using MockLauncher should provide a context which is cancelled +// when the test completes, to help avoid leaking MockLaunchers. +type MockLauncher struct { + // Client is the HTTP client that MockLauncher will use to make requests. + // By default (if you use NewMockLauncher) this is a new client created + // via httpclient.New, but callers may override it if they need customized + // behavior for a particular test. + // + // Do not use a client that is shared with any other subsystem, because + // MockLauncher will customize the settings of the given client. + Client *http.Client + + // Context can be cancelled in order to abort an OpenURL call before it + // would naturally complete. + Context context.Context + + // Responses is a log of all of the responses received from the launcher's + // requests, in the order requested. 
+ Responses []*http.Response + + // done is a waitgroup used internally to signal when the async work is + // complete, in order to make this mock more convenient to use in tests. + done sync.WaitGroup +} + +var _ Launcher = (*MockLauncher)(nil) + +// OpenURL is the mock implementation of Launcher, which has the special +// behavior described for type MockLauncher. +func (l *MockLauncher) OpenURL(u string) error { + // We run our operation in the background because it's supposed to be + // behaving like a web browser running in a separate process. + log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) starting in the background", u) + l.done.Add(1) + go func() { + err := l.openURL(u) + if err != nil { + // Can't really do anything with this asynchronously, so we'll + // just log it so that someone debugging will be able to see it. + log.Printf("[ERROR] webbrowser.MockLauncher: OpenURL(%q): %s", u, err) + } else { + log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) has concluded", u) + } + l.done.Done() + }() + return nil +} + +func (l *MockLauncher) openURL(u string) error { + // We need to disable automatic redirect following so that we can implement + // it ourselves below, and thus be able to see the redirects in our + // responses log. + l.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + // We'll keep looping as long as the server keeps giving us new URLs to + // request. 
+ for u != "" { + log.Printf("[DEBUG] webbrowser.MockLauncher: requesting %s", u) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return fmt.Errorf("failed to construct HTTP request for %s: %s", u, err) + } + resp, err := l.Client.Do(req) + if err != nil { + log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", err) + return fmt.Errorf("error requesting %s: %s", u, err) + } + l.Responses = append(l.Responses, resp) + if resp.StatusCode >= 400 { + log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", resp.Status) + return fmt.Errorf("error requesting %s: %s", u, resp.Status) + } + log.Printf("[DEBUG] webbrowser.MockLauncher: request succeeded: %s", resp.Status) + + u = "" // unless it's a redirect, we'll stop after this + if location := resp.Header.Get("Location"); location != "" { + u = location + } else if redirectTo := resp.Header.Get("X-Redirect-To"); redirectTo != "" { + u = redirectTo + } + + if u != "" { + // HTTP technically doesn't permit relative URLs in Location, but + // browsers tolerate it and so real-world servers do it, and thus + // we'll allow it here too. + oldURL := resp.Request.URL + givenURL, err := url.Parse(u) + if err != nil { + return fmt.Errorf("invalid redirect URL %s: %s", u, err) + } + u = oldURL.ResolveReference(givenURL).String() + log.Printf("[DEBUG] webbrowser.MockLauncher: redirected to %s", u) + } + } + + log.Printf("[DEBUG] webbrowser.MockLauncher: all done") + return nil +} + +// Wait blocks until the MockLauncher has finished its asynchronous work of +// making HTTP requests and following redirects, at which point it will have +// reached a request that didn't redirect anywhere and stopped iterating. 
+func (l *MockLauncher) Wait() { + log.Printf("[TRACE] webbrowser.MockLauncher: Wait() for current work to complete") + l.done.Wait() +} diff --git a/internal/command/webbrowser/mock_test.go b/command/webbrowser/mock_test.go similarity index 100% rename from internal/command/webbrowser/mock_test.go rename to command/webbrowser/mock_test.go diff --git a/internal/command/webbrowser/native.go b/command/webbrowser/native.go similarity index 100% rename from internal/command/webbrowser/native.go rename to command/webbrowser/native.go diff --git a/internal/command/webbrowser/webbrowser.go b/command/webbrowser/webbrowser.go similarity index 100% rename from internal/command/webbrowser/webbrowser.go rename to command/webbrowser/webbrowser.go diff --git a/internal/command/workdir/dir.go b/command/workdir/dir.go similarity index 100% rename from internal/command/workdir/dir.go rename to command/workdir/dir.go diff --git a/internal/command/workdir/doc.go b/command/workdir/doc.go similarity index 100% rename from internal/command/workdir/doc.go rename to command/workdir/doc.go diff --git a/internal/command/workdir/normalize_path.go b/command/workdir/normalize_path.go similarity index 100% rename from internal/command/workdir/normalize_path.go rename to command/workdir/normalize_path.go diff --git a/internal/command/workdir/plugin_dirs.go b/command/workdir/plugin_dirs.go similarity index 100% rename from internal/command/workdir/plugin_dirs.go rename to command/workdir/plugin_dirs.go diff --git a/internal/command/workdir/plugin_dirs_test.go b/command/workdir/plugin_dirs_test.go similarity index 100% rename from internal/command/workdir/plugin_dirs_test.go rename to command/workdir/plugin_dirs_test.go diff --git a/internal/command/workspace_command.go b/command/workspace_command.go similarity index 100% rename from internal/command/workspace_command.go rename to command/workspace_command.go diff --git a/internal/command/workspace_command_test.go 
b/command/workspace_command_test.go similarity index 96% rename from internal/command/workspace_command_test.go rename to command/workspace_command_test.go index 7d2052e111d7..9f11f6d21a8e 100644 --- a/internal/command/workspace_command_test.go +++ b/command/workspace_command_test.go @@ -7,15 +7,15 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/backend/local" - "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/backend/remote-state/inmem" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" - legacy "github.com/hashicorp/terraform/internal/legacy/terraform" + legacy "github.com/hashicorp/terraform/legacy/terraform" ) func TestWorkspace_createAndChange(t *testing.T) { diff --git a/internal/command/workspace_delete.go b/command/workspace_delete.go similarity index 95% rename from internal/command/workspace_delete.go rename to command/workspace_delete.go index f59852d5d7c2..afe265d326ce 100644 --- a/internal/command/workspace_delete.go +++ b/command/workspace_delete.go @@ -5,11 +5,11 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states" + 
"github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/internal/command/workspace_list.go b/command/workspace_list.go similarity index 97% rename from internal/command/workspace_list.go rename to command/workspace_list.go index 7b43bc34627d..40f7d426d719 100644 --- a/internal/command/workspace_list.go +++ b/command/workspace_list.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/posener/complete" ) diff --git a/internal/command/workspace_new.go b/command/workspace_new.go similarity index 94% rename from internal/command/workspace_new.go rename to command/workspace_new.go index 38985622079f..de254c1f5c1d 100644 --- a/internal/command/workspace_new.go +++ b/command/workspace_new.go @@ -6,11 +6,11 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/internal/command/workspace_select.go b/command/workspace_select.go similarity index 98% rename from internal/command/workspace_select.go rename to command/workspace_select.go index 7dd7fc7e6d9a..791e01ade39f 100644 --- a/internal/command/workspace_select.go +++ b/command/workspace_select.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/posener/complete" ) diff 
--git a/internal/command/workspace_show.go b/command/workspace_show.go similarity index 100% rename from internal/command/workspace_show.go rename to command/workspace_show.go diff --git a/commands.go b/commands.go index aadfc51b3114..cb18b95d4a1f 100644 --- a/commands.go +++ b/commands.go @@ -10,14 +10,14 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/auth" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command" - "github.com/hashicorp/terraform/internal/command/cliconfig" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/command/webbrowser" - "github.com/hashicorp/terraform/internal/getproviders" - pluginDiscovery "github.com/hashicorp/terraform/internal/plugin/discovery" - "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command" + "github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/command/webbrowser" + "github.com/hashicorp/terraform/getproviders" + pluginDiscovery "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/terminal" ) // runningInAutomationEnvName gives the name of an environment variable that diff --git a/communicator/communicator.go b/communicator/communicator.go new file mode 100644 index 000000000000..2908da18a0c7 --- /dev/null +++ b/communicator/communicator.go @@ -0,0 +1,170 @@ +package communicator + +import ( + "context" + "fmt" + "io" + "log" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/communicator/shared" + "github.com/hashicorp/terraform/communicator/ssh" + "github.com/hashicorp/terraform/communicator/winrm" + "github.com/hashicorp/terraform/provisioners" + 
"github.com/zclconf/go-cty/cty" +) + +// Communicator is an interface that must be implemented by all communicators +// used for any of the provisioners +type Communicator interface { + // Connect is used to set up the connection + Connect(provisioners.UIOutput) error + + // Disconnect is used to terminate the connection + Disconnect() error + + // Timeout returns the configured connection timeout + Timeout() time.Duration + + // ScriptPath returns the configured script path + ScriptPath() string + + // Start executes a remote command in a new session + Start(*remote.Cmd) error + + // Upload is used to upload a single file + Upload(string, io.Reader) error + + // UploadScript is used to upload a file as an executable script + UploadScript(string, io.Reader) error + + // UploadDir is used to upload a directory + UploadDir(string, string) error +} + +// New returns a configured Communicator or an error if the connection type is not supported +func New(v cty.Value) (Communicator, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + typeVal := v.GetAttr("type") + connType := "" + if !typeVal.IsNull() { + connType = typeVal.AsString() + } + + switch connType { + case "ssh", "": // The default connection type is ssh, so if connType is empty use ssh + return ssh.New(v) + case "winrm": + return winrm.New(v) + default: + return nil, fmt.Errorf("connection type '%s' not supported", connType) + } +} + +// maxBackoffDelay is the maximum delay between retry attempts +var maxBackoffDelay = 20 * time.Second +var initialBackoffDelay = time.Second + +// in practice we want to abort the retry asap, but for tests we need to +// synchronize the return. +var retryTestWg *sync.WaitGroup + +// Fatal is an interface that error values can return to halt Retry +type Fatal interface { + FatalError() error +} + +// Retry retries the function f until it returns a nil error, a Fatal error, or +// the context expires. 
+func Retry(ctx context.Context, f func() error) error { + // container for atomic error value + type errWrap struct { + E error + } + + // Try the function in a goroutine + var errVal atomic.Value + doneCh := make(chan struct{}) + go func() { + if retryTestWg != nil { + defer retryTestWg.Done() + } + + defer close(doneCh) + + delay := time.Duration(0) + for { + // If our context ended, we want to exit right away. + select { + case <-ctx.Done(): + return + case <-time.After(delay): + } + + // Try the function call + err := f() + + // return if we have no error, or a FatalError + done := false + switch e := err.(type) { + case nil: + done = true + case Fatal: + err = e.FatalError() + done = true + } + + errVal.Store(errWrap{err}) + + if done { + return + } + + log.Printf("[WARN] retryable error: %v", err) + + delay *= 2 + + if delay == 0 { + delay = initialBackoffDelay + } + + if delay > maxBackoffDelay { + delay = maxBackoffDelay + } + + log.Printf("[INFO] sleeping for %s", delay) + } + }() + + // Wait for completion + select { + case <-ctx.Done(): + case <-doneCh: + } + + var lastErr error + // Check if we got an error executing + if ev, ok := errVal.Load().(errWrap); ok { + lastErr = ev.E + } + + // Check if we have a context error to check if we're interrupted or timeout + switch ctx.Err() { + case context.Canceled: + return fmt.Errorf("interrupted - last error: %v", lastErr) + case context.DeadlineExceeded: + return fmt.Errorf("timeout - last error: %v", lastErr) + } + + if lastErr != nil { + return lastErr + } + return nil +} diff --git a/internal/communicator/communicator_mock.go b/communicator/communicator_mock.go similarity index 95% rename from internal/communicator/communicator_mock.go rename to communicator/communicator_mock.go index b60edec19721..b619560c0cab 100644 --- a/internal/communicator/communicator_mock.go +++ b/communicator/communicator_mock.go @@ -7,8 +7,8 @@ import ( "strings" "time" - 
"github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/provisioners" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/provisioners" ) // MockCommunicator is an implementation of Communicator that can be used for tests. diff --git a/internal/communicator/communicator_test.go b/communicator/communicator_test.go similarity index 100% rename from internal/communicator/communicator_test.go rename to communicator/communicator_test.go diff --git a/internal/communicator/remote/command.go b/communicator/remote/command.go similarity index 100% rename from internal/communicator/remote/command.go rename to communicator/remote/command.go diff --git a/internal/communicator/remote/command_test.go b/communicator/remote/command_test.go similarity index 100% rename from internal/communicator/remote/command_test.go rename to communicator/remote/command_test.go diff --git a/internal/communicator/shared/shared.go b/communicator/shared/shared.go similarity index 98% rename from internal/communicator/shared/shared.go rename to communicator/shared/shared.go index 5990807a7809..a9bb22b45cb2 100644 --- a/internal/communicator/shared/shared.go +++ b/communicator/shared/shared.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/communicator/shared/shared_test.go b/communicator/shared/shared_test.go similarity index 100% rename from internal/communicator/shared/shared_test.go rename to communicator/shared/shared_test.go diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go new file mode 100644 index 000000000000..c1bd364aa9fd --- /dev/null +++ b/communicator/ssh/communicator.go @@ -0,0 +1,896 @@ +package ssh + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + 
"net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/apparentlymart/go-shquot/shquot" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + _ "github.com/hashicorp/terraform/logging" +) + +const ( + // DefaultShebang is added at the top of a SSH script file + DefaultShebang = "#!/bin/sh\n" +) + +var ( + // randShared is a global random generator object that is shared. This must be + // shared since it is seeded by the current time and creating multiple can + // result in the same values. By using a shared RNG we assure different numbers + // per call. + randLock sync.Mutex + randShared *rand.Rand + + // enable ssh keeplive probes by default + keepAliveInterval = 2 * time.Second + + // max time to wait for for a KeepAlive response before considering the + // connection to be dead. + maxKeepAliveDelay = 120 * time.Second +) + +// Communicator represents the SSH communicator +type Communicator struct { + connInfo *connectionInfo + client *ssh.Client + config *sshConfig + conn net.Conn + cancelKeepAlive context.CancelFunc + + lock sync.Mutex +} + +type sshConfig struct { + // The configuration of the Go SSH connection + config *ssh.ClientConfig + + // connection returns a new connection. The current connection + // in use will be closed as part of the Close method, or in the + // case an error occurs. + connection func() (net.Conn, error) + + // noPty, if true, will not request a pty from the remote end. + noPty bool + + // sshAgent is a struct surrounding the agent.Agent client and the net.Conn + // to the SSH Agent. It is nil if no SSH agent is configured + sshAgent *sshAgent +} + +type fatalError struct { + error +} + +func (e fatalError) FatalError() error { + return e.error +} + +// New creates a new communicator implementation over SSH. 
+func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) + if err != nil { + return nil, err + } + + config, err := prepareSSHConfig(connInfo) + if err != nil { + return nil, err + } + + // Set up the random number generator once. The seed value is the + // time multiplied by the PID. This can overflow the int64 but that + // is okay. We multiply by the PID in case we have multiple processes + // grabbing this at the same time. This is possible with Terraform and + // if we communicate to the same host at the same instance, we could + // overwrite the same files. Multiplying by the PID prevents this. + randLock.Lock() + defer randLock.Unlock() + if randShared == nil { + randShared = rand.New(rand.NewSource( + time.Now().UnixNano() * int64(os.Getpid()))) + } + + comm := &Communicator{ + connInfo: connInfo, + config: config, + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o provisioners.UIOutput) (err error) { + // Grab a lock so we can modify our internal attributes + c.lock.Lock() + defer c.lock.Unlock() + + if c.conn != nil { + c.conn.Close() + } + + // Set the conn and client to nil since we'll recreate it + c.conn = nil + c.client = nil + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via SSH...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " Certificate: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: %t\n"+ + " Target Platform: %s\n", + c.connInfo.Host, c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.PrivateKey != "", + c.connInfo.Certificate != "", + c.connInfo.Agent, + c.connInfo.HostKey != "", + c.connInfo.TargetPlatform, + )) + + if c.connInfo.BastionHost != "" { + o.Output(fmt.Sprintf( + "Using configured bastion host...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " Certificate: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: 
%t", + c.connInfo.BastionHost, c.connInfo.BastionUser, + c.connInfo.BastionPassword != "", + c.connInfo.BastionPrivateKey != "", + c.connInfo.BastionCertificate != "", + c.connInfo.Agent, + c.connInfo.BastionHostKey != "", + )) + } + + if c.connInfo.ProxyHost != "" { + o.Output(fmt.Sprintf( + "Using configured proxy host...\n"+ + " ProxyHost: %s\n"+ + " ProxyPort: %d\n"+ + " ProxyUserName: %s\n"+ + " ProxyUserPassword: %t", + c.connInfo.ProxyHost, + c.connInfo.ProxyPort, + c.connInfo.ProxyUserName, + c.connInfo.ProxyUserPassword != "", + )) + } + } + + hostAndPort := fmt.Sprintf("%s:%d", c.connInfo.Host, c.connInfo.Port) + log.Printf("[DEBUG] Connecting to %s for SSH", hostAndPort) + c.conn, err = c.config.connection() + if err != nil { + // Explicitly set this to the REAL nil. Connection() can return + // a nil implementation of net.Conn which will make the + // "if c.conn == nil" check fail above. Read here for more information + // on this psychotic language feature: + // + // http://golang.org/doc/faq#nil_error + c.conn = nil + + log.Printf("[ERROR] connection error: %s", err) + return err + } + + log.Printf("[DEBUG] Connection established. Handshaking for user %v", c.connInfo.User) + sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, hostAndPort, c.config.config) + if err != nil { + err = fmt.Errorf("SSH authentication failed (%s@%s): %w", c.connInfo.User, hostAndPort, err) + + // While in theory this should be a fatal error, some hosts may start + // the ssh service before it is properly configured, or before user + // authentication data is available. + // Log the error, and allow the provisioner to retry. 
+ log.Printf("[WARN] %s", err) + return err + } + + c.client = ssh.NewClient(sshConn, sshChan, req) + + if c.config.sshAgent != nil { + log.Printf("[DEBUG] Telling SSH config to forward to agent") + if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil { + return fatalError{err} + } + + log.Printf("[DEBUG] Setting up a session to request agent forwarding") + session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + + if err == nil { + log.Printf("[INFO] agent forwarding enabled") + } else { + log.Printf("[WARN] error forwarding agent: %s", err) + } + } + + if err != nil { + return err + } + + if o != nil { + o.Output("Connected!") + } + + ctx, cancelKeepAlive := context.WithCancel(context.TODO()) + c.cancelKeepAlive = cancelKeepAlive + + // Start a keepalive goroutine to help maintain the connection for + // long-running commands. + log.Printf("[DEBUG] starting ssh KeepAlives") + + // We want a local copy of the ssh client pointer, so that a reconnect + // doesn't race with the running keep-alive loop. + sshClient := c.client + go func() { + defer cancelKeepAlive() + // Along with the KeepAlives generating packets to keep the tcp + // connection open, we will use the replies to verify liveness of the + // connection. This will prevent dead connections from blocking the + // provisioner indefinitely. 
+ respCh := make(chan error, 1) + + go func() { + t := time.NewTicker(keepAliveInterval) + defer t.Stop() + for { + select { + case <-t.C: + _, _, err := sshClient.SendRequest("keepalive@terraform.io", true, nil) + respCh <- err + case <-ctx.Done(): + return + } + } + }() + + after := time.NewTimer(maxKeepAliveDelay) + defer after.Stop() + + for { + select { + case err := <-respCh: + if err != nil { + log.Printf("[ERROR] ssh keepalive: %s", err) + sshConn.Close() + return + } + case <-after.C: + // abort after too many missed keepalives + log.Println("[ERROR] no reply from ssh server") + sshConn.Close() + return + case <-ctx.Done(): + return + } + if !after.Stop() { + <-after.C + } + after.Reset(maxKeepAliveDelay) + } + }() + + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func (c *Communicator) Disconnect() error { + c.lock.Lock() + defer c.lock.Unlock() + + if c.cancelKeepAlive != nil { + c.cancelKeepAlive() + } + + if c.config.sshAgent != nil { + if err := c.config.sshAgent.Close(); err != nil { + return err + } + } + + if c.conn != nil { + conn := c.conn + c.conn = nil + return conn.Close() + } + + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + randLock.Lock() + defer randLock.Unlock() + + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(randShared.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(cmd *remote.Cmd) error { + cmd.Init() + + session, err := c.newSession() + if err != nil { + return err + } + + // Set up our session + session.Stdin = cmd.Stdin + session.Stdout = cmd.Stdout + session.Stderr = cmd.Stderr + + if !c.config.noPty && c.connInfo.TargetPlatform != 
TargetPlatformWindows { + // Request a PTY + termModes := ssh.TerminalModes{ + ssh.ECHO: 0, // do not echo + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + + if err := session.RequestPty("xterm", 80, 40, termModes); err != nil { + return err + } + } + + log.Printf("[DEBUG] starting remote command: %s", cmd.Command) + err = session.Start(strings.TrimSpace(cmd.Command) + "\n") + if err != nil { + return err + } + + // Start a goroutine to wait for the session to end and set the + // exit boolean and status. + go func() { + defer session.Close() + + err := session.Wait() + exitStatus := 0 + if err != nil { + exitErr, ok := err.(*ssh.ExitError) + if ok { + exitStatus = exitErr.ExitStatus() + } + } + + cmd.SetExitStatus(exitStatus, err) + log.Printf("[DEBUG] remote command exited with '%d': %s", exitStatus, cmd.Command) + }() + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + // The target directory and file for talking the SCP protocol + targetDir := filepath.Dir(path) + targetFile := filepath.Base(path) + + // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). + // This does not work when the target host is unix. 
Switch to forward slash + // which works for unix and windows + targetDir = filepath.ToSlash(targetDir) + + // Skip copying if we can get the file size directly from common io.Readers + size := int64(0) + + switch src := input.(type) { + case *os.File: + fi, err := src.Stat() + if err == nil { + size = fi.Size() + } + case *bytes.Buffer: + size = int64(src.Len()) + case *bytes.Reader: + size = int64(src.Len()) + case *strings.Reader: + size = int64(src.Len()) + } + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + return scpUploadFile(targetFile, input, w, stdoutR, size) + } + + cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) + if err != nil { + return err + } + return c.scpSession(cmd, scpFunc) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + reader := bufio.NewReader(input) + prefix, err := reader.Peek(2) + if err != nil { + return fmt.Errorf("Error reading script: %s", err) + } + var script bytes.Buffer + + if string(prefix) != "#!" 
&& c.connInfo.TargetPlatform != TargetPlatformWindows { + script.WriteString(DefaultShebang) + } + script.ReadFrom(reader) + + if err := c.Upload(path, &script); err != nil { + return err + } + if c.connInfo.TargetPlatform != TargetPlatformWindows { + var stdout, stderr bytes.Buffer + cmd := &remote.Cmd{ + Command: fmt.Sprintf("chmod 0777 %s", path), + Stdout: &stdout, + Stderr: &stderr, + } + if err := c.Start(cmd); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine: %s", err) + } + + if err := cmd.Wait(); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine %v: %s %s", err, stdout.String(), stderr.String()) + } + } + return nil +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) + scpFunc := func(w io.Writer, r *bufio.Reader) error { + uploadEntries := func() error { + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(src, entries, w, r) + } + + if src[len(src)-1] != '/' { + log.Printf("[DEBUG] No trailing slash, creating the source directory name") + return scpUploadDirProtocol(filepath.Base(src), w, r, uploadEntries) + } + // Trailing slash, so only upload the contents + return uploadEntries() + } + + cmd, err := quoteShell([]string{"scp", "-rvt", dst}, c.connInfo.TargetPlatform) + if err != nil { + return err + } + return c.scpSession(cmd, scpFunc) +} + +func (c *Communicator) newSession() (session *ssh.Session, err error) { + log.Println("[DEBUG] opening new ssh session") + if c.client == nil { + err = errors.New("ssh client is not connected") + } else { + session, err = c.client.NewSession() + } + + if err != nil { + log.Printf("[WARN] ssh session open error: '%s', attempting reconnect", err) + if err := 
c.Connect(nil); err != nil { + return nil, err + } + + return c.client.NewSession() + } + + return session, nil +} + +func (c *Communicator) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { + session, err := c.newSession() + if err != nil { + return err + } + defer session.Close() + + // Get a pipe to stdin so that we can send data down + stdinW, err := session.StdinPipe() + if err != nil { + return err + } + + // We only want to close once, so we nil w after we close it, + // and only close in the defer if it hasn't been closed already. + defer func() { + if stdinW != nil { + stdinW.Close() + } + }() + + // Get a pipe to stdout so that we can get responses back + stdoutPipe, err := session.StdoutPipe() + if err != nil { + return err + } + stdoutR := bufio.NewReader(stdoutPipe) + + // Set stderr to a bytes buffer + stderr := new(bytes.Buffer) + session.Stderr = stderr + + // Start the sink mode on the other side + // TODO(mitchellh): There are probably issues with shell escaping the path + log.Println("[DEBUG] Starting remote scp process: ", scpCommand) + if err := session.Start(scpCommand); err != nil { + return err + } + + // Call our callback that executes in the context of SCP. We ignore + // EOF errors if they occur because it usually means that SCP prematurely + // ended on the other side. + log.Println("[DEBUG] Started SCP session, beginning transfers...") + if err := f(stdinW, stdoutR); err != nil && err != io.EOF { + return err + } + + // Close the stdin, which sends an EOF, and then set w to nil so that + // our defer func doesn't close it again since that is unsafe with + // the Go SSH package. + log.Println("[DEBUG] SCP session complete, closing stdin pipe.") + stdinW.Close() + stdinW = nil + + // Wait for the SCP connection to close, meaning it has consumed all + // our data and has completed. Or has errored. 
+ log.Println("[DEBUG] Waiting for SSH session to complete.") + err = session.Wait() + + // log any stderr before exiting on an error + scpErr := stderr.String() + if len(scpErr) > 0 { + log.Printf("[ERROR] scp stderr: %q", stderr) + } + + if err != nil { + if exitErr, ok := err.(*ssh.ExitError); ok { + // Otherwise, we have an ExitErorr, meaning we can just read + // the exit status + log.Printf("[ERROR] %s", exitErr) + + // If we exited with status 127, it means SCP isn't available. + // Return a more descriptive error for that. + if exitErr.ExitStatus() == 127 { + return errors.New( + "SCP failed to start. This usually means that SCP is not\n" + + "properly installed on the remote system.") + } + } + + return err + } + + return nil +} + +// checkSCPStatus checks that a prior command sent to SCP completed +// successfully. If it did not complete successfully, an error will +// be returned. +func checkSCPStatus(r *bufio.Reader) error { + code, err := r.ReadByte() + if err != nil { + return err + } + + if code != 0 { + // Treat any non-zero (really 1 and 2) as fatal errors + message, _, err := r.ReadLine() + if err != nil { + return fmt.Errorf("Error reading error message: %s", err) + } + + return errors.New(string(message)) + } + + return nil +} + +var testUploadSizeHook func(size int64) + +func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, size int64) error { + if testUploadSizeHook != nil { + testUploadSizeHook(size) + } + + if size == 0 { + // Create a temporary file where we can copy the contents of the src + // so that we can determine the length, since SCP is length-prefixed. 
+ tf, err := ioutil.TempFile("", "terraform-upload") + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + defer os.Remove(tf.Name()) + defer tf.Close() + + log.Println("[DEBUG] Copying input data into temporary file so we can read the length") + if _, err := io.Copy(tf, src); err != nil { + return err + } + + // Sync the file so that the contents are definitely on disk, then + // read the length of it. + if err := tf.Sync(); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + // Seek the file to the beginning so we can re-read all of it + if _, err := tf.Seek(0, 0); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + fi, err := tf.Stat() + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + src = tf + size = fi.Size() + } + + // Start the protocol + log.Println("[DEBUG] Beginning file upload...") + fmt.Fprintln(w, "C0644", size, dst) + if err := checkSCPStatus(r); err != nil { + return err + } + + if _, err := io.Copy(w, src); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + if err := checkSCPStatus(r); err != nil { + return err + } + + return nil +} + +func scpUploadDirProtocol(name string, w io.Writer, r *bufio.Reader, f func() error) error { + log.Printf("[DEBUG] SCP: starting directory upload: %s", name) + fmt.Fprintln(w, "D0755 0", name) + err := checkSCPStatus(r) + if err != nil { + return err + } + + if err := f(); err != nil { + return err + } + + fmt.Fprintln(w, "E") + if err != nil { + return err + } + + return nil +} + +func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) error { + for _, fi := range fs { + realPath := filepath.Join(root, fi.Name()) + + // Track if this is actually a symlink to a directory. If it is + // a symlink to a file we don't do any special behavior because uploading + // a file just works. 
If it is a directory, we need to know so we + // treat it as such. + isSymlinkToDir := false + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + symPath, err := filepath.EvalSymlinks(realPath) + if err != nil { + return err + } + + symFi, err := os.Lstat(symPath) + if err != nil { + return err + } + + isSymlinkToDir = symFi.IsDir() + } + + if !fi.IsDir() && !isSymlinkToDir { + // It is a regular file (or symlink to a file), just upload it + f, err := os.Open(realPath) + if err != nil { + return err + } + + err = func() error { + defer f.Close() + return scpUploadFile(fi.Name(), f, w, r, fi.Size()) + }() + + if err != nil { + return err + } + + continue + } + + // It is a directory, recursively upload + err := scpUploadDirProtocol(fi.Name(), w, r, func() error { + f, err := os.Open(realPath) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(realPath, entries, w, r) + }) + if err != nil { + return err + } + } + + return nil +} + +// ConnectFunc is a convenience method for returning a function +// that just uses net.Dial to communicate with the remote end that +// is suitable for use with the SSH communicator configuration. +func ConnectFunc(network, addr string, p *proxyInfo) func() (net.Conn, error) { + return func() (net.Conn, error) { + var c net.Conn + var err error + + // Wrap connection to host if proxy server is configured + if p != nil { + RegisterDialerType() + c, err = newHttpProxyConn(p, addr) + } else { + c, err = net.DialTimeout(network, addr, 15*time.Second) + } + + if err != nil { + return nil, err + } + + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + } + + return c, nil + } +} + +// BastionConnectFunc is a convenience method for returning a function +// that connects to a host over a bastion connection. 
+func BastionConnectFunc( + bProto string, + bAddr string, + bConf *ssh.ClientConfig, + proto string, + addr string, + p *proxyInfo) func() (net.Conn, error) { + return func() (net.Conn, error) { + log.Printf("[DEBUG] Connecting to bastion: %s", bAddr) + var bastion *ssh.Client + var err error + + // Wrap connection to bastion server if proxy server is configured + if p != nil { + var pConn net.Conn + var bConn ssh.Conn + var bChans <-chan ssh.NewChannel + var bReq <-chan *ssh.Request + + RegisterDialerType() + pConn, err = newHttpProxyConn(p, bAddr) + + if err != nil { + return nil, fmt.Errorf("Error connecting to proxy: %s", err) + } + + bConn, bChans, bReq, err = ssh.NewClientConn(pConn, bAddr, bConf) + + if err != nil { + return nil, fmt.Errorf("Error creating new client connection via proxy: %s", err) + } + + bastion = ssh.NewClient(bConn, bChans, bReq) + } else { + bastion, err = ssh.Dial(bProto, bAddr, bConf) + } + + if err != nil { + return nil, fmt.Errorf("Error connecting to bastion: %s", err) + } + + log.Printf("[DEBUG] Connecting via bastion (%s) to host: %s", bAddr, addr) + conn, err := bastion.Dial(proto, addr) + if err != nil { + bastion.Close() + return nil, err + } + + // Wrap it up so we close both things properly + return &bastionConn{ + Conn: conn, + Bastion: bastion, + }, nil + } +} + +type bastionConn struct { + net.Conn + Bastion *ssh.Client +} + +func (c *bastionConn) Close() error { + c.Conn.Close() + return c.Bastion.Close() +} + +func quoteShell(args []string, targetPlatform string) (string, error) { + if targetPlatform == TargetPlatformUnix { + return shquot.POSIXShell(args), nil + } + if targetPlatform == TargetPlatformWindows { + return shquot.WindowsArgv(args), nil + } + + return "", fmt.Errorf("Cannot quote shell command, target platform unknown: %s", targetPlatform) + +} diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go new file mode 100644 index 000000000000..ac2527966ed2 --- /dev/null +++ 
b/communicator/ssh/communicator_test.go @@ -0,0 +1,759 @@ +//go:build !race +// +build !race + +package ssh + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/ssh" +) + +// private key for mock server +const testServerPrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA19lGVsTqIT5iiNYRgnoY1CwkbETW5cq+Rzk5v/kTlf31XpSU +70HVWkbTERECjaYdXM2gGcbb+sxpq6GtXf1M3kVomycqhxwhPv4Cr6Xp4WT/jkFx +9z+FFzpeodGJWjOH6L2H5uX1Cvr9EDdQp9t9/J32/qBFntY8GwoUI/y/1MSTmMiF +tupdMODN064vd3gyMKTwrlQ8tZM6aYuyOPsutLlUY7M5x5FwMDYvnPDSeyT/Iw0z +s3B+NCyqeeMd2T7YzQFnRATj0M7rM5LoSs7DVqVriOEABssFyLj31PboaoLhOKgc +qoM9khkNzr7FHVvi+DhYM2jD0DwvqZLN6NmnLwIDAQABAoIBAQCGVj+kuSFOV1lT ++IclQYA6bM6uY5mroqcSBNegVxCNhWU03BxlW//BE9tA/+kq53vWylMeN9mpGZea +riEMIh25KFGWXqXlOOioH8bkMsqA8S7sBmc7jljyv+0toQ9vCCtJ+sueNPhxQQxH +D2YvUjfzBQ04I9+wn30BByDJ1QA/FoPsunxIOUCcRBE/7jxuLYcpR+JvEF68yYIh +atXRld4W4in7T65YDR8jK1Uj9XAcNeDYNpT/M6oFLx1aPIlkG86aCWRO19S1jLPT +b1ZAKHHxPMCVkSYW0RqvIgLXQOR62D0Zne6/2wtzJkk5UCjkSQ2z7ZzJpMkWgDgN +ifCULFPBAoGBAPoMZ5q1w+zB+knXUD33n1J+niN6TZHJulpf2w5zsW+m2K6Zn62M +MXndXlVAHtk6p02q9kxHdgov34Uo8VpuNjbS1+abGFTI8NZgFo+bsDxJdItemwC4 +KJ7L1iz39hRN/ZylMRLz5uTYRGddCkeIHhiG2h7zohH/MaYzUacXEEy3AoGBANz8 +e/msleB+iXC0cXKwds26N4hyMdAFE5qAqJXvV3S2W8JZnmU+sS7vPAWMYPlERPk1 +D8Q2eXqdPIkAWBhrx4RxD7rNc5qFNcQWEhCIxC9fccluH1y5g2M+4jpMX2CT8Uv+ +3z+NoJ5uDTXZTnLCfoZzgZ4nCZVZ+6iU5U1+YXFJAoGBANLPpIV920n/nJmmquMj +orI1R/QXR9Cy56cMC65agezlGOfTYxk5Cfl5Ve+/2IJCfgzwJyjWUsFx7RviEeGw +64o7JoUom1HX+5xxdHPsyZ96OoTJ5RqtKKoApnhRMamau0fWydH1yeOEJd+TRHhc +XStGfhz8QNa1dVFvENczja1vAoGABGWhsd4VPVpHMc7lUvrf4kgKQtTC2PjA4xoc +QJ96hf/642sVE76jl+N6tkGMzGjnVm4P2j+bOy1VvwQavKGoXqJBRd5Apppv727g +/SM7hBXKFc/zH80xKBBgP/i1DR7kdjakCoeu4ngeGywvu2jTS6mQsqzkK+yWbUxJ 
+I7mYBsECgYB/KNXlTEpXtz/kwWCHFSYA8U74l7zZbVD8ul0e56JDK+lLcJ0tJffk +gqnBycHj6AhEycjda75cs+0zybZvN4x65KZHOGW/O/7OAWEcZP5TPb3zf9ned3Hl +NsZoFj52ponUM6+99A2CmezFCN16c4mbA//luWF+k3VVqR6BpkrhKw== +-----END RSA PRIVATE KEY-----` + +// this cert was signed by the key from testCAPublicKey +const testServerHostCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgvQ3Bs1ex7277b9q6I0fNaWsVEC16f+LcT8RLPSVMEVMAAAADAQABAAABAQDX2UZWxOohPmKI1hGCehjULCRsRNblyr5HOTm/+ROV/fVelJTvQdVaRtMREQKNph1czaAZxtv6zGmroa1d/UzeRWibJyqHHCE+/gKvpenhZP+OQXH3P4UXOl6h0YlaM4fovYfm5fUK+v0QN1Cn2338nfb+oEWe1jwbChQj/L/UxJOYyIW26l0w4M3Tri93eDIwpPCuVDy1kzppi7I4+y60uVRjsznHkXAwNi+c8NJ7JP8jDTOzcH40LKp54x3ZPtjNAWdEBOPQzuszkuhKzsNWpWuI4QAGywXIuPfU9uhqguE4qByqgz2SGQ3OvsUdW+L4OFgzaMPQPC+pks3o2acvAAAAAAAAAAAAAAACAAAAB2NhLXRlc3QAAAANAAAACTEyNy4wLjAuMQAAAABag0jkAAAAAHDcHtAAAAAAAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81AAABDwAAAAdzc2gtcnNhAAABAEyoiVkZ5z79nh3WSU5mU2U7e2BItnnEqsJIm9EN+35uG0yORSXmQoaa9mtli7G3r79tyqEJd/C95EdNvU/9TjaoDcbH8OHP+Ue9XSfUzBuQ6bGSXe6mlZlO7QJ1cIyWphFP3MkrweDSiJ+SpeXzLzZkiJ7zKv5czhBEyG/MujFgvikotL+eUNG42y2cgsesXSjENSBS3l11q55a+RM2QKt3W32im8CsSxrH6Mz6p4JXQNgsVvZRknLxNlWXULFB2HLTunPKzJNMTf6xZf66oivSBAXVIdNKhlVpAQ3dT/dW5K6J4aQF/hjWByyLprFwZ16cPDqvtalnTCpbRYelNbw=` + +const testCAPublicKey = `ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81` + +func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string { + serverConfig := &ssh.ServerConfig{ + PasswordCallback: acceptUserPass("user", "pass"), + PublicKeyCallback: acceptPublicKey(pubKey), + } + + var err error + if signer == nil { + signer, err = ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatalf("unable to parse private key: %s", err) + } + } + serverConfig.AddHostKey(signer) + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Unable to listen for connection: %s", err) + } + + go func() { + defer l.Close() + c, err := l.Accept() + if err != nil { + t.Errorf("Unable to accept incoming connection: %s", err) + } + defer c.Close() + conn, chans, _, err := ssh.NewServerConn(c, serverConfig) + if err != nil { + t.Logf("Handshaking error: %v", err) + } + t.Log("Accepted SSH connection") + + for newChannel := range chans { + channel, requests, err := newChannel.Accept() + if err != nil { + t.Errorf("Unable to accept channel.") + } + t.Log("Accepted channel") + + go func(in <-chan *ssh.Request) { + defer channel.Close() + for req := range in { + // since this channel's requests are serviced serially, + // this will block keepalive probes, and can simulate a + // hung connection. 
+ if bytes.Contains(req.Payload, []byte("sleep")) { + time.Sleep(time.Second) + } + + if req.WantReply { + req.Reply(true, nil) + } + } + }(requests) + } + conn.Close() + }() + + return l.Addr().String() +} + +func TestNew_Invalid(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err == nil { + t.Fatal("should have had an error connecting") + } +} + +func TestNew_InvalidHost(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + _, err := New(v) + if err == nil { + t.Fatal("should have had an error creating communicator") + } +} + +func TestStart(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } +} + +// TestKeepAlives verifies that the keepalive messages don't interfere with +// normal operation of the client. 
+func TestKeepAlives(t *testing.T) { + ivl := keepAliveInterval + keepAliveInterval = 250 * time.Millisecond + defer func() { keepAliveInterval = ivl }() + + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + if err := c.Connect(nil); err != nil { + t.Fatal(err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "sleep" + cmd.Stdout = stdout + + // wait a bit before executing the command, so that at least 1 keepalive is sent + time.Sleep(500 * time.Millisecond) + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } +} + +// TestDeadConnection verifies that failed keepalive messages will eventually +// kill the connection. 
+func TestFailedKeepAlives(t *testing.T) { + ivl := keepAliveInterval + del := maxKeepAliveDelay + maxKeepAliveDelay = 500 * time.Millisecond + keepAliveInterval = 250 * time.Millisecond + defer func() { + keepAliveInterval = ivl + maxKeepAliveDelay = del + }() + + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + if err := c.Connect(nil); err != nil { + t.Fatal(err) + } + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "sleep" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err == nil { + t.Fatal("expected connection error") + } +} + +func TestLostConnection(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } + + // The test server can't execute anything, so Wait will block, unless + // there's an error. Disconnect the communicator transport, to cause the + // command to fail. 
+ go func() { + time.Sleep(100 * time.Millisecond) + c.Disconnect() + }() + + err = cmd.Wait() + if err == nil { + t.Fatal("expected communicator error") + } +} + +func TestHostKey(t *testing.T) { + // get the server's public key + signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatalf("unable to parse private key: %v", err) + } + pubKey := fmt.Sprintf("ssh-rsa %s", base64.StdEncoding.EncodeToString(signer.PublicKey().Marshal())) + + address := newMockLineServer(t, nil, testClientPublicKey) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Password: "pass", + Host: host, + HostKey: pubKey, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } + + // now check with the wrong HostKey + address = newMockLineServer(t, nil, testClientPublicKey) + _, p, _ = net.SplitHostPort(address) + port, _ = strconv.Atoi(p) + + connInfo.HostKey = testClientPublicKey + connInfo.Port = uint16(port) + + cfg, err = prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c = &Communicator{ + connInfo: connInfo, + config: cfg, + } + + err = c.Start(&cmd) + if err == nil || !strings.Contains(err.Error(), "mismatch") { + t.Fatalf("expected host key mismatch, got error:%v", err) + } +} + +func TestHostCert(t *testing.T) { + pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testServerHostCert)) + if err != nil { + t.Fatal(err) + } + + signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatal(err) + } + + signer, err = ssh.NewCertSigner(pk.(*ssh.Certificate), signer) + if err != nil { + 
t.Fatal(err) + } + + address := newMockLineServer(t, signer, testClientPublicKey) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Password: "pass", + Host: host, + HostKey: testCAPublicKey, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } + + // now check with the wrong HostKey + address = newMockLineServer(t, signer, testClientPublicKey) + _, p, _ = net.SplitHostPort(address) + port, _ = strconv.Atoi(p) + + connInfo.HostKey = testClientPublicKey + connInfo.Port = uint16(port) + + cfg, err = prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c = &Communicator{ + connInfo: connInfo, + config: cfg, + } + + err = c.Start(&cmd) + if err == nil || !strings.Contains(err.Error(), "authorities") { + t.Fatalf("expected host key mismatch, got error:%v", err) + } +} + +const SERVER_PEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKR +Uji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iX +wvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6 +jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOh +sssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj +13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQIDAQABAoIBAQCmvQMXNmvCDqk7 +30zsVDvw4fHGH+azK3Od1aqTqcEMHISOUbCtckFPxLzIsoSltRQqB1kuRVG07skm +Stsu+xny4lLcSwBVuLRuykEK2EyYIc/5Owo6y9pkhkaSf5ZfFes4bnD6+B/BhRpp +PRMMq0E+xCkX/G6iIi9mhgdlqm0x/vKtjzQeeshw9+gRcRLUpX+UeKFKXMXcDayx +qekr1bAaQKNBhTK+CbZjcqzG4f+BXVGRTZ9nsPAV+yTnWUCU0TghwPmtthHbebqa 
+9hlkum7qik/bQj/tjJ8/b0vTfHQSVxhtPG/ZV2Tn9ZuL/vrkYqeyMU8XkJ/uaEvH +WPyOcB4BAoGBAP5o5JSEtPog+U3JFrLNSRjz5ofZNVkJzice+0XyqlzJDHhX5tF8 +mriYQZLLXYhckBm4IdkhTn/dVbXNQTzyy2WVuO5nU8bkCMvGL9CGpW4YGqwGf7NX +e4H3emtRjLv8VZpUHe/RUUDhmYvMSt1qmXuskfpROuGfLhQBUd6A4J+BAoGBAPGp +UcMKjrxZ5qjYU6DLgS+xeca4Eu70HgdbSQbRo45WubXjyXvTRFij36DrpxJWf1D7 +lIsyBifoTra/lAuC1NQXGYWjTCdk2ey8Ll5qOgiXvE6lINHABr+U/Z90/g6LuML2 +VzaZbq/QLcT3yVsdyTogKckzCaKsCpusyHE1CXAVAoGAd6kMglKc8N0bhZukgnsN ++5+UeacPcY6sGTh4RWErAjNKGzx1A2lROKvcg9gFaULoQECcIw2IZ5nKW5VsLueg +BWrTrcaJ4A2XmYjhKnp6SvspaGoyHD90hx/Iw7t6r1yzQsB3yDmytwqldtyjBdvC +zynPC2azhDWjraMlR7tka4ECgYAxwvLiHa9sm3qCtCDsUFtmrb3srITBjaUNUL/F +1q8+JR+Sk7gudj9xnTT0VvINNaB71YIt83wPBagHu4VJpYQbtDH+MbUBu6OgOtO1 +f1w53rzY2OncJxV8p7pd9mJGLoE6LC2jQY7oRw7Vq0xcJdME1BCmrIrEY3a/vaF8 +pjYuTQKBgQCIOH23Xita8KmhH0NdlWxZfcQt1j3AnOcKe6UyN4BsF8hqS7eTA52s +WjG5X2IBl7gs1eMM1qkqR8npS9nwfO/pBmZPwjiZoilypXxWj+c+P3vwre2yija4 +bXgFVj4KFBwhr1+8KcobxC0SAPEouMvSkxzjjw+gnebozUtPlud9jA== +-----END RSA PRIVATE KEY----- +` +const CLIENT_CERT_SIGNED_BY_SERVER = `ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgbMDNUn4M2TtzrSH7MOT2QsvLzZWjehJ5TYrBOp9p+lwAAAADAQABAAABAQCyu57E7zIWRyEWuaiOiikOSZKFjbwLkpE9fboFfLLsNUJj4zw+5bZUJtzWK8roPjgL8s1oPncro5wuTtI2Nu4fkpeFK0Hb33o6Eyksuj4Om4+6Uemn1QEcb0bZqK8Zyg9Dg9deP7LeE0v78b5/jZafFgwxv+/sMhM0PRD34NCDYcYmkkHlvQtQWFAdbPXCgghObedZyYdoqZVuhTsiPMWtQS/cc9M4tv6mPOuQlhZt3R/Oh/kwUyu45oGRb5bhO4JicozFS3oeClpU+UMbgslkzApJqxZBWN7+PDFSZhKk2GslyeyP4sH3E30Z00yVi/lQYgmQsB+Hg6ClemNQMNu/AAAAAAAAAAAAAAACAAAABHVzZXIAAAAIAAAABHVzZXIAAAAAWzBjXAAAAAB/POfPAAAAAAAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKRUji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iXwvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOhsssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQAAAQ8AAAAHc3NoLXJzYQAAAQC6sKEQHyl954BQn2BXuTgOB3NkENBxN7SD8ZaS8PNkDESytLjSIqrzoE6m7xuzprA+G23XRrCY/um3UvM7+7+zbwig2NIBbGbp3QFliQHegQKW6hTZP09jAQZk5jRrrEr/QT/s+gtHPmjxJK7XOQYxhInDKj+aJg62ExcwpQlP/0ATKNOIkdzTzzq916p0UOnnVaaPMKibh5Lv69GafIhKJRZSuuLN9fvs1G1RuUbxn/BNSeoRCr54L++Ztg09fJxunoyELs8mwgzCgB3pdZoUR2Z6ak05W4mvH3lkSz2BKUrlwxI6mterxhJy1GuN1K/zBG0gEMl2UTLajGK3qKM8 itbitloaner@MacBook-Pro-4.fios-router.home` +const CLIENT_PEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsruexO8yFkchFrmojoopDkmShY28C5KRPX26BXyy7DVCY+M8 +PuW2VCbc1ivK6D44C/LNaD53K6OcLk7SNjbuH5KXhStB2996OhMpLLo+DpuPulHp +p9UBHG9G2aivGcoPQ4PXXj+y3hNL+/G+f42WnxYMMb/v7DITND0Q9+DQg2HGJpJB +5b0LUFhQHWz1woIITm3nWcmHaKmVboU7IjzFrUEv3HPTOLb+pjzrkJYWbd0fzof5 +MFMruOaBkW+W4TuCYnKMxUt6HgpaVPlDG4LJZMwKSasWQVje/jwxUmYSpNhrJcns +j+LB9xN9GdNMlYv5UGIJkLAfh4OgpXpjUDDbvwIDAQABAoIBAEu2ctFVyk/pnbi0 +uRR4rl+hBvKQUeJNGj2ELvL4Ggs5nIAX2IOEZ7JKLC6FqpSrFq7pEd5g57aSvixX +s3DH4CN7w7fj1ShBCNPlHgIWewdRGpeA74vrDWdwNAEsFdDE6aZeCTOhpDGy1vNJ +OrtpzS5i9pN0jTvvEneEjtWSZIHiiVlN+0hsFaiwZ6KXON+sDccZPmnP6Fzwj5Rc 
+WS0dKSwnxnx0otWgwWFs8nr306nSeMsNmQkHsS9lz4DEVpp9owdzrX1JmbQvNYAV +ohmB3ET4JYFgerqPXJfed9poueGuWCP6MYhsjNeHN35QhofxdO5/0i3JlZfqwZei +tNq/0oECgYEA6SqjRqDiIp3ajwyB7Wf0cIQG/P6JZDyN1jl//htgniliIH5UP1Tm +uAMG5MincV6X9lOyXyh6Yofu5+NR0yt9SqbDZVJ3ZCxKTun7pxJvQFd7wl5bMkiJ +qVfS08k6gQHHDoO+eel+DtpIfWc+e3tvX0aihSU0GZEMqDXYkkphLGECgYEAxDxb ++JwJ3N5UEjjkuvFBpuJnmjIaN9HvQkTv3inlx1gLE4iWBZXXsu4aWF8MCUeAAZyP +42hQDSkCYX/A22tYCEn/jfrU6A+6rkWBTjdUlYLvlSkhosSnO+117WEItb5cUE95 +hF4UY7LNs1AsDkV4WE87f/EjpxSwUAjB2Lfd/B8CgYAJ/JiHsuZcozQ0Qk3iVDyF +ATKnbWOHFozgqw/PW27U92LLj32eRM2o/gAylmGNmoaZt1YBe2NaiwXxiqv7hnZU +VzYxRcn1UWxRWvY7Xq/DKrwTRCVVzwOObEOMbKcD1YaoGX50DEso6bKHJH/pnAzW +INlfKIvFuI+5OK0w/tyQoQKBgQCf/jpaOxaLfrV62eobRQJrByLDBGB97GsvU7di +IjTWz8DQH0d5rE7d8uWF8ZCFrEcAiV6DYZQK9smbJqbd/uoacAKtBro5rkFdPwwK +8m/DKqsdqRhkdgOHh7bjYH7Sdy8ax4Fi27WyB6FQtmgFBrz0+zyetsODwQlzZ4Bs +qpSRrwKBgQC0vWHrY5aGIdF+b8EpP0/SSLLALpMySHyWhDyxYcPqdhszYbjDcavv +xrrLXNUD2duBHKPVYE+7uVoDkpZXLUQ4x8argo/IwQM6Kh2ma1y83TYMT6XhL1+B +5UPcl6RXZBCkiU7nFIG6/0XKFqVWc3fU8e09X+iJwXIJ5Jatywtg+g== +-----END RSA PRIVATE KEY----- +` + +func TestCertificateBasedAuth(t *testing.T) { + signer, err := ssh.ParsePrivateKey([]byte(SERVER_PEM)) + if err != nil { + t.Fatalf("unable to parse private key: %v", err) + } + address := newMockLineServer(t, signer, CLIENT_CERT_SIGNED_BY_SERVER) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Host: host, + PrivateKey: CLIENT_PEM, + Certificate: CLIENT_CERT_SIGNED_BY_SERVER, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } +} + +func TestAccUploadFile(t *testing.T) { + 
// use the local ssh server and scp binary to check uploads + if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { + t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") + t.Skip() + } + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal(os.Getenv("USER")), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + tmpDir := t.TempDir() + source, err := os.CreateTemp(tmpDir, "tempfile.in") + if err != nil { + t.Fatal(err) + } + + content := "this is the file content" + if _, err := source.WriteString(content); err != nil { + t.Fatal(err) + } + source.Seek(0, io.SeekStart) + + tmpFile := filepath.Join(tmpDir, "tempFile.out") + + testUploadSizeHook = func(size int64) { + if size != int64(len(content)) { + t.Errorf("expected %d bytes, got %d\n", len(content), size) + } + } + defer func() { + testUploadSizeHook = nil + }() + + err = c.Upload(tmpFile, source) + if err != nil { + t.Fatalf("error uploading file: %s", err) + } + + data, err := ioutil.ReadFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if string(data) != content { + t.Fatalf("bad: %s", data) + } +} + +func TestAccHugeUploadFile(t *testing.T) { + // use the local ssh server and scp binary to check uploads + if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { + t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") + t.Skip() + } + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "user": cty.StringVal(os.Getenv("USER")), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + // copy 4GB of data, random to prevent compression. 
+ size := int64(1 << 32) + source := io.LimitReader(rand.New(rand.NewSource(0)), size) + + dest, err := ioutil.TempFile("", "communicator") + if err != nil { + t.Fatal(err) + } + destName := dest.Name() + dest.Close() + defer os.Remove(destName) + + t.Log("Uploading to", destName) + + // bypass the Upload method so we can directly supply the file size + // preventing the extra copy of the huge file. + targetDir := filepath.Dir(destName) + targetFile := filepath.Base(destName) + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + return scpUploadFile(targetFile, source, w, stdoutR, size) + } + + cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) + if err != nil { + t.Fatal(err) + } + err = c.scpSession(cmd, scpFunc) + if err != nil { + t.Fatal(err) + } + + // check the final file size + fs, err := os.Stat(destName) + if err != nil { + t.Fatal(err) + } + + if fs.Size() != size { + t.Fatalf("expected file size of %d, got %d", size, fs.Size()) + } +} + +func TestScriptPath(t *testing.T) { + cases := []struct { + Input string + Pattern string + }{ + { + "/tmp/script.sh", + `^/tmp/script\.sh$`, + }, + { + "/tmp/script_%RAND%.sh", + `^/tmp/script_(\d+)\.sh$`, + }, + } + + for _, tc := range cases { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) + if err != nil { + t.Fatalf("err: %s", err) + } + output := comm.ScriptPath() + + match, err := regexp.Match(tc.Pattern, []byte(output)) + if err != nil { + t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) + } + if !match { + t.Fatalf("bad: %s\n\n%s", tc.Input, output) + } + } +} + +func TestScriptPath_randSeed(t *testing.T) { + // Pre GH-4186 fix, this value was the deterministic start the pseudorandom + // chain of unseeded math/rand values for Int31(). 
+ staticSeedPath := "/tmp/terraform_1298498081.sh" + c, err := New(cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + })) + if err != nil { + t.Fatalf("err: %s", err) + } + path := c.ScriptPath() + if path == staticSeedPath { + t.Fatalf("rand not seeded! got: %s", path) + } +} + +var testClientPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE6A1c4n+OtEPEFlNKTZf2i03L3NylSYmvmJ8OLmzLuPZmJBJt4G3VZ/60s1aKzwLKrTq20S+ONG4zvnK5zIPoauoNNdUJKbg944hB4OE+HDbrBhk7SH+YWCsCILBoSXwAVdUEic6FWf/SeqBSmTBySHvpuNOw16J+SK6Ardx8k64F2tRkZuC6AmOZijgKa/sQKjWAIVPk34ECM6OLfPc3kKUEfkdpYLvuMfuRMfSTlxn5lFC0b0SovK9aWfNMBH9iXLQkieQ5rXoyzUC7mwgnASgl8cqw1UrToiUuhvneduXBhbQfmC/Upv+tL6dSSk+0DlgVKEHuJmc8s8+/qpdL` + +func acceptUserPass(goodUser, goodPass string) func(ssh.ConnMetadata, []byte) (*ssh.Permissions, error) { + return func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + if c.User() == goodUser && string(pass) == goodPass { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + } +} + +func acceptPublicKey(keystr string) func(ssh.ConnMetadata, ssh.PublicKey) (*ssh.Permissions, error) { + return func(_ ssh.ConnMetadata, inkey ssh.PublicKey) (*ssh.Permissions, error) { + goodkey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(keystr)) + if err != nil { + return nil, fmt.Errorf("error parsing key: %v", err) + } + + if bytes.Equal(inkey.Marshal(), goodkey.Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("public key rejected") + } +} diff --git a/internal/communicator/ssh/http_proxy.go b/communicator/ssh/http_proxy.go similarity index 100% rename from internal/communicator/ssh/http_proxy.go rename to communicator/ssh/http_proxy.go diff --git a/internal/communicator/ssh/password.go b/communicator/ssh/password.go similarity index 100% rename from internal/communicator/ssh/password.go rename to communicator/ssh/password.go diff --git a/internal/communicator/ssh/password_test.go 
b/communicator/ssh/password_test.go similarity index 100% rename from internal/communicator/ssh/password_test.go rename to communicator/ssh/password_test.go diff --git a/communicator/ssh/provisioner.go b/communicator/ssh/provisioner.go new file mode 100644 index 000000000000..a7ec6862ed61 --- /dev/null +++ b/communicator/ssh/provisioner.go @@ -0,0 +1,593 @@ +package ssh + +import ( + "bytes" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/shared" + sshagent "github.com/xanzy/ssh-agent" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "root" + + // DefaultPort is used if there is no port given + DefaultPort = 22 + + // DefaultUnixScriptPath is used as the path to copy the file to + // for remote execution on unix if not provided otherwise. + DefaultUnixScriptPath = "/tmp/terraform_%RAND%.sh" + // DefaultWindowsScriptPath is used as the path to copy the file to + // for remote execution on windows if not provided otherwise. + DefaultWindowsScriptPath = "C:/windows/temp/terraform_%RAND%.cmd" + + // DefaultTimeout is used if there is no timeout given + DefaultTimeout = 5 * time.Minute + + // TargetPlatformUnix used for cleaner code, and is used if no target platform has been specified + TargetPlatformUnix = "unix" + //TargetPlatformWindows used for cleaner code + TargetPlatformWindows = "windows" +) + +// connectionInfo is decoded from the ConnInfo of the resource. These are the +// only keys we look at. If a PrivateKey is given, that is used instead +// of a password. 
+type connectionInfo struct { + User string + Password string + PrivateKey string + Certificate string + Host string + HostKey string + Port uint16 + Agent bool + ScriptPath string + TargetPlatform string + Timeout string + TimeoutVal time.Duration + + ProxyScheme string + ProxyHost string + ProxyPort uint16 + ProxyUserName string + ProxyUserPassword string + + BastionUser string + BastionPassword string + BastionPrivateKey string + BastionCertificate string + BastionHost string + BastionHostKey string + BastionPort uint16 + + AgentIdentity string +} + +// decodeConnInfo decodes the given cty.Value using the same behavior as the +// lgeacy mapstructure decoder in order to preserve as much of the existing +// logic as possible for compatibility. +func decodeConnInfo(v cty.Value) (*connectionInfo, error) { + connInfo := &connectionInfo{} + if v.IsNull() { + return connInfo, nil + } + + for k, v := range v.AsValueMap() { + if v.IsNull() { + continue + } + + switch k { + case "user": + connInfo.User = v.AsString() + case "password": + connInfo.Password = v.AsString() + case "private_key": + connInfo.PrivateKey = v.AsString() + case "certificate": + connInfo.Certificate = v.AsString() + case "host": + connInfo.Host = v.AsString() + case "host_key": + connInfo.HostKey = v.AsString() + case "port": + if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil { + return nil, err + } + case "agent": + connInfo.Agent = v.True() + case "script_path": + connInfo.ScriptPath = v.AsString() + case "target_platform": + connInfo.TargetPlatform = v.AsString() + case "timeout": + connInfo.Timeout = v.AsString() + case "proxy_scheme": + connInfo.ProxyScheme = v.AsString() + case "proxy_host": + connInfo.ProxyHost = v.AsString() + case "proxy_port": + if err := gocty.FromCtyValue(v, &connInfo.ProxyPort); err != nil { + return nil, err + } + case "proxy_user_name": + connInfo.ProxyUserName = v.AsString() + case "proxy_user_password": + connInfo.ProxyUserPassword = v.AsString() + case 
"bastion_user": + connInfo.BastionUser = v.AsString() + case "bastion_password": + connInfo.BastionPassword = v.AsString() + case "bastion_private_key": + connInfo.BastionPrivateKey = v.AsString() + case "bastion_certificate": + connInfo.BastionCertificate = v.AsString() + case "bastion_host": + connInfo.BastionHost = v.AsString() + case "bastion_host_key": + connInfo.BastionHostKey = v.AsString() + case "bastion_port": + if err := gocty.FromCtyValue(v, &connInfo.BastionPort); err != nil { + return nil, err + } + case "agent_identity": + connInfo.AgentIdentity = v.AsString() + } + } + return connInfo, nil +} + +// parseConnectionInfo is used to convert the raw configuration into the +// *connectionInfo struct. +func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + connInfo, err := decodeConnInfo(v) + if err != nil { + return nil, err + } + + // To default Agent to true, we need to check the raw string, since the + // decoded boolean can't represent "absence of config". + // + // And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we + // shouldn't try. + agent := v.GetAttr("agent") + if agent.IsNull() && os.Getenv("SSH_AUTH_SOCK") != "" { + connInfo.Agent = true + } + + if connInfo.User == "" { + connInfo.User = DefaultUser + } + + // Check if host is empty. + // Otherwise return error. + if connInfo.Host == "" { + return nil, fmt.Errorf("host for provisioner cannot be empty") + } + + // Format the host if needed. + // Needed for IPv6 support. 
+ connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + connInfo.Port = DefaultPort + } + // Set default targetPlatform to unix if it's empty + if connInfo.TargetPlatform == "" { + connInfo.TargetPlatform = TargetPlatformUnix + } else if connInfo.TargetPlatform != TargetPlatformUnix && connInfo.TargetPlatform != TargetPlatformWindows { + return nil, fmt.Errorf("target_platform for provisioner has to be either %s or %s", TargetPlatformUnix, TargetPlatformWindows) + } + // Choose an appropriate default script path based on the target platform. There is no single + // suitable default script path which works on both UNIX and Windows targets. + if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformUnix { + connInfo.ScriptPath = DefaultUnixScriptPath + } + if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformWindows { + connInfo.ScriptPath = DefaultWindowsScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + // Default all bastion config attrs to their non-bastion counterparts + if connInfo.BastionHost != "" { + // Format the bastion host if needed. + // Needed for IPv6 support. 
+ connInfo.BastionHost = shared.IpFormat(connInfo.BastionHost) + + if connInfo.BastionUser == "" { + connInfo.BastionUser = connInfo.User + } + if connInfo.BastionPassword == "" { + connInfo.BastionPassword = connInfo.Password + } + if connInfo.BastionPrivateKey == "" { + connInfo.BastionPrivateKey = connInfo.PrivateKey + } + if connInfo.BastionCertificate == "" { + connInfo.BastionCertificate = connInfo.Certificate + } + if connInfo.BastionPort == 0 { + connInfo.BastionPort = connInfo.Port + } + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +// prepareSSHConfig is used to turn the *ConnectionInfo provided into a +// usable *SSHConfig for client initialization. +func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) { + sshAgent, err := connectToAgent(connInfo) + if err != nil { + return nil, err + } + + host := fmt.Sprintf("%s:%d", connInfo.Host, connInfo.Port) + + sshConf, err := buildSSHClientConfig(sshClientConfigOpts{ + user: connInfo.User, + host: host, + privateKey: connInfo.PrivateKey, + password: connInfo.Password, + hostKey: connInfo.HostKey, + certificate: connInfo.Certificate, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + var p *proxyInfo + + if connInfo.ProxyHost != "" { + p = newProxyInfo( + fmt.Sprintf("%s:%d", connInfo.ProxyHost, connInfo.ProxyPort), + connInfo.ProxyScheme, + connInfo.ProxyUserName, + connInfo.ProxyUserPassword, + ) + } + + connectFunc := ConnectFunc("tcp", host, p) + + var bastionConf *ssh.ClientConfig + if connInfo.BastionHost != "" { + bastionHost := fmt.Sprintf("%s:%d", connInfo.BastionHost, connInfo.BastionPort) + + bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{ + user: 
connInfo.BastionUser, + host: bastionHost, + privateKey: connInfo.BastionPrivateKey, + password: connInfo.BastionPassword, + hostKey: connInfo.HostKey, + certificate: connInfo.BastionCertificate, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + connectFunc = BastionConnectFunc("tcp", bastionHost, bastionConf, "tcp", host, p) + } + + config := &sshConfig{ + config: sshConf, + connection: connectFunc, + sshAgent: sshAgent, + } + return config, nil +} + +type sshClientConfigOpts struct { + privateKey string + password string + sshAgent *sshAgent + certificate string + user string + host string + hostKey string +} + +func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) { + hkCallback := ssh.InsecureIgnoreHostKey() + + if opts.hostKey != "" { + // The knownhosts package only takes paths to files, but terraform + // generally wants to handle config data in-memory. Rather than making + // the known_hosts file an exception, write out the data to a temporary + // file to create the HostKeyCallback. + tf, err := ioutil.TempFile("", "tf-known_hosts") + if err != nil { + return nil, fmt.Errorf("failed to create temp known_hosts file: %s", err) + } + defer tf.Close() + defer os.RemoveAll(tf.Name()) + + // we mark this as a CA as well, but the host key fallback will still + // use it as a direct match if the remote host doesn't return a + // certificate. 
+ if _, err := tf.WriteString(fmt.Sprintf("@cert-authority %s %s\n", opts.host, opts.hostKey)); err != nil { + return nil, fmt.Errorf("failed to write temp known_hosts file: %s", err) + } + tf.Sync() + + hkCallback, err = knownhosts.New(tf.Name()) + if err != nil { + return nil, err + } + } + + conf := &ssh.ClientConfig{ + HostKeyCallback: hkCallback, + User: opts.user, + } + + if opts.privateKey != "" { + if opts.certificate != "" { + log.Println("using client certificate for authentication") + + certSigner, err := signCertWithPrivateKey(opts.privateKey, opts.certificate) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, certSigner) + } else { + log.Println("using private key for authentication") + + pubKeyAuth, err := readPrivateKey(opts.privateKey) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, pubKeyAuth) + } + } + + if opts.password != "" { + conf.Auth = append(conf.Auth, ssh.Password(opts.password)) + conf.Auth = append(conf.Auth, ssh.KeyboardInteractive( + PasswordKeyboardInteractive(opts.password))) + } + + if opts.sshAgent != nil { + conf.Auth = append(conf.Auth, opts.sshAgent.Auth()) + } + + return conf, nil +} + +// Create a Cert Signer and return ssh.AuthMethod +func signCertWithPrivateKey(pk string, certificate string) (ssh.AuthMethod, error) { + rawPk, err := ssh.ParseRawPrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("failed to parse private key %q: %s", pk, err) + } + + pcert, _, _, _, err := ssh.ParseAuthorizedKey([]byte(certificate)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate %q: %s", certificate, err) + } + + usigner, err := ssh.NewSignerFromKey(rawPk) + if err != nil { + return nil, fmt.Errorf("failed to create signer from raw private key %q: %s", rawPk, err) + } + + ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner) + if err != nil { + return nil, fmt.Errorf("failed to create cert signer %q: %s", usigner, err) + } + + return 
ssh.PublicKeys(ucertSigner), nil +} + +func readPrivateKey(pk string) (ssh.AuthMethod, error) { + // We parse the private key on our own first so that we can + // show a nicer error if the private key has a password. + block, _ := pem.Decode([]byte(pk)) + if block == nil { + return nil, errors.New("Failed to read ssh private key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, errors.New( + "Failed to read ssh private key: password protected keys are\n" + + "not supported. Please decrypt the key prior to use.") + } + + signer, err := ssh.ParsePrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("Failed to parse ssh private key: %s", err) + } + + return ssh.PublicKeys(signer), nil +} + +func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) { + if !connInfo.Agent { + // No agent configured + return nil, nil + } + + agent, conn, err := sshagent.New() + if err != nil { + return nil, err + } + + // connection close is handled over in Communicator + return &sshAgent{ + agent: agent, + conn: conn, + id: connInfo.AgentIdentity, + }, nil + +} + +// A tiny wrapper around an agent.Agent to expose the ability to close its +// associated connection on request. +type sshAgent struct { + agent agent.Agent + conn net.Conn + id string +} + +func (a *sshAgent) Close() error { + if a.conn == nil { + return nil + } + + return a.conn.Close() +} + +// make an attempt to either read the identity file or find a corresponding +// public key file using the typical openssh naming convention. +// This returns the public key in wire format, or nil when a key is not found. 
+func findIDPublicKey(id string) []byte { + for _, d := range idKeyData(id) { + signer, err := ssh.ParsePrivateKey(d) + if err == nil { + log.Println("[DEBUG] parsed id private key") + pk := signer.PublicKey() + return pk.Marshal() + } + + // try it as a publicKey + pk, err := ssh.ParsePublicKey(d) + if err == nil { + log.Println("[DEBUG] parsed id public key") + return pk.Marshal() + } + + // finally try it as an authorized key + pk, _, _, _, err = ssh.ParseAuthorizedKey(d) + if err == nil { + log.Println("[DEBUG] parsed id authorized key") + return pk.Marshal() + } + } + + return nil +} + +// Try to read an id file using the id as the file path. Also read the .pub +// file if it exists, as the id file may be encrypted. Return only the file +// data read. We don't need to know what data came from which path, as we will +// try parsing each as a private key, a public key and an authorized key +// regardless. +func idKeyData(id string) [][]byte { + idPath, err := filepath.Abs(id) + if err != nil { + return nil + } + + var fileData [][]byte + + paths := []string{idPath} + + if !strings.HasSuffix(idPath, ".pub") { + paths = append(paths, idPath+".pub") + } + + for _, p := range paths { + d, err := ioutil.ReadFile(p) + if err != nil { + log.Printf("[DEBUG] error reading %q: %s", p, err) + continue + } + log.Printf("[DEBUG] found identity data at %q", p) + fileData = append(fileData, d) + } + + return fileData +} + +// sortSigners moves a signer with an agent comment field matching the +// agent_identity to the head of the list when attempting authentication. This +// helps when there are more keys loaded in an agent than the host will allow +// attempts. 
+func (s *sshAgent) sortSigners(signers []ssh.Signer) { + if s.id == "" || len(signers) < 2 { + return + } + + // if we can locate the public key, either by extracting it from the id or + // locating the .pub file, then we can more easily determine an exact match + idPk := findIDPublicKey(s.id) + + // if we have a signer with a connect field that matches the id, send that + // first, otherwise put close matches at the front of the list. + head := 0 + for i := range signers { + pk := signers[i].PublicKey() + k, ok := pk.(*agent.Key) + if !ok { + continue + } + + // check for an exact match first + if bytes.Equal(pk.Marshal(), idPk) || s.id == k.Comment { + signers[0], signers[i] = signers[i], signers[0] + break + } + + // no exact match yet, move it to the front if it's close. The agent + // may have loaded as a full filepath, while the config refers to it by + // filename only. + if strings.HasSuffix(k.Comment, s.id) { + signers[head], signers[i] = signers[i], signers[head] + head++ + continue + } + } +} + +func (s *sshAgent) Signers() ([]ssh.Signer, error) { + signers, err := s.agent.Signers() + if err != nil { + return nil, err + } + + s.sortSigners(signers) + return signers, nil +} + +func (a *sshAgent) Auth() ssh.AuthMethod { + return ssh.PublicKeysCallback(a.Signers) +} + +func (a *sshAgent) ForwardToAgent(client *ssh.Client) error { + return agent.ForwardToAgent(client, a.agent) +} diff --git a/internal/communicator/ssh/provisioner_test.go b/communicator/ssh/provisioner_test.go similarity index 100% rename from internal/communicator/ssh/provisioner_test.go rename to communicator/ssh/provisioner_test.go diff --git a/internal/communicator/ssh/ssh_test.go b/communicator/ssh/ssh_test.go similarity index 100% rename from internal/communicator/ssh/ssh_test.go rename to communicator/ssh/ssh_test.go diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go new file mode 100644 index 000000000000..d877e3065f2d --- /dev/null +++ 
b/communicator/winrm/communicator.go @@ -0,0 +1,202 @@ +package winrm + +import ( + "fmt" + "io" + "log" + "math/rand" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/provisioners" + "github.com/masterzen/winrm" + "github.com/packer-community/winrmcp/winrmcp" + "github.com/zclconf/go-cty/cty" +) + +// Communicator represents the WinRM communicator +type Communicator struct { + connInfo *connectionInfo + client *winrm.Client + endpoint *winrm.Endpoint + rand *rand.Rand +} + +// New creates a new communicator implementation over WinRM. +func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) + if err != nil { + return nil, err + } + + endpoint := &winrm.Endpoint{ + Host: connInfo.Host, + Port: int(connInfo.Port), + HTTPS: connInfo.HTTPS, + Insecure: connInfo.Insecure, + Timeout: connInfo.TimeoutVal, + } + if len(connInfo.CACert) > 0 { + endpoint.CACert = []byte(connInfo.CACert) + } + + comm := &Communicator{ + connInfo: connInfo, + endpoint: endpoint, + // Seed our own rand source so that script paths are not deterministic + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o provisioners.UIOutput) error { + // Set the client to nil since we'll (re)create it + c.client = nil + + params := winrm.DefaultParameters + params.Timeout = formatDuration(c.Timeout()) + if c.connInfo.NTLM { + params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + client, err := winrm.NewClientWithParameters( + c.endpoint, c.connInfo.User, c.connInfo.Password, params) + if err != nil { + return err + } + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via WinRM...\n"+ + " Host: %s\n"+ + " Port: %d\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " HTTPS: %t\n"+ + " Insecure: %t\n"+ + " NTLM: %t\n"+ + " 
CACert: %t", + c.connInfo.Host, + c.connInfo.Port, + c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.HTTPS, + c.connInfo.Insecure, + c.connInfo.NTLM, + c.connInfo.CACert != "", + )) + } + + log.Printf("[DEBUG] connecting to remote shell using WinRM") + shell, err := client.CreateShell() + if err != nil { + log.Printf("[ERROR] error creating shell: %s", err) + return err + } + + err = shell.Close() + if err != nil { + log.Printf("[ERROR] error closing shell: %s", err) + return err + } + + if o != nil { + o.Output("Connected!") + } + + c.client = client + + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func (c *Communicator) Disconnect() error { + c.client = nil + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(c.rand.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(rc *remote.Cmd) error { + rc.Init() + log.Printf("[DEBUG] starting remote command: %s", rc.Command) + + // TODO: make sure communicators always connect first, so we can get output + // from the connection. 
+ if c.client == nil { + log.Println("[WARN] winrm client not connected, attempting to connect") + if err := c.Connect(nil); err != nil { + return err + } + } + + status, err := c.client.Run(rc.Command, rc.Stdout, rc.Stderr) + rc.SetExitStatus(status, err) + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + wcp, err := c.newCopyClient() + if err != nil { + return err + } + log.Printf("[DEBUG] Uploading file to '%s'", path) + return wcp.Write(path, input) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + return c.Upload(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) + wcp, err := c.newCopyClient() + if err != nil { + return err + } + return wcp.Copy(src, dst) +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + + config := winrmcp.Config{ + Auth: winrmcp.Auth{ + User: c.connInfo.User, + Password: c.connInfo.Password, + }, + Https: c.connInfo.HTTPS, + Insecure: c.connInfo.Insecure, + OperationTimeout: c.Timeout(), + MaxOperationsPerShell: 15, // lowest common denominator + } + + if c.connInfo.NTLM { + config.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + if c.connInfo.CACert != "" { + config.CACertBytes = []byte(c.connInfo.CACert) + } + + return winrmcp.New(addr, &config) +} diff --git a/communicator/winrm/communicator_test.go b/communicator/winrm/communicator_test.go new file mode 100644 index 000000000000..bd8d2ecd5421 --- /dev/null +++ b/communicator/winrm/communicator_test.go @@ -0,0 +1,218 @@ +package winrm + +import ( + "bytes" + "io" + "regexp" + "strconv" + "testing" + + 
"github.com/dylanmei/winrmtest" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/communicator/shared" + "github.com/zclconf/go-cty/cty" +) + +func newMockWinRMServer(t *testing.T) *winrmtest.Remote { + wrm := winrmtest.NewRemote() + + wrm.CommandFunc( + winrmtest.MatchText("echo foo"), + func(out, err io.Writer) int { + out.Write([]byte("foo")) + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchPattern(`^echo c29tZXRoaW5n >> ".*"$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchPattern(`^powershell.exe -EncodedCommand .*$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchText("powershell"), + func(out, err io.Writer) int { + return 0 + }) + + return wrm +} + +func TestStart(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } + cmd.Wait() + + if stdout.String() != "foo" { + t.Fatalf("bad command response: expected %q, got %q", "foo", stdout.String()) + } +} + +func TestUpload(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error 
creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something"))) + if err != nil { + t.Fatalf("error uploading file: %s", err) + } +} + +func TestScriptPath(t *testing.T) { + cases := []struct { + Input string + Pattern string + }{ + { + "/tmp/script.sh", + `^/tmp/script\.sh$`, + }, + { + "/tmp/script_%RAND%.sh", + `^/tmp/script_(\d+)\.sh$`, + }, + } + + for _, tc := range cases { + v := cty.ObjectVal(map[string]cty.Value{ + "host": cty.StringVal(""), + "type": cty.StringVal("winrm"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) + if err != nil { + t.Fatalf("err: %s", err) + } + output := comm.ScriptPath() + + match, err := regexp.Match(tc.Pattern, []byte(output)) + if err != nil { + t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) + } + if !match { + t.Fatalf("bad: %s\n\n%s", tc.Input, output) + } + } +} + +func TestNoTransportDecorator(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + if c.client.TransportDecorator != nil { + t.Fatal("bad TransportDecorator: expected nil, got non-nil") + } +} + +func TestTransportDecorator(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": 
cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "use_ntlm": cty.StringVal("true"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + if c.client.TransportDecorator == nil { + t.Fatal("bad TransportDecorator: expected non-nil, got nil") + } +} + +func TestScriptPath_randSeed(t *testing.T) { + // Pre GH-4186 fix, this value was the deterministic start the pseudorandom + // chain of unseeded math/rand values for Int31(). + staticSeedPath := "C:/Temp/terraform_1298498081.cmd" + c, err := New(cty.NullVal(shared.ConnectionBlockSupersetSchema.ImpliedType())) + if err != nil { + t.Fatalf("err: %s", err) + } + path := c.ScriptPath() + if path == staticSeedPath { + t.Fatalf("rand not seeded! got: %s", path) + } +} diff --git a/communicator/winrm/provisioner.go b/communicator/winrm/provisioner.go new file mode 100644 index 000000000000..f77918ec8733 --- /dev/null +++ b/communicator/winrm/provisioner.go @@ -0,0 +1,169 @@ +package winrm + +import ( + "fmt" + "log" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/shared" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "Administrator" + + // DefaultPort is used if there is no port given + DefaultPort = 5985 + + // DefaultHTTPSPort is used if there is no port given and HTTPS is true + DefaultHTTPSPort = 5986 + + // DefaultScriptPath is used as the path to copy the file to + // for remote execution if not provided otherwise. + DefaultScriptPath = "C:/Temp/terraform_%RAND%.cmd" + + // DefaultTimeout is used if there is no timeout given + DefaultTimeout = 5 * time.Minute +) + +// connectionInfo is decoded from the ConnInfo of the resource. 
These are the +// only keys we look at. If a KeyFile is given, that is used instead +// of a password. +type connectionInfo struct { + User string + Password string + Host string + Port uint16 + HTTPS bool + Insecure bool + NTLM bool `mapstructure:"use_ntlm"` + CACert string `mapstructure:"cacert"` + Timeout string + ScriptPath string `mapstructure:"script_path"` + TimeoutVal time.Duration `mapstructure:"-"` +} + +// decodeConnInfo decodes the given cty.Value using the same behavior as the +// lgeacy mapstructure decoder in order to preserve as much of the existing +// logic as possible for compatibility. +func decodeConnInfo(v cty.Value) (*connectionInfo, error) { + connInfo := &connectionInfo{} + if v.IsNull() { + return connInfo, nil + } + + for k, v := range v.AsValueMap() { + if v.IsNull() { + continue + } + + switch k { + case "user": + connInfo.User = v.AsString() + case "password": + connInfo.Password = v.AsString() + case "host": + connInfo.Host = v.AsString() + case "port": + if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil { + return nil, err + } + case "https": + connInfo.HTTPS = v.True() + case "insecure": + connInfo.Insecure = v.True() + case "use_ntlm": + connInfo.NTLM = v.True() + case "cacert": + connInfo.CACert = v.AsString() + case "script_path": + connInfo.ScriptPath = v.AsString() + case "timeout": + connInfo.Timeout = v.AsString() + } + } + return connInfo, nil +} + +// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into +// a ConnectionInfo struct +func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + connInfo, err := decodeConnInfo(v) + if err != nil { + return nil, err + } + // Check on script paths which point to the default Windows TEMP folder because files + // which are put in there very early in the boot process could get cleaned/deleted + // before you had the change to execute 
them. + // + // TODO (SvH) Needs some more debugging to fully understand the exact sequence of events + // causing this... + if strings.HasPrefix(filepath.ToSlash(connInfo.ScriptPath), "C:/Windows/Temp") { + return nil, fmt.Errorf( + `Using the C:\Windows\Temp folder is not supported. Please use a different 'script_path'.`) + } + + if connInfo.User == "" { + connInfo.User = DefaultUser + } + + // Format the host if needed. + // Needed for IPv6 support. + connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + if connInfo.HTTPS { + connInfo.Port = DefaultHTTPSPort + } else { + connInfo.Port = DefaultPort + } + } + if connInfo.ScriptPath == "" { + connInfo.ScriptPath = DefaultScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +func formatDuration(duration time.Duration) string { + h := int(duration.Hours()) + m := int(duration.Minutes()) - h*60 + s := int(duration.Seconds()) - (h*3600 + m*60) + + res := "PT" + if h > 0 { + res = fmt.Sprintf("%s%dH", res, h) + } + if m > 0 { + res = fmt.Sprintf("%s%dM", res, m) + } + if s > 0 { + res = fmt.Sprintf("%s%dS", res, s) + } + + return res +} diff --git a/internal/communicator/winrm/provisioner_test.go b/communicator/winrm/provisioner_test.go similarity index 100% rename from internal/communicator/winrm/provisioner_test.go rename to communicator/winrm/provisioner_test.go diff --git a/configs/backend.go b/configs/backend.go new file mode 100644 index 000000000000..5d8b9732a8d4 --- /dev/null +++ b/configs/backend.go @@ -0,0 +1,55 @@ +package configs + 
+import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// Backend represents a "backend" block inside a "terraform" block in a module +// or file. +type Backend struct { + Type string + Config hcl.Body + + TypeRange hcl.Range + DeclRange hcl.Range +} + +func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { + return &Backend{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + Config: block.Body, + DeclRange: block.DefRange, + }, nil +} + +// Hash produces a hash value for the reciever that covers the type and the +// portions of the config that conform to the given schema. +// +// If the config does not conform to the schema then the result is not +// meaningful for comparison since it will be based on an incomplete result. +// +// As an exception, required attributes in the schema are treated as optional +// for the purpose of hashing, so that an incomplete configuration can still +// be hashed. Other errors, such as extraneous attributes, have no such special +// case. +func (b *Backend) Hash(schema *configschema.Block) int { + // Don't fail if required attributes are not set. Instead, we'll just + // hash them as nulls. 
+ schema = schema.NoneRequired() + spec := schema.DecoderSpec() + val, _ := hcldec.Decode(b.Config, spec, nil) + if val == cty.NilVal { + val = cty.UnknownVal(schema.ImpliedType()) + } + + toHash := cty.TupleVal([]cty.Value{ + cty.StringVal(b.Type), + val, + }) + + return toHash.Hash() +} diff --git a/configs/checks.go b/configs/checks.go new file mode 100644 index 000000000000..822141058138 --- /dev/null +++ b/configs/checks.go @@ -0,0 +1,141 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" +) + +// CheckRule represents a configuration-defined validation rule, precondition, +// or postcondition. Blocks of this sort can appear in a few different places +// in configuration, including "validation" blocks for variables, +// and "precondition" and "postcondition" blocks for resources. +type CheckRule struct { + // Condition is an expression that must evaluate to true if the condition + // holds or false if it does not. If the expression produces an error then + // that's considered to be a bug in the module defining the check. + // + // The available variables in a condition expression vary depending on what + // a check is attached to. For example, validation rules attached to + // input variables can only refer to the variable that is being validated. + Condition hcl.Expression + + // ErrorMessage should be one or more full sentences, which should be in + // English for consistency with the rest of the error message output but + // can in practice be in any language. The message should describe what is + // required for the condition to return true in a way that would make sense + // to a caller of the module. + // + // The error message expression has the same variables available for + // interpolation as the corresponding condition. 
+ ErrorMessage hcl.Expression + + DeclRange hcl.Range +} + +// validateSelfReferences looks for references in the check rule matching the +// specified resource address, returning error diagnostics if such a reference +// is found. +func (cr *CheckRule) validateSelfReferences(checkType string, addr addrs.Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + exprs := []hcl.Expression{ + cr.Condition, + cr.ErrorMessage, + } + for _, expr := range exprs { + if expr == nil { + continue + } + refs, _ := lang.References(expr.Variables()) + for _, ref := range refs { + var refAddr addrs.Resource + + switch rs := ref.Subject.(type) { + case addrs.Resource: + refAddr = rs + case addrs.ResourceInstance: + refAddr = rs.Resource + default: + continue + } + + if refAddr.Equal(addr) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid reference in %s", checkType), + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addr.String()), + Subject: expr.Range().Ptr(), + }) + break + } + } + } + return diags +} + +// decodeCheckRuleBlock decodes the contents of the given block as a check rule. +// +// Unlike most of our "decode..." functions, this one can be applied to blocks +// of various types as long as their body structures are "check-shaped". The +// function takes the containing block only because some error messages will +// refer to its location, and the returned object's DeclRange will be the +// block's header. +func decodeCheckRuleBlock(block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + var diags hcl.Diagnostics + cr := &CheckRule{ + DeclRange: block.DefRange, + } + + if override { + // For now we'll just forbid overriding check blocks, to simplify + // the initial design. If we can find a clear use-case for overriding + // checks in override files and there's a way to define it that + // isn't confusing then we could relax this. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Can't override %s blocks", block.Type), + Detail: fmt.Sprintf("Override files cannot override %q blocks.", block.Type), + Subject: cr.DeclRange.Ptr(), + }) + return cr, diags + } + + content, moreDiags := block.Body.Content(checkRuleBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["condition"]; exists { + cr.Condition = attr.Expr + + if len(cr.Condition.Variables()) == 0 { + // A condition expression that doesn't refer to any variable is + // pointless, because its result would always be a constant. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid %s expression", block.Type), + Detail: "The condition expression must refer to at least one object from elsewhere in the configuration, or else its result would not be checking anything.", + Subject: cr.Condition.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["error_message"]; exists { + cr.ErrorMessage = attr.Expr + } + + return cr, diags +} + +var checkRuleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "condition", + Required: true, + }, + { + Name: "error_message", + Required: true, + }, + }, +} diff --git a/internal/configs/cloud.go b/configs/cloud.go similarity index 100% rename from internal/configs/cloud.go rename to configs/cloud.go diff --git a/internal/configs/compat_shim.go b/configs/compat_shim.go similarity index 100% rename from internal/configs/compat_shim.go rename to configs/compat_shim.go diff --git a/configs/config.go b/configs/config.go new file mode 100644 index 000000000000..1b654e0d2b44 --- /dev/null +++ b/configs/config.go @@ -0,0 +1,557 @@ +package configs + +import ( + "fmt" + "log" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + 
"github.com/hashicorp/terraform/getproviders" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. 
+ // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. SourceAddrRaw is the same + // information, but as the raw string the user originally entered. + // + // These fields are meaningless for the root module, where their contents are undefined. + SourceAddr addrs.ModuleSource + SourceAddrRaw string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// ModuleRequirements represents the provider requirements for an individual +// module, along with references to any child modules. This is used to +// determine which modules require which providers. +type ModuleRequirements struct { + Name string + SourceAddr addrs.ModuleSource + SourceDir string + Requirements getproviders.Requirements + Children map[string]*ModuleRequirements +} + +// NewEmptyConfig constructs a single-node configuration tree with an empty +// root module. This is generally a pretty useless thing to do, so most callers +// should instead use BuildConfig. 
+func NewEmptyConfig() *Config { + ret := &Config{} + ret.Root = ret + ret.Children = make(map[string]*Config) + ret.Module = &Module{} + return ret +} + +// Depth returns the number of "hops" the receiver is from the root of its +// module tree, with the root module having a depth of zero. +func (c *Config) Depth() int { + ret := 0 + this := c + for this.Parent != nil { + ret++ + this = this.Parent + } + return ret +} + +// DeepEach calls the given function once for each module in the tree, starting +// with the receiver. +// +// A parent is always called before its children and children of a particular +// node are visited in lexicographic order by their names. +func (c *Config) DeepEach(cb func(c *Config)) { + cb(c) + + names := make([]string, 0, len(c.Children)) + for name := range c.Children { + names = append(names, name) + } + + for _, name := range names { + c.Children[name].DeepEach(cb) + } +} + +// AllModules returns a slice of all the receiver and all of its descendent +// nodes in the module tree, in the same order they would be visited by +// DeepEach. +func (c *Config) AllModules() []*Config { + var ret []*Config + c.DeepEach(func(c *Config) { + ret = append(ret, c) + }) + return ret +} + +// Descendent returns the descendent config that has the given path beneath +// the receiver, or nil if there is no such module. +// +// The path traverses the static module tree, prior to any expansion to handle +// count and for_each arguments. +// +// An empty path will just return the receiver, and is therefore pointless. +func (c *Config) Descendent(path addrs.Module) *Config { + current := c + for _, name := range path { + current = current.Children[name] + if current == nil { + return nil + } + } + return current +} + +// DescendentForInstance is like Descendent except that it accepts a path +// to a particular module instance in the dynamic module graph, returning +// the node from the static module graph that corresponds to it. 
+// +// All instances created by a particular module call share the same +// configuration, so the keys within the given path are disregarded. +func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { + current := c + for _, step := range path { + current = current.Children[step.Name] + if current == nil { + return nil + } + } + return current +} + +// EntersNewPackage returns true if this call is to an external module, either +// directly via a remote source address or indirectly via a registry source +// address. +// +// Other behaviors in Terraform may treat package crossings as a special +// situation, because that indicates that the caller and callee can change +// independently of one another and thus we should disallow using any features +// where the caller assumes anything about the callee other than its input +// variables, required provider configurations, and output values. +// +// It's not meaningful to ask if the Config representing the root module enters +// a new package because the root module is always outside of all module +// packages, and so this function will arbitrarily return false in that case. +func (c *Config) EntersNewPackage() bool { + return moduleSourceAddrEntersNewPackage(c.SourceAddr) +} + +// VerifyDependencySelections checks whether the given locked dependencies +// are acceptable for all of the version constraints reported in the +// configuration tree represented by the reciever. +// +// This function will errors only if any of the locked dependencies are out of +// range for corresponding constraints in the configuration. If there are +// multiple inconsistencies then it will attempt to describe as many of them +// as possible, rather than stopping at the first problem. 
+// +// It's typically the responsibility of "terraform init" to change the locked +// dependencies to conform with the configuration, and so +// VerifyDependencySelections is intended for other commands to check whether +// it did so correctly and to catch if anything has changed in configuration +// since the last "terraform init" which requires re-initialization. However, +// it's up to the caller to decide how to advise users recover from these +// errors, because the advise can vary depending on what operation the user +// is attempting. +func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error { + var errs []error + + reqs, diags := c.ProviderRequirements() + if diags.HasErrors() { + // It should be very unusual to get here, but unfortunately we can + // end up here in some edge cases where the config loader doesn't + // process version constraint strings in exactly the same way as + // the requirements resolver. (See the addProviderRequirements method + // for more information.) + errs = append(errs, fmt.Errorf("failed to determine the configuration's provider requirements: %s", diags.Error())) + } + + for providerAddr, constraints := range reqs { + if !depsfile.ProviderIsLockable(providerAddr) { + continue // disregard builtin providers, and such + } + if depLocks != nil && depLocks.ProviderIsOverridden(providerAddr) { + // The "overridden" case is for unusual special situations like + // dev overrides, so we'll explicitly note it in the logs just in + // case we see bug reports with these active and it helps us + // understand why we ended up using the "wrong" plugin. 
+ log.Printf("[DEBUG] Config.VerifyDependencySelections: skipping %s because it's overridden by a special configuration setting", providerAddr) + continue + } + + var lock *depsfile.ProviderLock + if depLocks != nil { // Should always be true in main code, but unfortunately sometimes not true in old tests that don't fill out arguments completely + lock = depLocks.Provider(providerAddr) + } + if lock == nil { + log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has no lock file entry to satisfy %q", providerAddr, getproviders.VersionConstraintsString(constraints)) + errs = append(errs, fmt.Errorf("provider %s: required by this configuration but no version is selected", providerAddr)) + continue + } + + selectedVersion := lock.Version() + allowedVersions := getproviders.MeetingConstraints(constraints) + log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has %s to satisfy %q", providerAddr, selectedVersion.String(), getproviders.VersionConstraintsString(constraints)) + if !allowedVersions.Has(selectedVersion) { + // The most likely cause of this is that the author of a module + // has changed its constraints, but this could also happen in + // some other unusual situations, such as the user directly + // editing the lock file to record something invalid. We'll + // distinguish those cases here in order to avoid the more + // specific error message potentially being a red herring in + // the edge-cases. 
+ currentConstraints := getproviders.VersionConstraintsString(constraints) + lockedConstraints := getproviders.VersionConstraintsString(lock.VersionConstraints()) + switch { + case currentConstraints != lockedConstraints: + errs = append(errs, fmt.Errorf("provider %s: locked version selection %s doesn't match the updated version constraints %q", providerAddr, selectedVersion.String(), currentConstraints)) + default: + errs = append(errs, fmt.Errorf("provider %s: version constraints %q don't match the locked version selection %s", providerAddr, currentConstraints, selectedVersion.String())) + } + } + } + + // Return multiple errors in an arbitrary-but-deterministic order. + sort.Slice(errs, func(i, j int) bool { + return errs[i].Error() < errs[j].Error() + }) + + return errs +} + +// ProviderRequirements searches the full tree of modules under the receiver +// for both explicit and implicit dependencies on providers. +// +// The result is a full manifest of all of the providers that must be available +// in order to work with the receiving configuration. +// +// If the returned diagnostics includes errors then the resulting Requirements +// may be incomplete. +func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs, true) + + return reqs, diags +} + +// ProviderRequirementsShallow searches only the direct receiver for explicit +// and implicit dependencies on providers. Descendant modules are ignored. +// +// If the returned diagnostics includes errors then the resulting Requirements +// may be incomplete. 
+func (c *Config) ProviderRequirementsShallow() (getproviders.Requirements, hcl.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs, false) + + return reqs, diags +} + +// ProviderRequirementsByModule searches the full tree of modules under the +// receiver for both explicit and implicit dependencies on providers, +// constructing a tree where the requirements are broken out by module. +// +// If the returned diagnostics includes errors then the resulting Requirements +// may be incomplete. +func (c *Config) ProviderRequirementsByModule() (*ModuleRequirements, hcl.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs, false) + + children := make(map[string]*ModuleRequirements) + for name, child := range c.Children { + childReqs, childDiags := child.ProviderRequirementsByModule() + childReqs.Name = name + children[name] = childReqs + diags = append(diags, childDiags...) + } + + ret := &ModuleRequirements{ + SourceAddr: c.SourceAddr, + SourceDir: c.Module.SourceDir, + Requirements: reqs, + Children: children, + } + + return ret, diags +} + +// addProviderRequirements is the main part of the ProviderRequirements +// implementation, gradually mutating a shared requirements object to +// eventually return. If the recurse argument is true, the requirements will +// include all descendant modules; otherwise, only the specified module. +func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse bool) hcl.Diagnostics { + var diags hcl.Diagnostics + + // First we'll deal with the requirements directly in _our_ module... + if c.Module.ProviderRequirements != nil { + for _, providerReqs := range c.Module.ProviderRequirements.RequiredProviders { + fqn := providerReqs.Type + if _, ok := reqs[fqn]; !ok { + // We'll at least have an unconstrained dependency then, but might + // add to this in the loop below. 
+ reqs[fqn] = nil + } + // The model of version constraints in this package is still the + // old one using a different upstream module to represent versions, + // so we'll need to shim that out here for now. The two parsers + // don't exactly agree in practice 🙄 so this might produce new errors. + // TODO: Use the new parser throughout this package so we can get the + // better error messages it produces in more situations. + constraints, err := getproviders.ParseVersionConstraints(providerReqs.Requirement.Required.String()) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + // The errors returned by ParseVersionConstraint already include + // the section of input that was incorrect, so we don't need to + // include that here. + Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), + Subject: providerReqs.Requirement.DeclRange.Ptr(), + }) + } + reqs[fqn] = append(reqs[fqn], constraints...) + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. + for _, rc := range c.Module.ManagedResources { + fqn := rc.Provider + if _, exists := reqs[fqn]; exists { + // Explicit dependency already present + continue + } + reqs[fqn] = nil + } + for _, rc := range c.Module.DataResources { + fqn := rc.Provider + if _, exists := reqs[fqn]; exists { + // Explicit dependency already present + continue + } + reqs[fqn] = nil + } + + // "provider" block can also contain version constraints + for _, provider := range c.Module.ProviderConfigs { + fqn := c.Module.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: provider.Name}) + if _, ok := reqs[fqn]; !ok { + // We'll at least have an unconstrained dependency then, but might + // add to this in the loop below. 
+ reqs[fqn] = nil + } + if provider.Version.Required != nil { + // The model of version constraints in this package is still the + // old one using a different upstream module to represent versions, + // so we'll need to shim that out here for now. The two parsers + // don't exactly agree in practice 🙄 so this might produce new errors. + // TODO: Use the new parser throughout this package so we can get the + // better error messages it produces in more situations. + constraints, err := getproviders.ParseVersionConstraints(provider.Version.Required.String()) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + // The errors returned by ParseVersionConstraint already include + // the section of input that was incorrect, so we don't need to + // include that here. + Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), + Subject: provider.Version.DeclRange.Ptr(), + }) + } + reqs[fqn] = append(reqs[fqn], constraints...) + } + } + + if recurse { + for _, childConfig := range c.Children { + moreDiags := childConfig.addProviderRequirements(reqs, true) + diags = append(diags, moreDiags...) + } + } + + return diags +} + +// resolveProviderTypes walks through the providers in the module and ensures +// the true types are assigned based on the provider requirements for the +// module. 
+func (c *Config) resolveProviderTypes() { + for _, child := range c.Children { + child.resolveProviderTypes() + } + + // collect the required_providers, and then add any missing default providers + providers := map[string]addrs.Provider{} + for name, p := range c.Module.ProviderRequirements.RequiredProviders { + providers[name] = p.Type + } + + // ensure all provider configs know their correct type + for _, p := range c.Module.ProviderConfigs { + addr, required := providers[p.Name] + if required { + p.providerType = addr + } else { + addr := addrs.NewDefaultProvider(p.Name) + p.providerType = addr + providers[p.Name] = addr + } + } + + // connect module call providers to the correct type + for _, mod := range c.Module.ModuleCalls { + for _, p := range mod.Providers { + if addr, known := providers[p.InParent.Name]; known { + p.InParent.providerType = addr + } + } + } + + // fill in parent module calls too + if c.Parent != nil { + for _, mod := range c.Parent.Module.ModuleCalls { + for _, p := range mod.Providers { + if addr, known := providers[p.InChild.Name]; known { + p.InChild.providerType = addr + } + } + } + } +} + +// ProviderTypes returns the FQNs of each distinct provider type referenced +// in the receiving configuration. +// +// This is a helper for easily determining which provider types are required +// to fully interpret the configuration, though it does not include version +// information and so callers are expected to have already dealt with +// provider version selection in an earlier step and have identified suitable +// versions for each provider. 
+func (c *Config) ProviderTypes() []addrs.Provider { + // Ignore diagnostics here because they relate to version constraints + reqs, _ := c.ProviderRequirements() + + ret := make([]addrs.Provider, 0, len(reqs)) + for k := range reqs { + ret = append(ret, k) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].String() < ret[j].String() + }) + return ret +} + +// ResolveAbsProviderAddr returns the AbsProviderConfig represented by the given +// ProviderConfig address, which must not be nil or this method will panic. +// +// If the given address is already an AbsProviderConfig then this method returns +// it verbatim, and will always succeed. If it's a LocalProviderConfig then +// it will consult the local-to-FQN mapping table for the given module +// to find the absolute address corresponding to the given local one. +// +// The module address to resolve local addresses in must be given in the second +// argument, and must refer to a module that exists under the receiver or +// else this method will panic. +func (c *Config) ResolveAbsProviderAddr(addr addrs.ProviderConfig, inModule addrs.Module) addrs.AbsProviderConfig { + switch addr := addr.(type) { + + case addrs.AbsProviderConfig: + return addr + + case addrs.LocalProviderConfig: + // Find the descendent Config that contains the module that this + // local config belongs to. 
+ mc := c.Descendent(inModule) + if mc == nil { + panic(fmt.Sprintf("ResolveAbsProviderAddr with non-existent module %s", inModule.String())) + } + + var provider addrs.Provider + if providerReq, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { + provider = providerReq.Type + } else { + provider = addrs.ImpliedProviderForUnqualifiedType(addr.LocalName) + } + + return addrs.AbsProviderConfig{ + Module: inModule, + Provider: provider, + Alias: addr.Alias, + } + + default: + panic(fmt.Sprintf("cannot ResolveAbsProviderAddr(%v, ...)", addr)) + } + +} + +// ProviderForConfigAddr returns the FQN for a given addrs.ProviderConfig, first +// by checking for the provider in module.ProviderRequirements and falling +// back to addrs.NewDefaultProvider if it is not found. +func (c *Config) ProviderForConfigAddr(addr addrs.LocalProviderConfig) addrs.Provider { + if provider, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { + return provider.Type + } + return c.ResolveAbsProviderAddr(addr, addrs.RootModule).Provider +} diff --git a/configs/config_build.go b/configs/config_build.go new file mode 100644 index 000000000000..d33ec99fdc5f --- /dev/null +++ b/configs/config_build.go @@ -0,0 +1,200 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. 
+func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + + // Skip provider resolution if there are any errors, since the provider + // configurations themselves may not be valid. + if !diags.HasErrors() { + // Now that the config is built, we can connect the provider names to all + // the known types for validation. + cfg.resolveProviderTypes() + } + + diags = append(diags, validateProviderConfigs(nil, cfg, nil)...) + + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := map[string]*Config{} + + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + VersionConstraint: call.Version, + Parent: parent, + CallRange: call.DeclRange, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. 
+ continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallRange: call.DeclRange, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = append(diags, modDiags...) + + if mod.Backend != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Backend configuration ignored", + Detail: "Any selected backend applies to the entire configuration, so Terraform expects provider configurations only in the root module.\n\nThis is a warning rather than an error because it's sometimes convenient to temporarily call a root module as a child module for testing purposes, but this backend configuration block will have no effect.", + Subject: mod.Backend.DeclRange.Ptr(), + }) + } + + ret[call.Name] = child + } + + return ret, diags +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. 
+ LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr addrs.ModuleSource + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. 
This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source range for the header of the "module" block + // in configuration that prompted this request. This can be used as the + // subject of an error diagnostic that relates to the module call itself, + // rather than to either its source address or its version number. + CallRange hcl.Range +} + +// DisabledModuleWalker is a ModuleWalker that doesn't support +// child modules at all, and so will return an error if asked to load one. +// +// This is provided primarily for testing. There is no good reason to use this +// in the main application. 
+var DisabledModuleWalker ModuleWalker + +func init() { + DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Child modules are not supported", + Detail: "Child module calls are not allowed in this context.", + Subject: &req.CallRange, + }, + } + }) +} diff --git a/internal/configs/config_build_test.go b/configs/config_build_test.go similarity index 100% rename from internal/configs/config_build_test.go rename to configs/config_build_test.go diff --git a/configs/config_test.go b/configs/config_test.go new file mode 100644 index 000000000000..e5b4de5cf0e9 --- /dev/null +++ b/configs/config_test.go @@ -0,0 +1,421 @@ +package configs + +import ( + "testing" + + "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/zclconf/go-cty/cty" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2/hclsyntax" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" +) + +func TestConfigProviderTypes(t *testing.T) { + // nil cfg should return an empty map + got := NewEmptyConfig().ProviderTypes() + if len(got) != 0 { + t.Fatal("expected empty result from empty config") + } + + cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + got = cfg.ProviderTypes() + want := []addrs.Provider{ + addrs.NewDefaultProvider("aws"), + addrs.NewDefaultProvider("null"), + addrs.NewDefaultProvider("template"), + addrs.NewDefaultProvider("test"), + } + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestConfigProviderTypes_nested(t *testing.T) { + // basic test with a nil config + c := NewEmptyConfig() + got := c.ProviderTypes() 
+ if len(got) != 0 { + t.Fatalf("wrong result!\ngot: %#v\nwant: nil\n", got) + } + + // config with two provider sources, and one implicit (default) provider + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + got = cfg.ProviderTypes() + want := []addrs.Provider{ + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test"), + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), + addrs.NewDefaultProvider("test"), + } + + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestConfigResolveAbsProviderAddr(t *testing.T) { + cfg, diags := testModuleConfigFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + t.Run("already absolute", func(t *testing.T) { + addr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + if got, want := got.String(), addr.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("local, implied mapping", func(t *testing.T) { + addr := addrs.LocalProviderConfig{ + LocalName: "implied", + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + want := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("implied"), + Alias: "boop", + } + if got, want := got.String(), want.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("local, explicit mapping", func(t *testing.T) { + addr := addrs.LocalProviderConfig{ + LocalName: "foo-test", // this is explicitly set in the config + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + want := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: 
addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), + Alias: "boop", + } + if got, want := got.String(), want.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestConfigProviderRequirements(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. + // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + got, diags := cfg.ProviderRequirements() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + // the nullProvider constraints from the two modules are merged + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + happycloudProvider: nil, + terraformProvider: nil, + grandchildProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsDuplicate(t 
*testing.T) { + _, diags := testNestedModuleConfigFromDir(t, "testdata/duplicate-local-name") + assertDiagnosticCount(t, diags, 3) + assertDiagnosticSummary(t, diags, "Duplicate required provider") +} + +func TestConfigProviderRequirementsShallow(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. + // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + + got, diags := cfg.ProviderRequirementsShallow() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + // the nullProvider constraint is only from the root module + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsByModule(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. 
+ // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. + // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + got, diags := cfg.ProviderRequirementsByModule() + assertNoDiagnostics(t, diags) + want := &ModuleRequirements{ + Name: "", + SourceAddr: nil, + SourceDir: "testdata/provider-reqs", + Requirements: getproviders.Requirements{ + // Only the root module's version is present here + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + }, + Children: map[string]*ModuleRequirements{ + "kinder": { + Name: "kinder", + SourceAddr: addrs.ModuleSourceLocal("./child"), + SourceDir: "testdata/provider-reqs/child", + Requirements: getproviders.Requirements{ + nullProvider: getproviders.MustParseVersionConstraints("= 2.0.1"), + happycloudProvider: nil, + }, + Children: map[string]*ModuleRequirements{ + "nested": { + Name: "nested", + SourceAddr: addrs.ModuleSourceLocal("./grandchild"), + SourceDir: 
"testdata/provider-reqs/child/grandchild", + Requirements: getproviders.Requirements{ + grandchildProvider: nil, + }, + Children: map[string]*ModuleRequirements{}, + }, + }, + }, + }, + } + + ignore := cmpopts.IgnoreUnexported(version.Constraint{}, cty.Value{}, hclsyntax.Body{}) + if diff := cmp.Diff(want, got, ignore); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestVerifyDependencySelections(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. + // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + tests := map[string]struct { + PrepareLocks func(*depsfile.Locks) + WantErrs []string + }{ + "empty locks": { + func(*depsfile.Locks) { + // Intentionally blank + }, + []string{ + `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is 
selected`, + `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/tls: required by this configuration but no version is selected`, + `provider tf.example.com/awesomecorp/happycloud: required by this configuration but no version is selected`, + }, + }, + "suitable locks": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(nullProvider, getproviders.MustParseVersion("2.0.1"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + nil, + }, + "null provider constraints changed": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.terraform.io/hashicorp/null: locked version selection 3.0.0 doesn't match the updated version constraints "~> 2.0.0, 2.0.1"`, + }, + }, + "null provider lock changed": { + func(locks *depsfile.Locks) { + // In this case, we set the lock file 
version constraints to + // match the configuration, and so our error message changes + // to not assume the configuration changed anymore. + locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), nil) + + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.terraform.io/hashicorp/null: version constraints "~> 2.0.0, 2.0.1" don't match the locked version selection 3.0.0`, + }, + }, + "overridden provider": { + func(locks *depsfile.Locks) { + locks.SetProviderOverridden(happycloudProvider) + }, + []string{ + // We still catch all of the other ones, because only happycloud was overridden + `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.terraform.io/hashicorp/tls: required by this configuration but no version is selected`, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + depLocks := depsfile.NewLocks() + 
test.PrepareLocks(depLocks) + gotErrs := cfg.VerifyDependencySelections(depLocks) + + var gotErrsStr []string + if gotErrs != nil { + gotErrsStr = make([]string, len(gotErrs)) + for i, err := range gotErrs { + gotErrsStr[i] = err.Error() + } + } + + if diff := cmp.Diff(test.WantErrs, gotErrsStr); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + }) + } +} + +func TestConfigProviderForConfigAddr(t *testing.T) { + cfg, diags := testModuleConfigFromDir("testdata/valid-modules/providers-fqns") + assertNoDiagnostics(t, diags) + + got := cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("foo-test")) + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if !got.Equals(want) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + + // now check a provider that isn't in the configuration. It should return a DefaultProvider. + got = cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("bar-test")) + want = addrs.NewDefaultProvider("bar-test") + if !got.Equals(want) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } +} + +func TestConfigAddProviderRequirements(t *testing.T) { + cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") + assertNoDiagnostics(t, diags) + + reqs := getproviders.Requirements{ + addrs.NewDefaultProvider("null"): nil, + } + diags = cfg.addProviderRequirements(reqs, true) + assertNoDiagnostics(t, diags) +} diff --git a/internal/configs/configload/copy_dir.go b/configs/configload/copy_dir.go similarity index 100% rename from internal/configs/configload/copy_dir.go rename to configs/configload/copy_dir.go diff --git a/internal/configs/configload/copy_dir_test.go b/configs/configload/copy_dir_test.go similarity index 100% rename from internal/configs/configload/copy_dir_test.go rename to configs/configload/copy_dir_test.go diff --git a/internal/configs/configload/doc.go b/configs/configload/doc.go similarity index 100% rename from 
internal/configs/configload/doc.go rename to configs/configload/doc.go diff --git a/internal/configs/configload/inode.go b/configs/configload/inode.go similarity index 100% rename from internal/configs/configload/inode.go rename to configs/configload/inode.go diff --git a/internal/configs/configload/inode_freebsd.go b/configs/configload/inode_freebsd.go similarity index 100% rename from internal/configs/configload/inode_freebsd.go rename to configs/configload/inode_freebsd.go diff --git a/internal/configs/configload/inode_windows.go b/configs/configload/inode_windows.go similarity index 100% rename from internal/configs/configload/inode_windows.go rename to configs/configload/inode_windows.go diff --git a/internal/configs/configload/loader.go b/configs/configload/loader.go similarity index 98% rename from internal/configs/configload/loader.go rename to configs/configload/loader.go index 0f2481d3f611..72f89bbe8ae6 100644 --- a/internal/configs/configload/loader.go +++ b/configs/configload/loader.go @@ -5,8 +5,8 @@ import ( "path/filepath" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/registry" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/registry" "github.com/spf13/afero" ) diff --git a/internal/configs/configload/loader_load.go b/configs/configload/loader_load.go similarity index 98% rename from internal/configs/configload/loader_load.go rename to configs/configload/loader_load.go index 1ca26814a273..b14a3aed4423 100644 --- a/internal/configs/configload/loader_load.go +++ b/configs/configload/loader_load.go @@ -5,7 +5,7 @@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/configs" ) // LoadConfig reads the Terraform module in the given directory and uses it as the diff --git a/internal/configs/configload/loader_load_test.go 
b/configs/configload/loader_load_test.go similarity index 99% rename from internal/configs/configload/loader_load_test.go rename to configs/configload/loader_load_test.go index d285cc9f1656..b90f7a8e902f 100644 --- a/internal/configs/configload/loader_load_test.go +++ b/configs/configload/loader_load_test.go @@ -10,7 +10,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/configs" ) func TestLoaderLoadConfig_okay(t *testing.T) { diff --git a/internal/configs/configload/loader_snapshot.go b/configs/configload/loader_snapshot.go similarity index 99% rename from internal/configs/configload/loader_snapshot.go rename to configs/configload/loader_snapshot.go index 915665f833ba..51b428f9e563 100644 --- a/internal/configs/configload/loader_snapshot.go +++ b/configs/configload/loader_snapshot.go @@ -10,8 +10,8 @@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/modsdir" "github.com/spf13/afero" ) diff --git a/internal/configs/configload/loader_snapshot_test.go b/configs/configload/loader_snapshot_test.go similarity index 100% rename from internal/configs/configload/loader_snapshot_test.go rename to configs/configload/loader_snapshot_test.go diff --git a/internal/configs/configload/loader_test.go b/configs/configload/loader_test.go similarity index 100% rename from internal/configs/configload/loader_test.go rename to configs/configload/loader_test.go diff --git a/internal/configs/configload/module_mgr.go b/configs/configload/module_mgr.go similarity index 94% rename from internal/configs/configload/module_mgr.go rename to configs/configload/module_mgr.go index bf8d067e6d23..314043bfebbd 100644 --- a/internal/configs/configload/module_mgr.go +++ 
b/configs/configload/module_mgr.go @@ -5,8 +5,8 @@ import ( "path/filepath" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/internal/registry" + "github.com/hashicorp/terraform/modsdir" + "github.com/hashicorp/terraform/registry" "github.com/spf13/afero" ) diff --git a/internal/configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf b/configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf similarity index 100% rename from internal/configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf rename to configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf diff --git a/internal/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json b/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json rename to configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json diff --git a/internal/configs/configload/testdata/add-version-constraint/add-version-constraint.tf b/configs/configload/testdata/add-version-constraint/add-version-constraint.tf similarity index 100% rename from internal/configs/configload/testdata/add-version-constraint/add-version-constraint.tf rename to configs/configload/testdata/add-version-constraint/add-version-constraint.tf diff --git a/internal/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json b/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json rename to configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json diff 
--git a/internal/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf b/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf rename to configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf diff --git a/internal/configs/configload/testdata/already-installed-now-invalid/foo/main.tf b/configs/configload/testdata/already-installed-now-invalid/foo/main.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed-now-invalid/foo/main.tf rename to configs/configload/testdata/already-installed-now-invalid/foo/main.tf diff --git a/internal/configs/configload/testdata/already-installed-now-invalid/root.tf b/configs/configload/testdata/already-installed-now-invalid/root.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed-now-invalid/root.tf rename to configs/configload/testdata/already-installed-now-invalid/root.tf diff --git a/internal/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf b/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf rename to configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf diff --git a/internal/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf b/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf rename to configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf diff --git 
a/internal/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf b/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf rename to configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf diff --git a/internal/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf b/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf rename to configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf diff --git a/internal/configs/configload/testdata/already-installed/.terraform/modules/modules.json b/configs/configload/testdata/already-installed/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/already-installed/.terraform/modules/modules.json rename to configs/configload/testdata/already-installed/.terraform/modules/modules.json diff --git a/internal/configs/configload/testdata/already-installed/root.tf b/configs/configload/testdata/already-installed/root.tf similarity index 100% rename from internal/configs/configload/testdata/already-installed/root.tf rename to configs/configload/testdata/already-installed/root.tf diff --git a/internal/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json b/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json rename to configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json diff --git 
a/internal/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf b/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf rename to configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf diff --git a/internal/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf b/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf rename to configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf diff --git a/internal/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf b/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf rename to configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf diff --git a/internal/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json b/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json rename to configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json diff --git a/internal/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf 
b/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf rename to configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf diff --git a/internal/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf b/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf rename to configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf diff --git a/internal/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf b/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf similarity index 100% rename from internal/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf rename to configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf diff --git a/internal/configs/configload/testdata/empty/.gitignore b/configs/configload/testdata/empty/.gitignore similarity index 100% rename from internal/configs/configload/testdata/empty/.gitignore rename to configs/configload/testdata/empty/.gitignore diff --git a/internal/configs/configload/testdata/go-getter-modules/.gitignore b/configs/configload/testdata/go-getter-modules/.gitignore similarity index 100% rename from internal/configs/configload/testdata/go-getter-modules/.gitignore rename to configs/configload/testdata/go-getter-modules/.gitignore diff --git 
a/internal/configs/configload/testdata/go-getter-modules/root.tf b/configs/configload/testdata/go-getter-modules/root.tf similarity index 100% rename from internal/configs/configload/testdata/go-getter-modules/root.tf rename to configs/configload/testdata/go-getter-modules/root.tf diff --git a/internal/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json b/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json rename to configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json diff --git a/internal/configs/configload/testdata/invalid-names-in-submodules/main.tf b/configs/configload/testdata/invalid-names-in-submodules/main.tf similarity index 100% rename from internal/configs/configload/testdata/invalid-names-in-submodules/main.tf rename to configs/configload/testdata/invalid-names-in-submodules/main.tf diff --git a/internal/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf b/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf similarity index 100% rename from internal/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf rename to configs/configload/testdata/invalid-names-in-submodules/sub/main.tf diff --git a/internal/configs/configload/testdata/invalid-names/main.tf b/configs/configload/testdata/invalid-names/main.tf similarity index 100% rename from internal/configs/configload/testdata/invalid-names/main.tf rename to configs/configload/testdata/invalid-names/main.tf diff --git a/internal/configs/configload/testdata/local-modules/child_a/child_a.tf b/configs/configload/testdata/local-modules/child_a/child_a.tf similarity index 100% rename from internal/configs/configload/testdata/local-modules/child_a/child_a.tf rename to configs/configload/testdata/local-modules/child_a/child_a.tf diff 
--git a/internal/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf b/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf similarity index 100% rename from internal/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf rename to configs/configload/testdata/local-modules/child_a/child_b/child_b.tf diff --git a/internal/configs/configload/testdata/local-modules/root.tf b/configs/configload/testdata/local-modules/root.tf similarity index 100% rename from internal/configs/configload/testdata/local-modules/root.tf rename to configs/configload/testdata/local-modules/root.tf diff --git a/internal/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json b/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json similarity index 100% rename from internal/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json rename to configs/configload/testdata/module-depends-on/.terraform/modules/modules.json diff --git a/internal/configs/configload/testdata/module-depends-on/child/main.tf b/configs/configload/testdata/module-depends-on/child/main.tf similarity index 100% rename from internal/configs/configload/testdata/module-depends-on/child/main.tf rename to configs/configload/testdata/module-depends-on/child/main.tf diff --git a/internal/configs/configload/testdata/module-depends-on/child2/main.tf b/configs/configload/testdata/module-depends-on/child2/main.tf similarity index 100% rename from internal/configs/configload/testdata/module-depends-on/child2/main.tf rename to configs/configload/testdata/module-depends-on/child2/main.tf diff --git a/internal/configs/configload/testdata/module-depends-on/root.tf b/configs/configload/testdata/module-depends-on/root.tf similarity index 100% rename from internal/configs/configload/testdata/module-depends-on/root.tf rename to configs/configload/testdata/module-depends-on/root.tf diff --git 
a/internal/configs/configload/testdata/registry-modules/.gitignore b/configs/configload/testdata/registry-modules/.gitignore similarity index 100% rename from internal/configs/configload/testdata/registry-modules/.gitignore rename to configs/configload/testdata/registry-modules/.gitignore diff --git a/internal/configs/configload/testdata/registry-modules/root.tf b/configs/configload/testdata/registry-modules/root.tf similarity index 100% rename from internal/configs/configload/testdata/registry-modules/root.tf rename to configs/configload/testdata/registry-modules/root.tf diff --git a/internal/configs/configload/testing.go b/configs/configload/testing.go similarity index 100% rename from internal/configs/configload/testing.go rename to configs/configload/testing.go diff --git a/internal/configs/configschema/coerce_value.go b/configs/configschema/coerce_value.go similarity index 100% rename from internal/configs/configschema/coerce_value.go rename to configs/configschema/coerce_value.go diff --git a/internal/configs/configschema/coerce_value_test.go b/configs/configschema/coerce_value_test.go similarity index 99% rename from internal/configs/configschema/coerce_value_test.go rename to configs/configschema/coerce_value_test.go index 37f81b76986f..d682a0ed0be3 100644 --- a/internal/configs/configschema/coerce_value_test.go +++ b/configs/configschema/coerce_value_test.go @@ -5,7 +5,7 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func TestCoerceValue(t *testing.T) { diff --git a/internal/configs/configschema/decoder_spec.go b/configs/configschema/decoder_spec.go similarity index 100% rename from internal/configs/configschema/decoder_spec.go rename to configs/configschema/decoder_spec.go diff --git a/internal/configs/configschema/decoder_spec_test.go b/configs/configschema/decoder_spec_test.go similarity index 100% rename from internal/configs/configschema/decoder_spec_test.go 
rename to configs/configschema/decoder_spec_test.go diff --git a/internal/configs/configschema/doc.go b/configs/configschema/doc.go similarity index 100% rename from internal/configs/configschema/doc.go rename to configs/configschema/doc.go diff --git a/internal/configs/configschema/empty_value.go b/configs/configschema/empty_value.go similarity index 100% rename from internal/configs/configschema/empty_value.go rename to configs/configschema/empty_value.go diff --git a/internal/configs/configschema/empty_value_test.go b/configs/configschema/empty_value_test.go similarity index 100% rename from internal/configs/configschema/empty_value_test.go rename to configs/configschema/empty_value_test.go diff --git a/internal/configs/configschema/implied_type.go b/configs/configschema/implied_type.go similarity index 100% rename from internal/configs/configschema/implied_type.go rename to configs/configschema/implied_type.go diff --git a/internal/configs/configschema/implied_type_test.go b/configs/configschema/implied_type_test.go similarity index 100% rename from internal/configs/configschema/implied_type_test.go rename to configs/configschema/implied_type_test.go diff --git a/internal/configs/configschema/internal_validate.go b/configs/configschema/internal_validate.go similarity index 100% rename from internal/configs/configschema/internal_validate.go rename to configs/configschema/internal_validate.go diff --git a/internal/configs/configschema/internal_validate_test.go b/configs/configschema/internal_validate_test.go similarity index 100% rename from internal/configs/configschema/internal_validate_test.go rename to configs/configschema/internal_validate_test.go diff --git a/configs/configschema/marks.go b/configs/configschema/marks.go new file mode 100644 index 000000000000..92fed660ae38 --- /dev/null +++ b/configs/configschema/marks.go @@ -0,0 +1,153 @@ +package configschema + +import ( + "fmt" + + "github.com/hashicorp/terraform/lang/marks" + 
"github.com/zclconf/go-cty/cty" +) + +// copyAndExtendPath returns a copy of a cty.Path with some additional +// `cty.PathStep`s appended to its end, to simplify creating new child paths. +func copyAndExtendPath(path cty.Path, nextSteps ...cty.PathStep) cty.Path { + newPath := make(cty.Path, len(path), len(path)+len(nextSteps)) + copy(newPath, path) + newPath = append(newPath, nextSteps...) + return newPath +} + +// ValueMarks returns a set of path value marks for a given value and path, +// based on the sensitive flag for each attribute within the schema. Nested +// blocks are descended (if present in the given value). +func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { + var pvm []cty.PathValueMarks + + // We can mark attributes as sensitive even if the value is null + for name, attrS := range b.Attributes { + if attrS.Sensitive { + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } + } + + // If the value is null, no other marks are possible + if val.IsNull() { + return pvm + } + + // Extract marks for nested attribute type values + for name, attrS := range b.Attributes { + // If the attribute has no nested type, or the nested type doesn't + // contain any sensitive attributes, skip inspecting it + if attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive() { + continue + } + + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) 
+ } + + // Extract marks for nested blocks + for name, blockS := range b.BlockTypes { + // If our block doesn't contain any sensitive attributes, skip inspecting it + if !blockS.Block.ContainsSensitive() { + continue + } + + blockV := val.GetAttr(name) + if blockV.IsNull() || !blockV.IsKnown() { + continue + } + + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + blockPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + pvm = append(pvm, blockS.Block.ValueMarks(blockV, blockPath)...) + case NestingList, NestingMap, NestingSet: + for it := blockV.ElementIterator(); it.Next(); { + idx, blockEV := it.Element() + // Create a copy of the path, with this block instance's index + // step added, to add to our PathValueMarks slice + blockInstancePath := copyAndExtendPath(blockPath, cty.IndexStep{Key: idx}) + morePaths := blockS.Block.ValueMarks(blockEV, blockInstancePath) + pvm = append(pvm, morePaths...) + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + return pvm +} + +// ValueMarks returns a set of path value marks for a given value and path, +// based on the sensitive flag for each attribute within the nested attribute. +// Attributes with nested types are descended (if present in the given value). 
+func (o *Object) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { + var pvm []cty.PathValueMarks + + if val.IsNull() || !val.IsKnown() { + return pvm + } + + for name, attrS := range o.Attributes { + // Skip attributes which can never produce sensitive path value marks + if !attrS.Sensitive && (attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive()) { + continue + } + + switch o.Nesting { + case NestingSingle, NestingGroup: + // Create a path to this attribute + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) + } + case NestingList, NestingMap, NestingSet: + // For nested attribute types which have a non-single nesting mode, + // we add path value marks for each element of the collection + for it := val.ElementIterator(); it.Next(); { + idx, attrEV := it.Element() + attrV := attrEV.GetAttr(name) + + // Create a path to this element of the attribute's collection. Note + // that the path is extended in opposite order to the iteration order + // of the loops: index into the collection, then the contained + // attribute name. This is because we have one type + // representing multiple collection elements. + attrPath := copyAndExtendPath(path, cty.IndexStep{Key: idx}, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(attrV, attrPath)...) 
+ } + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", attrS.NestedType.Nesting)) + } + } + return pvm +} diff --git a/configs/configschema/marks_test.go b/configs/configschema/marks_test.go new file mode 100644 index 000000000000..2bd8f9651f84 --- /dev/null +++ b/configs/configschema/marks_test.go @@ -0,0 +1,182 @@ +package configschema + +import ( + "testing" + + "github.com/hashicorp/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestBlockValueMarks(t *testing.T) { + schema := &Block{ + Attributes: map[string]*Attribute{ + "unsensitive": { + Type: cty.String, + Optional: true, + }, + "sensitive": { + Type: cty.String, + Sensitive: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "boop": { + Type: cty.String, + }, + "honk": { + Type: cty.String, + Sensitive: true, + }, + }, + Nesting: NestingList, + }, + }, + }, + + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "unsensitive": { + Type: cty.String, + Optional: true, + }, + "sensitive": { + Type: cty.String, + Sensitive: true, + }, + }, + }, + }, + }, + } + + testCases := map[string]struct { + given cty.Value + expect cty.Value + }{ + "unknown object": { + cty.UnknownVal(schema.ImpliedType()), + cty.UnknownVal(schema.ImpliedType()), + }, + "null object": { + cty.NullVal(schema.ImpliedType()), + cty.NullVal(schema.ImpliedType()), + }, + "object with unknown attributes and blocks": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + "nested": 
cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + }), + }, + "object with block value": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String), + "unsensitive": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.NullVal(cty.String), + }), + }), + }), + }, + "object with known values and nested attribute": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo"), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + 
"sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo").Mark(marks.Sensitive), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String).Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.given.MarkWithPaths(schema.ValueMarks(tc.given, nil)) + if !got.RawEquals(tc.expect) { + t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expect, got) + } + }) + } +} diff --git a/internal/configs/configschema/nestingmode_string.go b/configs/configschema/nestingmode_string.go similarity index 100% rename from internal/configs/configschema/nestingmode_string.go rename to configs/configschema/nestingmode_string.go diff --git a/internal/configs/configschema/none_required.go b/configs/configschema/none_required.go similarity index 100% rename from internal/configs/configschema/none_required.go rename to configs/configschema/none_required.go diff --git a/internal/configs/configschema/path.go b/configs/configschema/path.go similarity index 100% rename from internal/configs/configschema/path.go rename to configs/configschema/path.go diff --git a/internal/configs/configschema/path_test.go b/configs/configschema/path_test.go similarity index 100% rename from internal/configs/configschema/path_test.go rename to configs/configschema/path_test.go diff --git a/internal/configs/configschema/schema.go 
b/configs/configschema/schema.go similarity index 100% rename from internal/configs/configschema/schema.go rename to configs/configschema/schema.go diff --git a/internal/configs/configschema/validate_traversal.go b/configs/configschema/validate_traversal.go similarity index 98% rename from internal/configs/configschema/validate_traversal.go rename to configs/configschema/validate_traversal.go index 8320c9de3663..4ef50e31c453 100644 --- a/internal/configs/configschema/validate_traversal.go +++ b/configs/configschema/validate_traversal.go @@ -8,8 +8,8 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/didyoumean" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/didyoumean" + "github.com/hashicorp/terraform/tfdiags" ) // StaticValidateTraversal checks whether the given traversal (which must be diff --git a/internal/configs/configschema/validate_traversal_test.go b/configs/configschema/validate_traversal_test.go similarity index 100% rename from internal/configs/configschema/validate_traversal_test.go rename to configs/configschema/validate_traversal_test.go diff --git a/internal/configs/depends_on.go b/configs/depends_on.go similarity index 100% rename from internal/configs/depends_on.go rename to configs/depends_on.go diff --git a/internal/configs/doc.go b/configs/doc.go similarity index 100% rename from internal/configs/doc.go rename to configs/doc.go diff --git a/internal/configs/escaping_blocks_test.go b/configs/escaping_blocks_test.go similarity index 100% rename from internal/configs/escaping_blocks_test.go rename to configs/escaping_blocks_test.go diff --git a/internal/configs/experiments.go b/configs/experiments.go similarity index 98% rename from internal/configs/experiments.go rename to configs/experiments.go index 1d1a36b02069..4b6df3b10096 100644 --- a/internal/configs/experiments.go +++ b/configs/experiments.go @@ -4,7 +4,7 @@ import ( "fmt" 
"github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/experiments" + "github.com/hashicorp/terraform/experiments" "github.com/hashicorp/terraform/version" ) @@ -12,7 +12,7 @@ import ( // the experiment warning by setting this package-level variable to a non-empty // value using a link-time flag: // -// go install -ldflags="-X 'github.com/hashicorp/terraform/internal/configs.disableExperimentWarnings=yes'" +// go install -ldflags="-X 'github.com/hashicorp/terraform/configs.disableExperimentWarnings=yes'" // // This functionality is for development purposes only and is not a feature we // are committing to supporting for end users. diff --git a/internal/configs/experiments_test.go b/configs/experiments_test.go similarity index 98% rename from internal/configs/experiments_test.go rename to configs/experiments_test.go index 7d1b9dc4e391..be60d278ff49 100644 --- a/internal/configs/experiments_test.go +++ b/configs/experiments_test.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/experiments" + "github.com/hashicorp/terraform/experiments" ) func TestExperimentsConfig(t *testing.T) { diff --git a/internal/configs/hcl2shim/flatmap.go b/configs/hcl2shim/flatmap.go similarity index 100% rename from internal/configs/hcl2shim/flatmap.go rename to configs/hcl2shim/flatmap.go diff --git a/internal/configs/hcl2shim/flatmap_test.go b/configs/hcl2shim/flatmap_test.go similarity index 100% rename from internal/configs/hcl2shim/flatmap_test.go rename to configs/hcl2shim/flatmap_test.go diff --git a/internal/configs/hcl2shim/paths.go b/configs/hcl2shim/paths.go similarity index 100% rename from internal/configs/hcl2shim/paths.go rename to configs/hcl2shim/paths.go diff --git a/internal/configs/hcl2shim/paths_test.go b/configs/hcl2shim/paths_test.go similarity index 100% rename from internal/configs/hcl2shim/paths_test.go rename to configs/hcl2shim/paths_test.go diff --git 
a/internal/configs/hcl2shim/single_attr_body.go b/configs/hcl2shim/single_attr_body.go similarity index 100% rename from internal/configs/hcl2shim/single_attr_body.go rename to configs/hcl2shim/single_attr_body.go diff --git a/configs/hcl2shim/values.go b/configs/hcl2shim/values.go new file mode 100644 index 000000000000..44551fe1ea20 --- /dev/null +++ b/configs/hcl2shim/values.go @@ -0,0 +1,230 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// 
ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. 
+func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/internal/configs/hcl2shim/values_equiv.go b/configs/hcl2shim/values_equiv.go similarity index 100% rename from internal/configs/hcl2shim/values_equiv.go rename to configs/hcl2shim/values_equiv.go diff --git a/internal/configs/hcl2shim/values_equiv_test.go b/configs/hcl2shim/values_equiv_test.go similarity index 100% rename from internal/configs/hcl2shim/values_equiv_test.go rename to configs/hcl2shim/values_equiv_test.go diff --git a/configs/hcl2shim/values_test.go b/configs/hcl2shim/values_test.go new file mode 100644 index 000000000000..7c3011da053c --- /dev/null +++ b/configs/hcl2shim/values_test.go @@ -0,0 +1,415 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestConfigValueFromHCL2Block(t *testing.T) { + tests := []struct { + Input cty.Value + Schema *configschema.Block + Want map[string]interface{} + }{ + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": 
cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.StringVal("91037"), + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + "zip": "91037", + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.NullVal(cty.Object(map[string]cty.Type{ + "street": cty.List(cty.String), + "city": cty.String, + "state": cty.String, + "zip": cty.String, + })), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + }, + }, + { + 
cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.NullVal(cty.String), // should be omitted altogether in result + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": []interface{}{ + map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.ListValEmpty(cty.EmptyObject), // should be omitted altogether in result + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.SetVal([]cty.Value{cty.EmptyObjectVal}), + 
}), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": []interface{}{ + map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.SetValEmpty(cty.EmptyObject), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.MapVal(map[string]cty.Value{"foo": cty.EmptyObjectVal}), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingMap, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": map[string]interface{}{ + "foo": map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.MapValEmpty(cty.EmptyObject), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingMap, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.NullVal(cty.EmptyObject), + &configschema.Block{}, + nil, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := ConfigValueFromHCL2Block(test.Input, test.Schema) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestConfigValueFromHCL2(t *testing.T) { + tests := []struct { + Input cty.Value + Want interface{} + }{ + { + cty.True, + true, + }, + { + cty.False, + false, + }, + { + cty.NumberIntVal(12), + int(12), + }, + { + cty.NumberFloatVal(12.5), + float64(12.5), + }, + { + cty.StringVal("hello world"), + "hello world", + }, + { + 
cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.StringVal("91037"), + }), + }), + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + "zip": "91037", + }, + }, + }, + { + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.StringVal("baz"), + }), + map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("foo"), + cty.True, + }), + []interface{}{ + "foo", + true, + }, + }, + { + cty.NullVal(cty.String), + nil, + }, + { + cty.UnknownVal(cty.String), + UnknownVariableValue, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := ConfigValueFromHCL2(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestHCL2ValueFromConfigValue(t *testing.T) { + tests := []struct { + Input interface{} + Want cty.Value + }{ + { + nil, + cty.NullVal(cty.DynamicPseudoType), + }, + { + UnknownVariableValue, + cty.DynamicVal, + }, + { + true, + cty.True, + }, + { + false, + cty.False, + }, + { + int(12), + cty.NumberIntVal(12), + }, + { + int(0), + cty.Zero, + }, + { + float64(12.5), + cty.NumberFloatVal(12.5), + }, + { + "hello world", + cty.StringVal("hello world"), + }, + { + "O\u0308", // decomposed letter + diacritic + cty.StringVal("\u00D6"), // NFC-normalized on entry into cty + }, + { + []interface{}{}, + cty.EmptyTupleVal, + }, + { + []interface{}(nil), + cty.EmptyTupleVal, + }, + { + []interface{}{"hello", "world"}, + 
cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}), + }, + { + map[string]interface{}{}, + cty.EmptyObjectVal, + }, + { + map[string]interface{}(nil), + cty.EmptyObjectVal, + }, + { + map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.StringVal("baz"), + }), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := HCL2ValueFromConfigValue(test.Input) + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} diff --git a/configs/module.go b/configs/module.go new file mode 100644 index 000000000000..caefb966b606 --- /dev/null +++ b/configs/module.go @@ -0,0 +1,591 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/experiments" +) + +// Module is a container for a set of configuration constructs that are +// evaluated within a common namespace. +type Module struct { + // SourceDir is the filesystem directory that the module was loaded from. + // + // This is populated automatically only for configurations loaded with + // LoadConfigDir. If the parser is using a virtual filesystem then the + // path here will be in terms of that virtual filesystem. + + // Any other caller that constructs a module directly with NewModule may + // assign a suitable value to this attribute before using it for other + // purposes. It should be treated as immutable by all consumers of Module + // values. 
+ SourceDir string + + CoreVersionConstraints []VersionConstraint + + ActiveExperiments experiments.Set + + Backend *Backend + CloudConfig *CloudConfig + ProviderConfigs map[string]*Provider + ProviderRequirements *RequiredProviders + ProviderLocalNames map[addrs.Provider]string + ProviderMetas map[addrs.Provider]*ProviderMeta + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource + + Moved []*Moved +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. +// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. +type File struct { + CoreVersionConstraints []VersionConstraint + + ActiveExperiments experiments.Set + + Backends []*Backend + CloudConfigs []*CloudConfig + ProviderConfigs []*Provider + ProviderMetas []*ProviderMeta + RequiredProviders []*RequiredProviders + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource + + Moved []*Moved +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. 
Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderLocalNames: map[addrs.Provider]string{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + ProviderMetas: map[addrs.Provider]*ProviderMeta{}, + } + + // Process the required_providers blocks first, to ensure that all + // resources have access to the correct provider FQNs + for _, file := range primaryFiles { + for _, r := range file.RequiredProviders { + if mod.ProviderRequirements != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate required providers configuration", + Detail: fmt.Sprintf("A module may have only one required providers configuration. The required providers were previously configured at %s.", mod.ProviderRequirements.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + mod.ProviderRequirements = r + } + } + + // If no required_providers block is configured, create a useful empty + // state to reduce nil checks elsewhere + if mod.ProviderRequirements == nil { + mod.ProviderRequirements = &RequiredProviders{ + RequiredProviders: make(map[string]*RequiredProvider), + } + } + + // Any required_providers blocks in override files replace the entire + // block for each provider + for _, file := range overrideFiles { + for _, override := range file.RequiredProviders { + for name, rp := range override.RequiredProviders { + mod.ProviderRequirements.RequiredProviders[name] = rp + } + } + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) 
+ } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + diags = append(diags, checkModuleExperiments(mod)...) + + // Generate the FQN -> LocalProviderName map + mod.gatherProviderLocalNames() + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) + + m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), + Subject: &b.DeclRange, + }) + continue + } + m.Backend = b + } + + for _, c := range file.CloudConfigs { + if m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate Terraform Cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. 
Terraform Cloud was previously configured at %s.", m.CloudConfig.DeclRange), + Subject: &c.DeclRange, + }) + continue + } + + m.CloudConfig = c + } + + if m.Backend != nil && m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Both a backend and Terraform Cloud configuration are present", + Detail: fmt.Sprintf("A module may declare either one 'cloud' block configuring Terraform Cloud OR one 'backend' block configuring a state backend. Terraform Cloud is configured at %s; a backend is configured at %s. Remove the backend block to configure Terraform Cloud.", m.CloudConfig.DeclRange, m.Backend.DeclRange), + Subject: &m.Backend.DeclRange, + }) + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + if existing, exists := m.ProviderConfigs[key]; exists { + if existing.Alias == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } + continue + } + m.ProviderConfigs[key] = pc + } + + for _, pm := range file.ProviderMetas { + provider := m.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pm.Provider}) + if existing, exists := m.ProviderMetas[provider]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider_meta block", + Detail: fmt.Sprintf("A provider_meta block for provider %q was already declared at %s. Providers may only have one provider_meta block per module.", existing.Provider, existing.DeclRange), + Subject: &pm.DeclRange, + }) + } + m.ProviderMetas[provider] = pm + } + + for _, v := range file.Variables { + if existing, exists := m.Variables[v.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate variable declaration", + Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &v.DeclRange, + }) + } + m.Variables[v.Name] = v + } + + for _, l := range file.Locals { + if existing, exists := m.Locals[l.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate local value definition", + Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &l.DeclRange, + }) + } + m.Locals[l.Name] = l + } + + for _, o := range file.Outputs { + if existing, exists := m.Outputs[o.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate output definition", + Detail: fmt.Sprintf("An output named %q was already defined at %s. 
Output names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &o.DeclRange, + }) + } + m.Outputs[o.Name] = o + } + + for _, mc := range file.ModuleCalls { + if existing, exists := m.ModuleCalls[mc.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate module call", + Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), + Subject: &mc.DeclRange, + }) + } + m.ModuleCalls[mc.Name] = mc + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + if existing, exists := m.ManagedResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + + // set the provider FQN for the resource + if r.ProviderConfigRef != nil { + r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) + } else { + // an invalid resource name (for e.g. "null resource" instead of + // "null_resource") can cause a panic down the line in addrs: + // https://github.com/hashicorp/terraform/issues/25560 + implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) + if err == nil { + r.Provider = m.ImpliedProviderForUnqualifiedType(implied) + } + // We don't return a diagnostic because the invalid resource name + // will already have been caught. 
+ } + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + + // set the provider FQN for the resource + if r.ProviderConfigRef != nil { + r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) + } else { + // an invalid data source name (for e.g. "null resource" instead of + // "null_resource") can cause a panic down the line in addrs: + // https://github.com/hashicorp/terraform/issues/25560 + implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) + if err == nil { + r.Provider = m.ImpliedProviderForUnqualifiedType(implied) + } + // We don't return a diagnostic because the invalid resource name + // will already have been caught. + } + } + + // "Moved" blocks just append, because they are all independent + // of one another at this level. (We handle any references between + // them at runtime.) + m.Moved = append(m.Moved, file.Moved...) + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. + m.CoreVersionConstraints = nil + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) 
+ } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.CloudConfig = nil // A backend block is mutually exclusive with a cloud one, and overwrites any cloud config + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + if len(file.CloudConfigs) != 0 { + switch len(file.CloudConfigs) { + case 1: + m.Backend = nil // A cloud block is mutually exclusive with a backend one, and overwrites any backend + m.CloudConfig = file.CloudConfigs[0] + default: + // An override file with multiple cloud blocks is still invalid, even + // though it can override cloud/backend blocks from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate Terraform Cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. Terraform Cloud was previously configured at %s.", file.CloudConfigs[0].DeclRange), + Subject: &file.CloudConfigs[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) 
+ } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) 
+ } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. 
 An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) + diags = append(diags, mergeDiags...) + } + + for _, m := range file.Moved { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override 'moved' blocks", + Detail: "Records of moved objects can appear only in normal files, not in override files.", + Subject: m.DeclRange.Ptr(), + }) + } + + return diags +} + +// gatherProviderLocalNames is a helper function that populates a map of +// provider FQNs -> provider local names. This information is useful for +// user-facing output, which should include both the FQN and LocalName. It must +// only be populated after the module has been parsed. +func (m *Module) gatherProviderLocalNames() { + providers := make(map[addrs.Provider]string) + for k, v := range m.ProviderRequirements.RequiredProviders { + providers[v.Type] = k + } + m.ProviderLocalNames = providers +} + +// LocalNameForProvider returns the module-specific user-supplied local name for +// a given provider FQN, or the default local name if none was supplied. +func (m *Module) LocalNameForProvider(p addrs.Provider) string { + if existing, exists := m.ProviderLocalNames[p]; exists { + return existing + } else { + // If there isn't a map entry, fall back to the default: + // Type = LocalName + return p.Type + } +} + +// ProviderForLocalConfig returns the provider FQN for a given +// LocalProviderConfig, based on its local name. +func (m *Module) ProviderForLocalConfig(pc addrs.LocalProviderConfig) addrs.Provider { + return m.ImpliedProviderForUnqualifiedType(pc.LocalName) +} + +// ImpliedProviderForUnqualifiedType returns the provider FQN for a given type, +// first by looking up the type in the provider requirements map, and falling +// back to an implied default provider. 
+// +// The intended behaviour is that configuring a provider with local name "foo" +// in a required_providers block will result in resources with type "foo" using +// that provider. +func (m *Module) ImpliedProviderForUnqualifiedType(pType string) addrs.Provider { + if provider, exists := m.ProviderRequirements.RequiredProviders[pType]; exists { + return provider.Type + } + return addrs.ImpliedProviderForUnqualifiedType(pType) +} diff --git a/internal/configs/module_call.go b/configs/module_call.go similarity index 98% rename from internal/configs/module_call.go rename to configs/module_call.go index 3ec42ec01a17..d261de8639a3 100644 --- a/internal/configs/module_call.go +++ b/configs/module_call.go @@ -6,8 +6,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getmodules" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getmodules" ) // ModuleCall represents a "module" block in a module or file. 
diff --git a/internal/configs/module_call_test.go b/configs/module_call_test.go similarity index 99% rename from internal/configs/module_call_test.go rename to configs/module_call_test.go index af2269dca0bb..c7dfa1409e14 100644 --- a/internal/configs/module_call_test.go +++ b/configs/module_call_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestLoadModuleCall(t *testing.T) { diff --git a/internal/configs/module_merge.go b/configs/module_merge.go similarity index 99% rename from internal/configs/module_merge.go rename to configs/module_merge.go index 9494343f2e8a..8efc3fb80b7f 100644 --- a/internal/configs/module_merge.go +++ b/configs/module_merge.go @@ -3,7 +3,7 @@ package configs import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" diff --git a/internal/configs/module_merge_body.go b/configs/module_merge_body.go similarity index 100% rename from internal/configs/module_merge_body.go rename to configs/module_merge_body.go diff --git a/internal/configs/module_merge_test.go b/configs/module_merge_test.go similarity index 99% rename from internal/configs/module_merge_test.go rename to configs/module_merge_test.go index 5d590661c728..4e77fb913108 100644 --- a/internal/configs/module_merge_test.go +++ b/configs/module_merge_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/configs/module_test.go b/configs/module_test.go new file mode 100644 index 000000000000..7a6b1e7d2388 --- /dev/null +++ b/configs/module_test.go @@ -0,0 +1,415 @@ +package configs + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + 
 "github.com/zclconf/go-cty/cty" +) + +// TestNewModule_provider_fqns exercises module.gatherProviderLocalNames() +func TestNewModule_provider_local_name(t *testing.T) { + mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + p := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if name, exists := mod.ProviderLocalNames[p]; !exists { + t.Fatal("provider FQN foo/test not found") + } else { + if name != "foo-test" { + t.Fatalf("provider localname mismatch: got %s, want foo-test", name) + } + } + + // ensure the reverse lookup (fqn to local name) works as well + localName := mod.LocalNameForProvider(p) + if localName != "foo-test" { + t.Fatal("provider local name not found") + } + + // if there is not a local name for a provider, it should return the type name + localName = mod.LocalNameForProvider(addrs.NewDefaultProvider("nonexist")) + if localName != "nonexist" { + t.Error("wrong local name returned for a non-local provider") + } + + // can also look up the "terraform" provider and see that its source is + // allowed to be overridden, even though there is a builtin provider + // called "terraform". 
+ p = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "not-builtin", "not-terraform") + if name, exists := mod.ProviderLocalNames[p]; !exists { + t.Fatal("provider FQN not-builtin/not-terraform not found") + } else { + if name != "terraform" { + t.Fatalf("provider localname mismatch: got %s, want terraform", name) + } + } +} + +// This test validates the provider FQNs set in each Resource +func TestNewModule_resource_providers(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // both the root and child module have two resources, one which should use + // the default implied provider and one explicitly using a provider set in + // required_providers + wantImplicit := addrs.NewDefaultProvider("test") + wantFoo := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + wantBar := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") + + // root module + if !cfg.Module.ManagedResources["test_instance.explicit"].Provider.Equals(wantFoo) { + t.Fatalf("wrong provider for \"test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantFoo, + ) + } + if !cfg.Module.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { + t.Fatalf("wrong provider for \"test_instance.implicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.implicit"].Provider, + wantImplicit, + ) + } + + // a data source + if !cfg.Module.DataResources["data.test_resource.explicit"].Provider.Equals(wantFoo) { + t.Fatalf("wrong provider for \"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantBar, + ) + } + + // child module + cm := cfg.Children["child"].Module + if !cm.ManagedResources["test_instance.explicit"].Provider.Equals(wantBar) { + t.Fatalf("wrong provider for 
\"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantBar, + ) + } + if !cm.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { + t.Fatalf("wrong provider for \"module.child.test_instance.implicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.implicit"].Provider, + wantImplicit, + ) + } +} + +func TestProviderForLocalConfig(t *testing.T) { + mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + lc := addrs.LocalProviderConfig{LocalName: "foo-test"} + got := mod.ProviderForLocalConfig(lc) + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if !got.Equals(want) { + t.Fatalf("wrong result! got %#v, want %#v\n", got, want) + } +} + +// At most one required_providers block per module is permitted. +func TestModule_required_providers_multiple(t *testing.T) { + _, diags := testModuleFromDir("testdata/invalid-modules/multiple-required-providers") + if !diags.HasErrors() { + t.Fatal("module should have error diags, but does not") + } + + want := `Duplicate required providers configuration` + if got := diags.Error(); !strings.Contains(got, want) { + t.Fatalf("expected error to contain %q\nerror was:\n%s", want, got) + } +} + +// A module may have required_providers configured in files loaded later than +// resources. These provider settings should still be reflected in the +// resources' configuration. 
+func TestModule_required_providers_after_resource(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-after-resource") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + + req, exists := mod.ProviderRequirements.RequiredProviders["test"] + if !exists { + t.Fatal("no provider requirements found for \"test\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"test\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + + if got := mod.ManagedResources["test_instance.my-instance"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"test_instance.my-instance\"\ngot: %s\nwant: %s", + got, want, + ) + } +} + +// We support overrides for required_providers blocks, which should replace the +// entire block for each provider localname, leaving other blocks unaffected. +// This should also be reflected in any resources in the module using this +// provider. 
+func TestModule_required_provider_overrides(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-overrides") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // The foo provider and resource should be unaffected + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "foo") + req, exists := mod.ProviderRequirements.RequiredProviders["foo"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + if got := mod.ManagedResources["foo_thing.ft"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"foo_thing.ft\"\ngot: %s\nwant: %s", + got, want, + ) + } + + // The bar provider and resource should be using the override config + want = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "blorp", "bar") + req, exists = mod.ProviderRequirements.RequiredProviders["bar"] + if !exists { + t.Fatal("no provider requirements found for \"bar\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"bar\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + if gotVer, wantVer := req.Requirement.Required.String(), "~>2.0.0"; gotVer != wantVer { + t.Errorf("wrong provider version constraint for \"bar\"\ngot: %s\nwant: %s", + gotVer, wantVer, + ) + } + if got := mod.ManagedResources["bar_thing.bt"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"bar_thing.bt\"\ngot: %s\nwant: %s", + got, want, + ) + } +} + +// Resources without explicit provider configuration are assigned a provider +// implied based on the resource type. For example, this resource: +// +// resource "foo_instance" "test" {} +// +// ...is assigned to whichever provider has local name "foo" in the current +// module. 
+// +// To find the correct provider, we first look in the module's provider +// requirements map for a local name matching the resource type, and fall back +// to a default provider if none is found. This applies to both managed and +// data resources. +func TestModule_implied_provider(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // The three providers used in the config resources + foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") + bar := addrs.NewDefaultProvider("bar") + + // Verify that the registry.acme.corp/acme/foo provider is defined in the + // module provider requirements with local name "foo" + req, exists := mod.ProviderRequirements.RequiredProviders["foo"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != foo { + t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", + req.Type, foo, + ) + } + + // Verify that the acme/something provider is defined in the + // module provider requirements with local name "whatever" + req, exists = mod.ProviderRequirements.RequiredProviders["whatever"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != whatever { + t.Errorf("wrong provider addr for \"whatever\"\ngot: %s\nwant: %s", + req.Type, whatever, + ) + } + + // Check that resources are assigned the correct providers: foo_* resources + // should have the custom foo provider, bar_* resources the default bar + // provider. 
+ tests := []struct { + Address string + Provider addrs.Provider + }{ + {"foo_resource.a", foo}, + {"data.foo_resource.b", foo}, + {"bar_resource.c", bar}, + {"data.bar_resource.d", bar}, + {"whatever_resource.e", whatever}, + {"data.whatever_resource.f", whatever}, + } + for _, test := range tests { + resources := mod.ManagedResources + if strings.HasPrefix(test.Address, "data.") { + resources = mod.DataResources + } + resource, exists := resources[test.Address] + if !exists { + t.Errorf("could not find resource %q in %#v", test.Address, resources) + continue + } + if got := resource.Provider; !got.Equals(test.Provider) { + t.Errorf("wrong provider addr for %q\ngot: %s\nwant: %s", + test.Address, got, test.Provider, + ) + } + } +} + +func TestImpliedProviderForUnqualifiedType(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") + bar := addrs.NewDefaultProvider("bar") + tf := addrs.NewBuiltInProvider("terraform") + + tests := []struct { + Type string + Provider addrs.Provider + }{ + {"foo", foo}, + {"whatever", whatever}, + {"bar", bar}, + {"terraform", tf}, + } + for _, test := range tests { + got := mod.ImpliedProviderForUnqualifiedType(test.Type) + if !got.Equals(test.Provider) { + t.Errorf("wrong result for %q: got %#v, want %#v\n", test.Type, got, test.Provider) + } + } +} + +func TestModule_backend_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + gotType := mod.Backend.Type + wantType := "bar" + + if gotType != wantType { + t.Errorf("wrong result for backend type: got %#v, want %#v\n", gotType, wantType) + } + + attrs, _ := mod.Backend.Config.JustAttributes() + + gotAttr, diags := 
attrs["path"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED/relative/path/to/terraform.tfstate") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for backend 'path': got %#v, want %#v\n", gotAttr, wantAttr) + } +} + +// Unlike most other overrides, backend blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend. +func TestModule_backend_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend == nil { + t.Errorf("expected module Backend not to be nil") + } +} + +func TestModule_cloud_override_backend(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-with-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend != nil { + t.Errorf("expected module Backend to be nil") + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +// Unlike most other overrides, cloud blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend and cloud blocks +// override backends. 
+func TestModule_cloud_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +func TestModule_cloud_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + attrs, _ := mod.CloudConfig.Config.JustAttributes() + + gotAttr, diags := attrs["organization"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for Cloud 'organization': got %#v, want %#v\n", gotAttr, wantAttr) + } + + // The override should have completely replaced the cloud block in the primary file, no merging + if attrs["should_not_be_present_with_override"] != nil { + t.Errorf("expected 'should_not_be_present_with_override' attribute to be nil") + } +} + +func TestModule_cloud_duplicate_overrides(t *testing.T) { + _, diags := testModuleFromDir("testdata/invalid-modules/override-cloud-duplicates") + want := `Duplicate Terraform Cloud configurations` + if got := diags.Error(); !strings.Contains(got, want) { + t.Fatalf("expected module error to contain %q\nerror was:\n%s", want, got) + } +} diff --git a/internal/configs/moved.go b/configs/moved.go similarity index 97% rename from internal/configs/moved.go rename to configs/moved.go index 5cfbd5dfb01e..f892cbc07b6e 100644 --- a/internal/configs/moved.go +++ b/configs/moved.go @@ -2,7 +2,7 @@ package configs import ( "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) type Moved struct { diff --git a/internal/configs/moved_test.go b/configs/moved_test.go similarity index 98% rename from internal/configs/moved_test.go rename to configs/moved_test.go index 
433525d28c17..ec5fe203f8f0 100644 --- a/internal/configs/moved_test.go +++ b/configs/moved_test.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestMovedBlock_decode(t *testing.T) { diff --git a/internal/configs/named_values.go b/configs/named_values.go similarity index 99% rename from internal/configs/named_values.go rename to configs/named_values.go index 3ce759c8713e..abfe0779ff24 100644 --- a/internal/configs/named_values.go +++ b/configs/named_values.go @@ -10,7 +10,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // A consistent detail message for all "not a valid identifier" diagnostics. diff --git a/internal/configs/parser.go b/configs/parser.go similarity index 100% rename from internal/configs/parser.go rename to configs/parser.go diff --git a/internal/configs/parser_config.go b/configs/parser_config.go similarity index 100% rename from internal/configs/parser_config.go rename to configs/parser_config.go diff --git a/internal/configs/parser_config_dir.go b/configs/parser_config_dir.go similarity index 100% rename from internal/configs/parser_config_dir.go rename to configs/parser_config_dir.go diff --git a/internal/configs/parser_config_dir_test.go b/configs/parser_config_dir_test.go similarity index 100% rename from internal/configs/parser_config_dir_test.go rename to configs/parser_config_dir_test.go diff --git a/internal/configs/parser_config_test.go b/configs/parser_config_test.go similarity index 100% rename from internal/configs/parser_config_test.go rename to configs/parser_config_test.go diff --git a/internal/configs/parser_test.go b/configs/parser_test.go similarity index 100% rename from internal/configs/parser_test.go rename to 
configs/parser_test.go diff --git a/internal/configs/parser_values.go b/configs/parser_values.go similarity index 100% rename from internal/configs/parser_values.go rename to configs/parser_values.go diff --git a/internal/configs/parser_values_test.go b/configs/parser_values_test.go similarity index 100% rename from internal/configs/parser_values_test.go rename to configs/parser_values_test.go diff --git a/configs/provider.go b/configs/provider.go new file mode 100644 index 000000000000..beb56f424e4a --- /dev/null +++ b/configs/provider.go @@ -0,0 +1,282 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" +) + +// Provider represents a "provider" block in a module or file. A provider +// block is a provider configuration, and there can be zero or more +// configurations for each actual provider. +type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range + + // TODO: this may not be set in some cases, so it is not yet suitable for + // use outside of this package. We currently only use it for internal + // validation, but once we verify that this can be set in all cases, we can + // export this so providers don't need to be re-resolved. + // This same field is also added to the ProviderConfigRef struct. + providerType addrs.Provider +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, config, moreDiags := block.Body.PartialContent(providerBlockSchema) + diags = append(diags, moreDiags...) + + // Provider names must be localized. Produce an error with a message + // indicating the action the user can take to fix this message if the local + // name is not localized. 
+ name := block.Labels[0] + nameDiags := checkProviderNameNormalized(name, block.DefRange) + diags = append(diags, nameDiags...) + if nameDiags.HasErrors() { + // If the name is invalid then we mustn't produce a result because + // downstreams could try to use it as a provider type and then crash. + return nil, diags + } + + provider := &Provider{ + Name: name, + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Version constraints inside provider configuration blocks are deprecated", + Detail: "Terraform 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of Terraform. To silence this warning, move the provider version constraint into the required_providers block.", + Subject: attr.Expr.Range().Ptr(), + }) + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) 
+ } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + var seenEscapeBlock *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as provider-specific rather than as meta-arguments, but each provider block can have only one such block. The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. + provider.Config = hcl.MergeBodies([]hcl.Body{provider.Config, block.Body}) + + default: + // All of the other block types in our schema are reserved for + // future expansion. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. 
+func (p *Provider) Addr() addrs.LocalProviderConfig { + return addrs.LocalProviderConfig{ + LocalName: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ParseProviderConfigCompact parses the given absolute traversal as a relative +// provider address in compact form. The following are examples of traversals +// that can be successfully parsed as compact relative provider configuration +// addresses: +// +// - aws +// - aws.foo +// +// This function will panic if given a relative traversal. +// +// If the returned diagnostics contains errors then the result value is invalid +// and must not be used. +func ParseProviderConfigCompact(traversal hcl.Traversal) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := addrs.LocalProviderConfig{ + LocalName: traversal.RootName(), + } + + if len(traversal) < 2 { + // Just a type name, then. + return ret, diags + } + + aliasStep := traversal[1] + switch ts := aliasStep.(type) { + case hcl.TraverseAttr: + ret.Alias = ts.Name + return ret, diags + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", + Subject: aliasStep.SourceRange().Ptr(), + }) + } + + if len(traversal) > 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous extra operators after provider configuration address.", + Subject: traversal[2:].SourceRange().Ptr(), + }) + } + + return ret, diags +} + +// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. 
+// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseProviderConfigCompactStr(str string) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return addrs.LocalProviderConfig{}, diags + } + + addr, addrDiags := ParseProviderConfigCompact(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "_"}, // meta-argument escaping block + + // The rest of these are reserved for future expansion. + {Type: "lifecycle"}, + {Type: "locals"}, + }, +} + +// checkProviderNameNormalized verifies that the given string is already +// normalized and returns an error if not. 
+func checkProviderNameNormalized(name string, declrange hcl.Range) hcl.Diagnostics { + var diags hcl.Diagnostics + // verify that the provider local name is normalized + normalized, err := addrs.IsProviderPartNormalized(name) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider local name", + Detail: fmt.Sprintf("%s is an invalid provider local name: %s", name, err), + Subject: &declrange, + }) + return diags + } + if !normalized { + // we would have returned this error already + normalizedProvider, _ := addrs.ParseProviderPart(name) + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider local name", + Detail: fmt.Sprintf("Provider names must be normalized. Replace %q with %q to fix this error.", name, normalizedProvider), + Subject: &declrange, + }) + } + return diags +} diff --git a/internal/configs/provider_meta.go b/configs/provider_meta.go similarity index 100% rename from internal/configs/provider_meta.go rename to configs/provider_meta.go diff --git a/internal/configs/provider_requirements.go b/configs/provider_requirements.go similarity index 99% rename from internal/configs/provider_requirements.go rename to configs/provider_requirements.go index c982c1a37cd8..736e3636f66e 100644 --- a/internal/configs/provider_requirements.go +++ b/configs/provider_requirements.go @@ -5,7 +5,7 @@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/configs/provider_requirements_test.go b/configs/provider_requirements_test.go similarity index 99% rename from internal/configs/provider_requirements_test.go rename to configs/provider_requirements_test.go index 8d00f6190294..d580f964e8f3 100644 --- a/internal/configs/provider_requirements_test.go +++ b/configs/provider_requirements_test.go @@ -8,7 +8,7 
@@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/configs/provider_test.go b/configs/provider_test.go new file mode 100644 index 000000000000..db8089f910d7 --- /dev/null +++ b/configs/provider_test.go @@ -0,0 +1,150 @@ +package configs + +import ( + "io/ioutil" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/addrs" +) + +func TestProviderReservedNames(t *testing.T) { + src, err := ioutil.ReadFile("testdata/invalid-files/provider-reserved.tf") + if err != nil { + t.Fatal(err) + } + parser := testParser(map[string]string{ + "config.tf": string(src), + }) + _, diags := parser.LoadConfigFile("config.tf") + + assertExactDiagnostics(t, diags, []string{ + //TODO: This deprecation warning will be removed in terraform v0.15. + `config.tf:4,13-20: Version constraints inside provider configuration blocks are deprecated; Terraform 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of Terraform. 
To silence this warning, move the provider version constraint into the required_providers block.`, + `config.tf:10,3-8: Reserved argument name in provider block; The provider argument name "count" is reserved for use by Terraform in a future version.`, + `config.tf:11,3-13: Reserved argument name in provider block; The provider argument name "depends_on" is reserved for use by Terraform in a future version.`, + `config.tf:12,3-11: Reserved argument name in provider block; The provider argument name "for_each" is reserved for use by Terraform in a future version.`, + `config.tf:14,3-12: Reserved block type name in provider block; The block type name "lifecycle" is reserved for use by Terraform in a future version.`, + `config.tf:15,3-9: Reserved block type name in provider block; The block type name "locals" is reserved for use by Terraform in a future version.`, + `config.tf:13,3-9: Reserved argument name in provider block; The provider argument name "source" is reserved for use by Terraform in a future version.`, + }) +} + +func TestParseProviderConfigCompact(t *testing.T) { + tests := []struct { + Input string + Want addrs.LocalProviderConfig + WantDiag string + }{ + { + `aws`, + addrs.LocalProviderConfig{ + LocalName: "aws", + }, + ``, + }, + { + `aws.foo`, + addrs.LocalProviderConfig{ + LocalName: "aws", + Alias: "foo", + }, + ``, + }, + { + `aws["foo"]`, + addrs.LocalProviderConfig{}, + `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Logf("- %s", diag) + } + return + } + + got, diags := ParseProviderConfigCompact(traversal) + + if test.WantDiag != "" { + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want 1", 
len(diags)) + } + gotDetail := diags[0].Description().Detail + if gotDetail != test.WantDiag { + t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) + } + return + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want 0", len(diags)) + } + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} + +func TestParseProviderConfigCompactStr(t *testing.T) { + tests := []struct { + Input string + Want addrs.LocalProviderConfig + WantDiag string + }{ + { + `aws`, + addrs.LocalProviderConfig{ + LocalName: "aws", + }, + ``, + }, + { + `aws.foo`, + addrs.LocalProviderConfig{ + LocalName: "aws", + Alias: "foo", + }, + ``, + }, + { + `aws["foo"]`, + addrs.LocalProviderConfig{}, + `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + got, diags := ParseProviderConfigCompactStr(test.Input) + + if test.WantDiag != "" { + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want 1", len(diags)) + } + gotDetail := diags[0].Description().Detail + if gotDetail != test.WantDiag { + t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) + } + return + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want 0", len(diags)) + } + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} diff --git a/internal/configs/provider_validation.go b/configs/provider_validation.go similarity index 99% rename from internal/configs/provider_validation.go rename to configs/provider_validation.go index 0cf7378d3639..1955b6dde3bf 100644 --- a/internal/configs/provider_validation.go +++ b/configs/provider_validation.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // validateProviderConfigs walks the full 
configuration tree from the root diff --git a/internal/configs/provisioner.go b/configs/provisioner.go similarity index 100% rename from internal/configs/provisioner.go rename to configs/provisioner.go diff --git a/internal/configs/provisioneronfailure_string.go b/configs/provisioneronfailure_string.go similarity index 100% rename from internal/configs/provisioneronfailure_string.go rename to configs/provisioneronfailure_string.go diff --git a/internal/configs/provisionerwhen_string.go b/configs/provisionerwhen_string.go similarity index 100% rename from internal/configs/provisionerwhen_string.go rename to configs/provisionerwhen_string.go diff --git a/configs/resource.go b/configs/resource.go new file mode 100644 index 000000000000..561877075972 --- /dev/null +++ b/configs/resource.go @@ -0,0 +1,777 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" +) + +// Resource represents a "resource" or "data" block in a module or file. +type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + Provider addrs.Provider + + Preconditions []*CheckRule + Postconditions []*CheckRule + + DependsOn []hcl.Traversal + + TriggersReplacement []hcl.Expression + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. 
+type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration that +// should be used for this resource. This function returns a default provider +// config addr if an explicit "provider" argument was not provided. +func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { + if r.ProviderConfigRef == nil { + // If no specific "provider" argument is given, we want to look up the + // provider config where the local name matches the implied provider + // from the resource type. This may be different from the resource's + // provider type. + return addrs.LocalProviderConfig{ + LocalName: r.Addr().ImpliedProvider(), + } + } + + return addrs.LocalProviderConfig{ + LocalName: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +// HasCustomConditions returns true if and only if the resource has at least +// one author-specified custom condition. 
+func (r *Resource) HasCustomConditions() bool { + return len(r.Postconditions) != 0 || len(r.Preconditions) != 0 +} + +func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { + var diags hcl.Diagnostics + r := &Resource{ + Mode: addrs.ManagedResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + Managed: &ManagedResource{}, + } + + content, remain, moreDiags := block.Body.PartialContent(resourceBlockSchema) + diags = append(diags, moreDiags...) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same resource block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) 
+ r.DependsOn = append(r.DependsOn, deps...) + } + + var seenLifecycle *hcl.Block + var seenConnection *hcl.Block + var seenEscapeBlock *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) + diags = append(diags, valDiags...) + r.Managed.CreateBeforeDestroySet = true + } + + if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) + diags = append(diags, valDiags...) + r.Managed.PreventDestroySet = true + } + + if attr, exists := lcContent.Attributes["replace_triggered_by"]; exists { + exprs, hclDiags := decodeReplaceTriggeredBy(attr.Expr) + diags = diags.Extend(hclDiags) + + r.TriggersReplacement = append(r.TriggersReplacement, exprs...) + } + + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { + + // ignore_changes can either be a list of relative traversals + // or it can be just the keyword "all" to ignore changes to this + // resource entirely. 
+ // ignore_changes = [ami, instance_type] + // ignore_changes = all + // We also allow two legacy forms for compatibility with earlier + // versions: + // ignore_changes = ["ami", "instance_type"] + // ignore_changes = ["*"] + + kw := hcl.ExprAsKeyword(attr.Expr) + + switch { + case kw == "all": + r.Managed.IgnoreAllChanges = true + default: + exprs, listDiags := hcl.ExprList(attr.Expr) + diags = append(diags, listDiags...) + + var ignoreAllRange hcl.Range + + for _, expr := range exprs { + + // our expr might be the literal string "*", which + // we accept as a deprecated way of saying "all". + if shimIsIgnoreChangesStar(expr) { + r.Managed.IgnoreAllChanges = true + ignoreAllRange = expr.Range() + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes wildcard", + Detail: "The [\"*\"] form of ignore_changes wildcard is was deprecated and is now invalid. Use \"ignore_changes = all\" to ignore changes to all attributes.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.RelTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) + } + } + + if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes ruleset", + Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", + Subject: &ignoreAllRange, + Context: attr.Expr.Range().Ptr(), + }) + } + + } + } + + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + + moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) + diags = append(diags, moreDiags...) 
+ + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. + panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + r.Managed.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + case "provisioner": + pv, pvDiags := decodeProvisionerBlock(block) + diags = append(diags, pvDiags...) + if pv != nil { + r.Managed.Provisioners = append(r.Managed.Provisioners, pv) + } + + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each resource block can have only one such block. The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. + r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in resource block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + // Now we can validate the connection block references if there are any destroy provisioners. + // TODO: should we eliminate standalone connection blocks? + if r.Managed.Connection != nil { + for _, p := range r.Managed.Provisioners { + if p.When == ProvisionerWhenDestroy { + diags = append(diags, onlySelfRefs(r.Managed.Connection.Config)...) + break + } + } + } + + return r, diags +} + +func decodeDataBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { + var diags hcl.Diagnostics + r := &Resource{ + Mode: addrs.DataResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + } + + content, remain, moreDiags := block.Body.PartialContent(dataBlockSchema) + diags = append(diags, moreDiags...) 
+ r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same data block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + var seenEscapeBlock *hcl.Block + var seenLifecycle *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each data block can have only one such block. 
The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. + r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) + + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + // All of the attributes defined for resource lifecycle are for + // managed resources only, so we can emit a common error message + // for any given attributes that HCL accepted. + for name, attr := range lcContent.Attributes { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource lifecycle argument", + Detail: fmt.Sprintf("The lifecycle argument %q is defined only for managed resources (\"resource\" blocks), and is not valid for data resources.", name), + Subject: attr.NameRange.Ptr(), + }) + } + + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + + moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) + diags = append(diags, moreDiags...) + + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. 
+ panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + + default: + // Any other block types are ones we're reserving for future use, + // but don't have any defined meaning today. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in data block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: block.TypeRange.Ptr(), + }) + } + } + + return r, diags +} + +// decodeReplaceTriggeredBy decodes and does basic validation of the +// replace_triggered_by expressions, ensuring they only contains references to +// a single resource, and the only extra variables are count.index or each.key. +func decodeReplaceTriggeredBy(expr hcl.Expression) ([]hcl.Expression, hcl.Diagnostics) { + // Since we are manually parsing the replace_triggered_by argument, we + // need to specially handle json configs, in which case the values will + // be json strings rather than hcl. To simplify parsing however we will + // decode the individual list elements, rather than the entire expression. + isJSON := hcljson.IsJSONExpression(expr) + + exprs, diags := hcl.ExprList(expr) + + for i, expr := range exprs { + if isJSON { + // We can abuse the hcl json api and rely on the fact that calling + // Value on a json expression with no EvalContext will return the + // raw string. We can then parse that as normal hcl syntax, and + // continue with the decoding. 
+ v, ds := expr.Value(nil) + diags = diags.Extend(ds) + if diags.HasErrors() { + continue + } + + expr, ds = hclsyntax.ParseExpression([]byte(v.AsString()), "", expr.Range().Start) + diags = diags.Extend(ds) + if diags.HasErrors() { + continue + } + // make sure to swap out the expression we're returning too + exprs[i] = expr + } + + refs, refDiags := lang.ReferencesInExpr(expr) + for _, diag := range refDiags { + severity := hcl.DiagError + if diag.Severity() == tfdiags.Warning { + severity = hcl.DiagWarning + } + + desc := diag.Description() + + diags = append(diags, &hcl.Diagnostic{ + Severity: severity, + Summary: desc.Summary, + Detail: desc.Detail, + Subject: expr.Range().Ptr(), + }) + } + + if refDiags.HasErrors() { + continue + } + + resourceCount := 0 + for _, ref := range refs { + switch sub := ref.Subject.(type) { + case addrs.Resource, addrs.ResourceInstance: + resourceCount++ + + case addrs.ForEachAttr: + if sub.Name != "key" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid each reference in replace_triggered_by expression", + Detail: "Only each.key may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + case addrs.CountAttr: + if sub.Name != "index" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count reference in replace_triggered_by expression", + Detail: "Only count.index may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + default: + // everything else should be simple traversals + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference in replace_triggered_by expression", + Detail: "Only resources, count.index, and each.key may be used in replace_triggered_by.", + Subject: expr.Range().Ptr(), + }) + } + } + + switch { + case resourceCount == 0: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid replace_triggered_by expression", + 
Detail: "Missing resource reference in replace_triggered_by expression.", + Subject: expr.Range().Ptr(), + }) + case resourceCount > 1: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid replace_triggered_by expression", + Detail: "Multiple resource references in replace_triggered_by expression.", + Subject: expr.Range().Ptr(), + }) + } + } + return exprs, diags +} + +type ProviderConfigRef struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if alias not set + + // TODO: this may not be set in some cases, so it is not yet suitable for + // use outside of this package. We currently only use it for internal + // validation, but once we verify that this can be set in all cases, we can + // export this so providers don't need to be re-resolved. + // This same field is also added to the Provider struct. + providerType addrs.Provider +} + +func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var shimDiags hcl.Diagnostics + expr, shimDiags = shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + + // AbsTraversalForExpr produces only generic errors, so we'll discard + // the errors given and produce our own with extra context. If we didn't + // get any errors then we might still have warnings, though. + if !travDiags.HasErrors() { + diags = append(diags, travDiags...) + } + + if len(traversal) < 1 || len(traversal) > 2 { + // A provider reference was given as a string literal in the legacy + // configuration language and there are lots of examples out there + // showing that usage, so we'll sniff for that situation here and + // produce a specialized error message for it to help users find + // the new correct form. 
+ if exprIsNativeQuotedString(expr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "A provider configuration reference must not be given in quotes.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + // verify that the provider local name is normalized + name := traversal.RootName() + nameDiags := checkProviderNameNormalized(name, traversal[0].SourceRange()) + diags = append(diags, nameDiags...) + if diags.HasErrors() { + return nil, diags + } + + ret := &ProviderConfigRef{ + Name: name, + NameRange: traversal[0].SourceRange(), + } + + if len(traversal) > 1 { + aliasStep, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", + Subject: traversal[1].SourceRange().Ptr(), + }) + return ret, diags + } + + ret.Alias = aliasStep.Name + ret.AliasRange = aliasStep.SourceRange().Ptr() + } + + return ret, diags +} + +// Addr returns the provider config address corresponding to the receiving +// config reference. +// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. 
+func (r *ProviderConfigRef) Addr() addrs.LocalProviderConfig { + return addrs.LocalProviderConfig{ + LocalName: r.Name, + Alias: r.Alias, + } +} + +func (r *ProviderConfigRef) String() string { + if r == nil { + return "" + } + if r.Alias != "" { + return fmt.Sprintf("%s.%s", r.Name, r.Alias) + } + return r.Name +} + +var commonResourceAttributes = []hcl.AttributeSchema{ + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "provider", + }, + { + Name: "depends_on", + }, +} + +var resourceBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "locals"}, // reserved for future use + {Type: "lifecycle"}, + {Type: "connection"}, + {Type: "provisioner", LabelNames: []string{"type"}}, + {Type: "_"}, // meta-argument escaping block + }, +} + +var dataBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "lifecycle"}, + {Type: "locals"}, // reserved for future use + {Type: "_"}, // meta-argument escaping block + }, +} + +var resourceLifecycleBlockSchema = &hcl.BodySchema{ + // We tell HCL that these elements are all valid for both "resource" + // and "data" lifecycle blocks, but the rules are actually more restrictive + // than that. We deal with that after decoding so that we can return + // more specific error messages than HCL would typically return itself. 
+ Attributes: []hcl.AttributeSchema{ + { + Name: "create_before_destroy", + }, + { + Name: "prevent_destroy", + }, + { + Name: "ignore_changes", + }, + { + Name: "replace_triggered_by", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, +} diff --git a/internal/configs/synth_body.go b/configs/synth_body.go similarity index 100% rename from internal/configs/synth_body.go rename to configs/synth_body.go diff --git a/internal/configs/synth_body_test.go b/configs/synth_body_test.go similarity index 100% rename from internal/configs/synth_body_test.go rename to configs/synth_body_test.go diff --git a/internal/configs/testdata/config-build/child_a/child_a.tf b/configs/testdata/config-build/child_a/child_a.tf similarity index 100% rename from internal/configs/testdata/config-build/child_a/child_a.tf rename to configs/testdata/config-build/child_a/child_a.tf diff --git a/internal/configs/testdata/config-build/child_b/child_b.tf b/configs/testdata/config-build/child_b/child_b.tf similarity index 100% rename from internal/configs/testdata/config-build/child_b/child_b.tf rename to configs/testdata/config-build/child_b/child_b.tf diff --git a/internal/configs/testdata/config-build/child_c/child_c.tf b/configs/testdata/config-build/child_c/child_c.tf similarity index 100% rename from internal/configs/testdata/config-build/child_c/child_c.tf rename to configs/testdata/config-build/child_c/child_c.tf diff --git a/internal/configs/testdata/config-build/root.tf b/configs/testdata/config-build/root.tf similarity index 100% rename from internal/configs/testdata/config-build/root.tf rename to configs/testdata/config-build/root.tf diff --git a/internal/configs/testdata/config-diagnostics/empty-configs/main.tf b/configs/testdata/config-diagnostics/empty-configs/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/empty-configs/main.tf rename to configs/testdata/config-diagnostics/empty-configs/main.tf 
diff --git a/internal/configs/testdata/config-diagnostics/empty-configs/mod/main.tf b/configs/testdata/config-diagnostics/empty-configs/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/empty-configs/mod/main.tf rename to configs/testdata/config-diagnostics/empty-configs/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/empty-configs/warnings b/configs/testdata/config-diagnostics/empty-configs/warnings similarity index 100% rename from internal/configs/testdata/config-diagnostics/empty-configs/warnings rename to configs/testdata/config-diagnostics/empty-configs/warnings diff --git a/internal/configs/testdata/config-diagnostics/incorrect-type/errors b/configs/testdata/config-diagnostics/incorrect-type/errors similarity index 100% rename from internal/configs/testdata/config-diagnostics/incorrect-type/errors rename to configs/testdata/config-diagnostics/incorrect-type/errors diff --git a/internal/configs/testdata/config-diagnostics/incorrect-type/main.tf b/configs/testdata/config-diagnostics/incorrect-type/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/incorrect-type/main.tf rename to configs/testdata/config-diagnostics/incorrect-type/main.tf diff --git a/internal/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf b/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf rename to configs/testdata/config-diagnostics/incorrect-type/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/incorrect-type/warnings b/configs/testdata/config-diagnostics/incorrect-type/warnings similarity index 100% rename from internal/configs/testdata/config-diagnostics/incorrect-type/warnings rename to configs/testdata/config-diagnostics/incorrect-type/warnings diff --git a/internal/configs/testdata/config-diagnostics/invalid-provider/errors 
b/configs/testdata/config-diagnostics/invalid-provider/errors similarity index 100% rename from internal/configs/testdata/config-diagnostics/invalid-provider/errors rename to configs/testdata/config-diagnostics/invalid-provider/errors diff --git a/internal/configs/testdata/config-diagnostics/invalid-provider/main.tf b/configs/testdata/config-diagnostics/invalid-provider/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/invalid-provider/main.tf rename to configs/testdata/config-diagnostics/invalid-provider/main.tf diff --git a/internal/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf b/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf rename to configs/testdata/config-diagnostics/invalid-provider/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf b/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf rename to configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf diff --git a/internal/configs/testdata/config-diagnostics/nested-provider/child/main.tf b/configs/testdata/config-diagnostics/nested-provider/child/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/nested-provider/child/main.tf rename to configs/testdata/config-diagnostics/nested-provider/child/main.tf diff --git a/internal/configs/testdata/config-diagnostics/nested-provider/errors b/configs/testdata/config-diagnostics/nested-provider/errors similarity index 100% rename from internal/configs/testdata/config-diagnostics/nested-provider/errors rename to configs/testdata/config-diagnostics/nested-provider/errors diff --git a/internal/configs/testdata/config-diagnostics/nested-provider/root.tf 
b/configs/testdata/config-diagnostics/nested-provider/root.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/nested-provider/root.tf rename to configs/testdata/config-diagnostics/nested-provider/root.tf diff --git a/internal/configs/testdata/config-diagnostics/override-provider/errors b/configs/testdata/config-diagnostics/override-provider/errors similarity index 100% rename from internal/configs/testdata/config-diagnostics/override-provider/errors rename to configs/testdata/config-diagnostics/override-provider/errors diff --git a/internal/configs/testdata/config-diagnostics/override-provider/main.tf b/configs/testdata/config-diagnostics/override-provider/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/override-provider/main.tf rename to configs/testdata/config-diagnostics/override-provider/main.tf diff --git a/internal/configs/testdata/config-diagnostics/override-provider/mod/main.tf b/configs/testdata/config-diagnostics/override-provider/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/override-provider/mod/main.tf rename to configs/testdata/config-diagnostics/override-provider/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf b/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf rename to configs/testdata/config-diagnostics/pass-inherited-provider/main.tf diff --git a/internal/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf b/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf rename to configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf diff --git 
a/internal/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf b/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf rename to configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf diff --git a/internal/configs/testdata/config-diagnostics/pass-inherited-provider/warnings b/configs/testdata/config-diagnostics/pass-inherited-provider/warnings similarity index 100% rename from internal/configs/testdata/config-diagnostics/pass-inherited-provider/warnings rename to configs/testdata/config-diagnostics/pass-inherited-provider/warnings diff --git a/internal/configs/testdata/config-diagnostics/required-alias/errors b/configs/testdata/config-diagnostics/required-alias/errors similarity index 100% rename from internal/configs/testdata/config-diagnostics/required-alias/errors rename to configs/testdata/config-diagnostics/required-alias/errors diff --git a/internal/configs/testdata/config-diagnostics/required-alias/main.tf b/configs/testdata/config-diagnostics/required-alias/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/required-alias/main.tf rename to configs/testdata/config-diagnostics/required-alias/main.tf diff --git a/internal/configs/testdata/config-diagnostics/required-alias/mod/main.tf b/configs/testdata/config-diagnostics/required-alias/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/required-alias/mod/main.tf rename to configs/testdata/config-diagnostics/required-alias/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/unexpected-provider/main.tf b/configs/testdata/config-diagnostics/unexpected-provider/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/unexpected-provider/main.tf rename to configs/testdata/config-diagnostics/unexpected-provider/main.tf diff 
--git a/internal/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf b/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf rename to configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/unexpected-provider/warnings b/configs/testdata/config-diagnostics/unexpected-provider/warnings similarity index 100% rename from internal/configs/testdata/config-diagnostics/unexpected-provider/warnings rename to configs/testdata/config-diagnostics/unexpected-provider/warnings diff --git a/internal/configs/testdata/config-diagnostics/unknown-root-provider/main.tf b/configs/testdata/config-diagnostics/unknown-root-provider/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/unknown-root-provider/main.tf rename to configs/testdata/config-diagnostics/unknown-root-provider/main.tf diff --git a/internal/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf b/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf rename to configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf diff --git a/internal/configs/testdata/config-diagnostics/unknown-root-provider/warnings b/configs/testdata/config-diagnostics/unknown-root-provider/warnings similarity index 100% rename from internal/configs/testdata/config-diagnostics/unknown-root-provider/warnings rename to configs/testdata/config-diagnostics/unknown-root-provider/warnings diff --git a/internal/configs/testdata/config-diagnostics/with-depends-on/main.tf b/configs/testdata/config-diagnostics/with-depends-on/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/with-depends-on/main.tf rename to 
configs/testdata/config-diagnostics/with-depends-on/main.tf diff --git a/internal/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf rename to configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf diff --git a/internal/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf rename to configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf diff --git a/internal/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf similarity index 100% rename from internal/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf rename to configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf diff --git a/internal/configs/testdata/dir-empty/.gitkeep b/configs/testdata/dir-empty/.gitkeep similarity index 100% rename from internal/configs/testdata/dir-empty/.gitkeep rename to configs/testdata/dir-empty/.gitkeep diff --git a/internal/configs/testdata/duplicate-local-name/main.tf b/configs/testdata/duplicate-local-name/main.tf similarity index 100% rename from internal/configs/testdata/duplicate-local-name/main.tf rename to configs/testdata/duplicate-local-name/main.tf diff --git a/internal/configs/testdata/error-files/destroy-provisioners.tf b/configs/testdata/error-files/destroy-provisioners.tf similarity index 100% rename from internal/configs/testdata/error-files/destroy-provisioners.tf rename to configs/testdata/error-files/destroy-provisioners.tf diff --git a/internal/configs/testdata/error-files/ignore_changes.tf 
b/configs/testdata/error-files/ignore_changes.tf similarity index 100% rename from internal/configs/testdata/error-files/ignore_changes.tf rename to configs/testdata/error-files/ignore_changes.tf diff --git a/internal/configs/testdata/error-files/invalid_language_edition.tf b/configs/testdata/error-files/invalid_language_edition.tf similarity index 100% rename from internal/configs/testdata/error-files/invalid_language_edition.tf rename to configs/testdata/error-files/invalid_language_edition.tf diff --git a/internal/configs/testdata/error-files/module-invalid-registry-source-with-module.tf b/configs/testdata/error-files/module-invalid-registry-source-with-module.tf similarity index 100% rename from internal/configs/testdata/error-files/module-invalid-registry-source-with-module.tf rename to configs/testdata/error-files/module-invalid-registry-source-with-module.tf diff --git a/internal/configs/testdata/error-files/module-local-source-with-version.tf b/configs/testdata/error-files/module-local-source-with-version.tf similarity index 100% rename from internal/configs/testdata/error-files/module-local-source-with-version.tf rename to configs/testdata/error-files/module-local-source-with-version.tf diff --git a/internal/configs/testdata/error-files/precondition-postcondition-constant.tf b/configs/testdata/error-files/precondition-postcondition-constant.tf similarity index 100% rename from internal/configs/testdata/error-files/precondition-postcondition-constant.tf rename to configs/testdata/error-files/precondition-postcondition-constant.tf diff --git a/internal/configs/testdata/error-files/precondition-postcondition-selfref.tf b/configs/testdata/error-files/precondition-postcondition-selfref.tf similarity index 100% rename from internal/configs/testdata/error-files/precondition-postcondition-selfref.tf rename to configs/testdata/error-files/precondition-postcondition-selfref.tf diff --git a/internal/configs/testdata/error-files/provider-source-prefix.tf 
b/configs/testdata/error-files/provider-source-prefix.tf similarity index 100% rename from internal/configs/testdata/error-files/provider-source-prefix.tf rename to configs/testdata/error-files/provider-source-prefix.tf diff --git a/internal/configs/testdata/error-files/required-providers-toplevel.tf b/configs/testdata/error-files/required-providers-toplevel.tf similarity index 100% rename from internal/configs/testdata/error-files/required-providers-toplevel.tf rename to configs/testdata/error-files/required-providers-toplevel.tf diff --git a/internal/configs/testdata/error-files/unsupported_language_edition.tf b/configs/testdata/error-files/unsupported_language_edition.tf similarity index 100% rename from internal/configs/testdata/error-files/unsupported_language_edition.tf rename to configs/testdata/error-files/unsupported_language_edition.tf diff --git a/internal/configs/testdata/error-files/variable_type_quoted.tf b/configs/testdata/error-files/variable_type_quoted.tf similarity index 100% rename from internal/configs/testdata/error-files/variable_type_quoted.tf rename to configs/testdata/error-files/variable_type_quoted.tf diff --git a/internal/configs/testdata/error-files/vendor_provisioners.tf b/configs/testdata/error-files/vendor_provisioners.tf similarity index 100% rename from internal/configs/testdata/error-files/vendor_provisioners.tf rename to configs/testdata/error-files/vendor_provisioners.tf diff --git a/internal/configs/testdata/escaping-blocks/data/data-escaping-block.tf b/configs/testdata/escaping-blocks/data/data-escaping-block.tf similarity index 100% rename from internal/configs/testdata/escaping-blocks/data/data-escaping-block.tf rename to configs/testdata/escaping-blocks/data/data-escaping-block.tf diff --git a/internal/configs/testdata/escaping-blocks/module/child/nothing.tf b/configs/testdata/escaping-blocks/module/child/nothing.tf similarity index 100% rename from internal/configs/testdata/escaping-blocks/module/child/nothing.tf rename 
to configs/testdata/escaping-blocks/module/child/nothing.tf diff --git a/internal/configs/testdata/escaping-blocks/module/module-escaping-block.tf b/configs/testdata/escaping-blocks/module/module-escaping-block.tf similarity index 100% rename from internal/configs/testdata/escaping-blocks/module/module-escaping-block.tf rename to configs/testdata/escaping-blocks/module/module-escaping-block.tf diff --git a/internal/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf b/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf similarity index 100% rename from internal/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf rename to configs/testdata/escaping-blocks/provider/provider-escaping-block.tf diff --git a/internal/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf b/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf similarity index 100% rename from internal/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf rename to configs/testdata/escaping-blocks/resource/resource-escaping-block.tf diff --git a/internal/configs/testdata/experiments/concluded/concluded_experiment.tf b/configs/testdata/experiments/concluded/concluded_experiment.tf similarity index 100% rename from internal/configs/testdata/experiments/concluded/concluded_experiment.tf rename to configs/testdata/experiments/concluded/concluded_experiment.tf diff --git a/internal/configs/testdata/experiments/current/current_experiment.tf b/configs/testdata/experiments/current/current_experiment.tf similarity index 100% rename from internal/configs/testdata/experiments/current/current_experiment.tf rename to configs/testdata/experiments/current/current_experiment.tf diff --git a/internal/configs/testdata/experiments/invalid/invalid_experiments.tf b/configs/testdata/experiments/invalid/invalid_experiments.tf similarity index 100% rename from internal/configs/testdata/experiments/invalid/invalid_experiments.tf rename 
to configs/testdata/experiments/invalid/invalid_experiments.tf diff --git a/internal/configs/testdata/experiments/unknown/unknown_experiment.tf b/configs/testdata/experiments/unknown/unknown_experiment.tf similarity index 100% rename from internal/configs/testdata/experiments/unknown/unknown_experiment.tf rename to configs/testdata/experiments/unknown/unknown_experiment.tf diff --git a/internal/configs/testdata/invalid-files/data-count-and-for_each.tf b/configs/testdata/invalid-files/data-count-and-for_each.tf similarity index 100% rename from internal/configs/testdata/invalid-files/data-count-and-for_each.tf rename to configs/testdata/invalid-files/data-count-and-for_each.tf diff --git a/internal/configs/testdata/invalid-files/data-invalid-provider-reference.tf b/configs/testdata/invalid-files/data-invalid-provider-reference.tf similarity index 100% rename from internal/configs/testdata/invalid-files/data-invalid-provider-reference.tf rename to configs/testdata/invalid-files/data-invalid-provider-reference.tf diff --git a/internal/configs/testdata/invalid-files/data-reserved-locals.tf b/configs/testdata/invalid-files/data-reserved-locals.tf similarity index 100% rename from internal/configs/testdata/invalid-files/data-reserved-locals.tf rename to configs/testdata/invalid-files/data-reserved-locals.tf diff --git a/internal/configs/testdata/invalid-files/data-resource-lifecycle.tf b/configs/testdata/invalid-files/data-resource-lifecycle.tf similarity index 100% rename from internal/configs/testdata/invalid-files/data-resource-lifecycle.tf rename to configs/testdata/invalid-files/data-resource-lifecycle.tf diff --git a/internal/configs/testdata/invalid-files/everything-is-a-plan.tf b/configs/testdata/invalid-files/everything-is-a-plan.tf similarity index 100% rename from internal/configs/testdata/invalid-files/everything-is-a-plan.tf rename to configs/testdata/invalid-files/everything-is-a-plan.tf diff --git 
a/internal/configs/testdata/invalid-files/interp-in-data-label.tf b/configs/testdata/invalid-files/interp-in-data-label.tf similarity index 100% rename from internal/configs/testdata/invalid-files/interp-in-data-label.tf rename to configs/testdata/invalid-files/interp-in-data-label.tf diff --git a/internal/configs/testdata/invalid-files/interp-in-rsrc-label.tf b/configs/testdata/invalid-files/interp-in-rsrc-label.tf similarity index 100% rename from internal/configs/testdata/invalid-files/interp-in-rsrc-label.tf rename to configs/testdata/invalid-files/interp-in-rsrc-label.tf diff --git a/internal/configs/testdata/invalid-files/json-as-native-syntax.tf b/configs/testdata/invalid-files/json-as-native-syntax.tf similarity index 100% rename from internal/configs/testdata/invalid-files/json-as-native-syntax.tf rename to configs/testdata/invalid-files/json-as-native-syntax.tf diff --git a/internal/configs/testdata/invalid-files/module-calls.tf b/configs/testdata/invalid-files/module-calls.tf similarity index 100% rename from internal/configs/testdata/invalid-files/module-calls.tf rename to configs/testdata/invalid-files/module-calls.tf diff --git a/internal/configs/testdata/invalid-files/native-syntax-as-json.tf.json b/configs/testdata/invalid-files/native-syntax-as-json.tf.json similarity index 100% rename from internal/configs/testdata/invalid-files/native-syntax-as-json.tf.json rename to configs/testdata/invalid-files/native-syntax-as-json.tf.json diff --git a/internal/configs/testdata/invalid-files/precondition-postcondition-badref.tf b/configs/testdata/invalid-files/precondition-postcondition-badref.tf similarity index 100% rename from internal/configs/testdata/invalid-files/precondition-postcondition-badref.tf rename to configs/testdata/invalid-files/precondition-postcondition-badref.tf diff --git a/internal/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf 
b/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf similarity index 100% rename from internal/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf rename to configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf diff --git a/internal/configs/testdata/invalid-files/provider-localname-normalization.tf b/configs/testdata/invalid-files/provider-localname-normalization.tf similarity index 100% rename from internal/configs/testdata/invalid-files/provider-localname-normalization.tf rename to configs/testdata/invalid-files/provider-localname-normalization.tf diff --git a/internal/configs/testdata/invalid-files/provider-reserved.tf b/configs/testdata/invalid-files/provider-reserved.tf similarity index 100% rename from internal/configs/testdata/invalid-files/provider-reserved.tf rename to configs/testdata/invalid-files/provider-reserved.tf diff --git a/internal/configs/testdata/invalid-files/provider-syntax.tf b/configs/testdata/invalid-files/provider-syntax.tf similarity index 100% rename from internal/configs/testdata/invalid-files/provider-syntax.tf rename to configs/testdata/invalid-files/provider-syntax.tf diff --git a/internal/configs/testdata/invalid-files/resource-count-and-for_each.tf b/configs/testdata/invalid-files/resource-count-and-for_each.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resource-count-and-for_each.tf rename to configs/testdata/invalid-files/resource-count-and-for_each.tf diff --git a/internal/configs/testdata/invalid-files/resource-invalid-provider-reference.tf b/configs/testdata/invalid-files/resource-invalid-provider-reference.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resource-invalid-provider-reference.tf rename to configs/testdata/invalid-files/resource-invalid-provider-reference.tf diff --git a/internal/configs/testdata/invalid-files/resource-lifecycle-badbool.tf 
b/configs/testdata/invalid-files/resource-lifecycle-badbool.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resource-lifecycle-badbool.tf rename to configs/testdata/invalid-files/resource-lifecycle-badbool.tf diff --git a/internal/configs/testdata/invalid-files/resource-name-invalid.tf b/configs/testdata/invalid-files/resource-name-invalid.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resource-name-invalid.tf rename to configs/testdata/invalid-files/resource-name-invalid.tf diff --git a/internal/configs/testdata/invalid-files/resource-reserved-locals.tf b/configs/testdata/invalid-files/resource-reserved-locals.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resource-reserved-locals.tf rename to configs/testdata/invalid-files/resource-reserved-locals.tf diff --git a/internal/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf b/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf similarity index 100% rename from internal/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf rename to configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf diff --git a/internal/configs/testdata/invalid-files/triggered-invalid-each.tf b/configs/testdata/invalid-files/triggered-invalid-each.tf similarity index 100% rename from internal/configs/testdata/invalid-files/triggered-invalid-each.tf rename to configs/testdata/invalid-files/triggered-invalid-each.tf diff --git a/internal/configs/testdata/invalid-files/triggered-invalid-expression.tf b/configs/testdata/invalid-files/triggered-invalid-expression.tf similarity index 100% rename from internal/configs/testdata/invalid-files/triggered-invalid-expression.tf rename to configs/testdata/invalid-files/triggered-invalid-expression.tf diff --git a/internal/configs/testdata/invalid-files/unexpected-attr.tf b/configs/testdata/invalid-files/unexpected-attr.tf similarity index 100% 
rename from internal/configs/testdata/invalid-files/unexpected-attr.tf rename to configs/testdata/invalid-files/unexpected-attr.tf diff --git a/internal/configs/testdata/invalid-files/unexpected-block.tf b/configs/testdata/invalid-files/unexpected-block.tf similarity index 100% rename from internal/configs/testdata/invalid-files/unexpected-block.tf rename to configs/testdata/invalid-files/unexpected-block.tf diff --git a/internal/configs/testdata/invalid-files/variable-bad-default.tf b/configs/testdata/invalid-files/variable-bad-default.tf similarity index 100% rename from internal/configs/testdata/invalid-files/variable-bad-default.tf rename to configs/testdata/invalid-files/variable-bad-default.tf diff --git a/internal/configs/testdata/invalid-files/variable-bad-sensitive.tf b/configs/testdata/invalid-files/variable-bad-sensitive.tf similarity index 100% rename from internal/configs/testdata/invalid-files/variable-bad-sensitive.tf rename to configs/testdata/invalid-files/variable-bad-sensitive.tf diff --git a/internal/configs/testdata/invalid-files/variable-type-unknown.tf b/configs/testdata/invalid-files/variable-type-unknown.tf similarity index 100% rename from internal/configs/testdata/invalid-files/variable-type-unknown.tf rename to configs/testdata/invalid-files/variable-type-unknown.tf diff --git a/internal/configs/testdata/invalid-files/variable-validation-condition-badref.tf b/configs/testdata/invalid-files/variable-validation-condition-badref.tf similarity index 100% rename from internal/configs/testdata/invalid-files/variable-validation-condition-badref.tf rename to configs/testdata/invalid-files/variable-validation-condition-badref.tf diff --git a/internal/configs/testdata/invalid-files/variable-validation-condition-noref.tf b/configs/testdata/invalid-files/variable-validation-condition-noref.tf similarity index 100% rename from internal/configs/testdata/invalid-files/variable-validation-condition-noref.tf rename to 
configs/testdata/invalid-files/variable-validation-condition-noref.tf diff --git a/internal/configs/testdata/invalid-files/version-variable.tf b/configs/testdata/invalid-files/version-variable.tf similarity index 100% rename from internal/configs/testdata/invalid-files/version-variable.tf rename to configs/testdata/invalid-files/version-variable.tf diff --git a/internal/configs/testdata/invalid-files/zerolen.tf.json b/configs/testdata/invalid-files/zerolen.tf.json similarity index 100% rename from internal/configs/testdata/invalid-files/zerolen.tf.json rename to configs/testdata/invalid-files/zerolen.tf.json diff --git a/internal/configs/testdata/invalid-modules/multiple-required-providers/a.tf b/configs/testdata/invalid-modules/multiple-required-providers/a.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/multiple-required-providers/a.tf rename to configs/testdata/invalid-modules/multiple-required-providers/a.tf diff --git a/internal/configs/testdata/invalid-modules/multiple-required-providers/b.tf b/configs/testdata/invalid-modules/multiple-required-providers/b.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/multiple-required-providers/b.tf rename to configs/testdata/invalid-modules/multiple-required-providers/b.tf diff --git a/internal/configs/testdata/invalid-modules/nullable-with-default-null/main.tf b/configs/testdata/invalid-modules/nullable-with-default-null/main.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/nullable-with-default-null/main.tf rename to configs/testdata/invalid-modules/nullable-with-default-null/main.tf diff --git a/internal/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf b/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf rename to configs/testdata/invalid-modules/override-cloud-duplicates/main.tf diff 
--git a/internal/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf b/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf rename to configs/testdata/invalid-modules/override-cloud-duplicates/override.tf diff --git a/internal/configs/testdata/invalid-modules/override-nonexist-variable/override.tf b/configs/testdata/invalid-modules/override-nonexist-variable/override.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/override-nonexist-variable/override.tf rename to configs/testdata/invalid-modules/override-nonexist-variable/override.tf diff --git a/internal/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf b/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf rename to configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf diff --git a/internal/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf b/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf rename to configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf diff --git a/internal/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf b/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf similarity index 100% rename from internal/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf rename to configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf diff --git a/internal/configs/testdata/nested-backend-warning/child/child.tf 
b/configs/testdata/nested-backend-warning/child/child.tf similarity index 100% rename from internal/configs/testdata/nested-backend-warning/child/child.tf rename to configs/testdata/nested-backend-warning/child/child.tf diff --git a/internal/configs/testdata/nested-backend-warning/root.tf b/configs/testdata/nested-backend-warning/root.tf similarity index 100% rename from internal/configs/testdata/nested-backend-warning/root.tf rename to configs/testdata/nested-backend-warning/root.tf diff --git a/internal/configs/testdata/nested-cloud-warning/child/child.tf b/configs/testdata/nested-cloud-warning/child/child.tf similarity index 100% rename from internal/configs/testdata/nested-cloud-warning/child/child.tf rename to configs/testdata/nested-cloud-warning/child/child.tf diff --git a/internal/configs/testdata/nested-cloud-warning/root.tf b/configs/testdata/nested-cloud-warning/root.tf similarity index 100% rename from internal/configs/testdata/nested-cloud-warning/root.tf rename to configs/testdata/nested-cloud-warning/root.tf diff --git a/internal/configs/testdata/nested-errors/child_a/child_a.tf b/configs/testdata/nested-errors/child_a/child_a.tf similarity index 100% rename from internal/configs/testdata/nested-errors/child_a/child_a.tf rename to configs/testdata/nested-errors/child_a/child_a.tf diff --git a/internal/configs/testdata/nested-errors/child_c/child_c.tf b/configs/testdata/nested-errors/child_c/child_c.tf similarity index 100% rename from internal/configs/testdata/nested-errors/child_c/child_c.tf rename to configs/testdata/nested-errors/child_c/child_c.tf diff --git a/internal/configs/testdata/nested-errors/root.tf b/configs/testdata/nested-errors/root.tf similarity index 100% rename from internal/configs/testdata/nested-errors/root.tf rename to configs/testdata/nested-errors/root.tf diff --git a/internal/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf 
b/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf similarity index 100% rename from internal/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf rename to configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf diff --git a/internal/configs/testdata/provider-reqs/child/provider-reqs-child.tf b/configs/testdata/provider-reqs/child/provider-reqs-child.tf similarity index 100% rename from internal/configs/testdata/provider-reqs/child/provider-reqs-child.tf rename to configs/testdata/provider-reqs/child/provider-reqs-child.tf diff --git a/internal/configs/testdata/provider-reqs/provider-reqs-root.tf b/configs/testdata/provider-reqs/provider-reqs-root.tf similarity index 100% rename from internal/configs/testdata/provider-reqs/provider-reqs-root.tf rename to configs/testdata/provider-reqs/provider-reqs-root.tf diff --git a/internal/configs/testdata/providers-explicit-fqn/root.tf b/configs/testdata/providers-explicit-fqn/root.tf similarity index 100% rename from internal/configs/testdata/providers-explicit-fqn/root.tf rename to configs/testdata/providers-explicit-fqn/root.tf diff --git a/internal/configs/testdata/valid-files/backend.tf b/configs/testdata/valid-files/backend.tf similarity index 100% rename from internal/configs/testdata/valid-files/backend.tf rename to configs/testdata/valid-files/backend.tf diff --git a/internal/configs/testdata/valid-files/cloud.tf b/configs/testdata/valid-files/cloud.tf similarity index 100% rename from internal/configs/testdata/valid-files/cloud.tf rename to configs/testdata/valid-files/cloud.tf diff --git a/internal/configs/testdata/valid-files/data-sources.tf b/configs/testdata/valid-files/data-sources.tf similarity index 100% rename from internal/configs/testdata/valid-files/data-sources.tf rename to configs/testdata/valid-files/data-sources.tf diff --git a/internal/configs/testdata/valid-files/empty.tf b/configs/testdata/valid-files/empty.tf similarity index 
100% rename from internal/configs/testdata/valid-files/empty.tf rename to configs/testdata/valid-files/empty.tf diff --git a/internal/configs/testdata/valid-files/empty.tf.json b/configs/testdata/valid-files/empty.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/empty.tf.json rename to configs/testdata/valid-files/empty.tf.json diff --git a/internal/configs/testdata/valid-files/locals.tf b/configs/testdata/valid-files/locals.tf similarity index 100% rename from internal/configs/testdata/valid-files/locals.tf rename to configs/testdata/valid-files/locals.tf diff --git a/internal/configs/testdata/valid-files/locals.tf.json b/configs/testdata/valid-files/locals.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/locals.tf.json rename to configs/testdata/valid-files/locals.tf.json diff --git a/internal/configs/testdata/valid-files/object-optional-attrs.tf b/configs/testdata/valid-files/object-optional-attrs.tf similarity index 100% rename from internal/configs/testdata/valid-files/object-optional-attrs.tf rename to configs/testdata/valid-files/object-optional-attrs.tf diff --git a/internal/configs/testdata/valid-files/outputs.tf b/configs/testdata/valid-files/outputs.tf similarity index 100% rename from internal/configs/testdata/valid-files/outputs.tf rename to configs/testdata/valid-files/outputs.tf diff --git a/internal/configs/testdata/valid-files/preconditions-postconditions.tf b/configs/testdata/valid-files/preconditions-postconditions.tf similarity index 100% rename from internal/configs/testdata/valid-files/preconditions-postconditions.tf rename to configs/testdata/valid-files/preconditions-postconditions.tf diff --git a/internal/configs/testdata/valid-files/provider-configs.tf b/configs/testdata/valid-files/provider-configs.tf similarity index 100% rename from internal/configs/testdata/valid-files/provider-configs.tf rename to configs/testdata/valid-files/provider-configs.tf diff --git 
a/internal/configs/testdata/valid-files/providers-explicit-implied.tf b/configs/testdata/valid-files/providers-explicit-implied.tf similarity index 100% rename from internal/configs/testdata/valid-files/providers-explicit-implied.tf rename to configs/testdata/valid-files/providers-explicit-implied.tf diff --git a/internal/configs/testdata/valid-files/references.tf.json b/configs/testdata/valid-files/references.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/references.tf.json rename to configs/testdata/valid-files/references.tf.json diff --git a/internal/configs/testdata/valid-files/required-providers.tf b/configs/testdata/valid-files/required-providers.tf similarity index 100% rename from internal/configs/testdata/valid-files/required-providers.tf rename to configs/testdata/valid-files/required-providers.tf diff --git a/internal/configs/testdata/valid-files/required-version.tf b/configs/testdata/valid-files/required-version.tf similarity index 100% rename from internal/configs/testdata/valid-files/required-version.tf rename to configs/testdata/valid-files/required-version.tf diff --git a/internal/configs/testdata/valid-files/resources-ignorechanges-all.tf b/configs/testdata/valid-files/resources-ignorechanges-all.tf similarity index 100% rename from internal/configs/testdata/valid-files/resources-ignorechanges-all.tf rename to configs/testdata/valid-files/resources-ignorechanges-all.tf diff --git a/internal/configs/testdata/valid-files/resources-ignorechanges-all.tf.json b/configs/testdata/valid-files/resources-ignorechanges-all.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/resources-ignorechanges-all.tf.json rename to configs/testdata/valid-files/resources-ignorechanges-all.tf.json diff --git a/internal/configs/testdata/valid-files/resources.tf b/configs/testdata/valid-files/resources.tf similarity index 100% rename from internal/configs/testdata/valid-files/resources.tf rename to 
configs/testdata/valid-files/resources.tf diff --git a/internal/configs/testdata/valid-files/resources.tf.json b/configs/testdata/valid-files/resources.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/resources.tf.json rename to configs/testdata/valid-files/resources.tf.json diff --git a/internal/configs/testdata/valid-files/valid-language-edition.tf b/configs/testdata/valid-files/valid-language-edition.tf similarity index 100% rename from internal/configs/testdata/valid-files/valid-language-edition.tf rename to configs/testdata/valid-files/valid-language-edition.tf diff --git a/internal/configs/testdata/valid-files/variable_validation.tf b/configs/testdata/valid-files/variable_validation.tf similarity index 100% rename from internal/configs/testdata/valid-files/variable_validation.tf rename to configs/testdata/valid-files/variable_validation.tf diff --git a/internal/configs/testdata/valid-files/variables.tf b/configs/testdata/valid-files/variables.tf similarity index 100% rename from internal/configs/testdata/valid-files/variables.tf rename to configs/testdata/valid-files/variables.tf diff --git a/internal/configs/testdata/valid-files/variables.tf.json b/configs/testdata/valid-files/variables.tf.json similarity index 100% rename from internal/configs/testdata/valid-files/variables.tf.json rename to configs/testdata/valid-files/variables.tf.json diff --git a/internal/configs/testdata/valid-modules/empty/README b/configs/testdata/valid-modules/empty/README similarity index 100% rename from internal/configs/testdata/valid-modules/empty/README rename to configs/testdata/valid-modules/empty/README diff --git a/internal/configs/testdata/valid-modules/implied-providers/providers.tf b/configs/testdata/valid-modules/implied-providers/providers.tf similarity index 100% rename from internal/configs/testdata/valid-modules/implied-providers/providers.tf rename to configs/testdata/valid-modules/implied-providers/providers.tf diff --git 
a/internal/configs/testdata/valid-modules/implied-providers/resources.tf b/configs/testdata/valid-modules/implied-providers/resources.tf similarity index 100% rename from internal/configs/testdata/valid-modules/implied-providers/resources.tf rename to configs/testdata/valid-modules/implied-providers/resources.tf diff --git a/internal/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf b/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf similarity index 100% rename from internal/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf rename to configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf diff --git a/internal/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf b/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf similarity index 100% rename from internal/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf rename to configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf diff --git a/internal/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf b/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf rename to configs/testdata/valid-modules/nested-providers-fqns/child/main.tf diff --git a/internal/configs/testdata/valid-modules/nested-providers-fqns/main.tf b/configs/testdata/valid-modules/nested-providers-fqns/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/nested-providers-fqns/main.tf rename to configs/testdata/valid-modules/nested-providers-fqns/main.tf diff --git a/internal/configs/testdata/valid-modules/override-backend-no-base/main.tf b/configs/testdata/valid-modules/override-backend-no-base/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend-no-base/main.tf rename to configs/testdata/valid-modules/override-backend-no-base/main.tf diff --git 
a/internal/configs/testdata/valid-modules/override-backend-no-base/override.tf b/configs/testdata/valid-modules/override-backend-no-base/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend-no-base/override.tf rename to configs/testdata/valid-modules/override-backend-no-base/override.tf diff --git a/internal/configs/testdata/valid-modules/override-backend-with-cloud/main.tf b/configs/testdata/valid-modules/override-backend-with-cloud/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend-with-cloud/main.tf rename to configs/testdata/valid-modules/override-backend-with-cloud/main.tf diff --git a/internal/configs/testdata/valid-modules/override-backend-with-cloud/override.tf b/configs/testdata/valid-modules/override-backend-with-cloud/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend-with-cloud/override.tf rename to configs/testdata/valid-modules/override-backend-with-cloud/override.tf diff --git a/internal/configs/testdata/valid-modules/override-backend/main.tf b/configs/testdata/valid-modules/override-backend/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend/main.tf rename to configs/testdata/valid-modules/override-backend/main.tf diff --git a/internal/configs/testdata/valid-modules/override-backend/override.tf b/configs/testdata/valid-modules/override-backend/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-backend/override.tf rename to configs/testdata/valid-modules/override-backend/override.tf diff --git a/internal/configs/testdata/valid-modules/override-cloud-no-base/main.tf b/configs/testdata/valid-modules/override-cloud-no-base/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-cloud-no-base/main.tf rename to configs/testdata/valid-modules/override-cloud-no-base/main.tf diff --git 
a/internal/configs/testdata/valid-modules/override-cloud-no-base/override.tf b/configs/testdata/valid-modules/override-cloud-no-base/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-cloud-no-base/override.tf rename to configs/testdata/valid-modules/override-cloud-no-base/override.tf diff --git a/internal/configs/testdata/valid-modules/override-cloud/main.tf b/configs/testdata/valid-modules/override-cloud/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-cloud/main.tf rename to configs/testdata/valid-modules/override-cloud/main.tf diff --git a/internal/configs/testdata/valid-modules/override-cloud/override.tf b/configs/testdata/valid-modules/override-cloud/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-cloud/override.tf rename to configs/testdata/valid-modules/override-cloud/override.tf diff --git a/internal/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf b/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf rename to configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf diff --git a/internal/configs/testdata/valid-modules/override-dynamic-block-base/base.tf b/configs/testdata/valid-modules/override-dynamic-block-base/base.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-dynamic-block-base/base.tf rename to configs/testdata/valid-modules/override-dynamic-block-base/base.tf diff --git a/internal/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf b/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf rename to 
configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf diff --git a/internal/configs/testdata/valid-modules/override-dynamic-block-override/base.tf b/configs/testdata/valid-modules/override-dynamic-block-override/base.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-dynamic-block-override/base.tf rename to configs/testdata/valid-modules/override-dynamic-block-override/base.tf diff --git a/internal/configs/testdata/valid-modules/override-ignore-changes/main.tf b/configs/testdata/valid-modules/override-ignore-changes/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-ignore-changes/main.tf rename to configs/testdata/valid-modules/override-ignore-changes/main.tf diff --git a/internal/configs/testdata/valid-modules/override-ignore-changes/main_override.tf b/configs/testdata/valid-modules/override-ignore-changes/main_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-ignore-changes/main_override.tf rename to configs/testdata/valid-modules/override-ignore-changes/main_override.tf diff --git a/internal/configs/testdata/valid-modules/override-module/a_override.tf b/configs/testdata/valid-modules/override-module/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-module/a_override.tf rename to configs/testdata/valid-modules/override-module/a_override.tf diff --git a/internal/configs/testdata/valid-modules/override-module/b_override.tf b/configs/testdata/valid-modules/override-module/b_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-module/b_override.tf rename to configs/testdata/valid-modules/override-module/b_override.tf diff --git a/internal/configs/testdata/valid-modules/override-module/primary.tf b/configs/testdata/valid-modules/override-module/primary.tf similarity index 100% rename from 
internal/configs/testdata/valid-modules/override-module/primary.tf rename to configs/testdata/valid-modules/override-module/primary.tf diff --git a/internal/configs/testdata/valid-modules/override-output-sensitive/override.tf b/configs/testdata/valid-modules/override-output-sensitive/override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-output-sensitive/override.tf rename to configs/testdata/valid-modules/override-output-sensitive/override.tf diff --git a/internal/configs/testdata/valid-modules/override-output-sensitive/primary.tf b/configs/testdata/valid-modules/override-output-sensitive/primary.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-output-sensitive/primary.tf rename to configs/testdata/valid-modules/override-output-sensitive/primary.tf diff --git a/internal/configs/testdata/valid-modules/override-resource-provider/a_override.tf b/configs/testdata/valid-modules/override-resource-provider/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-resource-provider/a_override.tf rename to configs/testdata/valid-modules/override-resource-provider/a_override.tf diff --git a/internal/configs/testdata/valid-modules/override-resource-provider/base.tf b/configs/testdata/valid-modules/override-resource-provider/base.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-resource-provider/base.tf rename to configs/testdata/valid-modules/override-resource-provider/base.tf diff --git a/internal/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf b/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf rename to configs/testdata/valid-modules/override-variable-sensitive/a_override.tf diff --git 
a/internal/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf b/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf rename to configs/testdata/valid-modules/override-variable-sensitive/b_override.tf diff --git a/internal/configs/testdata/valid-modules/override-variable-sensitive/primary.tf b/configs/testdata/valid-modules/override-variable-sensitive/primary.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable-sensitive/primary.tf rename to configs/testdata/valid-modules/override-variable-sensitive/primary.tf diff --git a/internal/configs/testdata/valid-modules/override-variable/a_override.tf b/configs/testdata/valid-modules/override-variable/a_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable/a_override.tf rename to configs/testdata/valid-modules/override-variable/a_override.tf diff --git a/internal/configs/testdata/valid-modules/override-variable/b_override.tf b/configs/testdata/valid-modules/override-variable/b_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable/b_override.tf rename to configs/testdata/valid-modules/override-variable/b_override.tf diff --git a/internal/configs/testdata/valid-modules/override-variable/primary.tf b/configs/testdata/valid-modules/override-variable/primary.tf similarity index 100% rename from internal/configs/testdata/valid-modules/override-variable/primary.tf rename to configs/testdata/valid-modules/override-variable/primary.tf diff --git a/internal/configs/testdata/valid-modules/provider-aliases/main.tf b/configs/testdata/valid-modules/provider-aliases/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/provider-aliases/main.tf rename to configs/testdata/valid-modules/provider-aliases/main.tf diff 
--git a/internal/configs/testdata/valid-modules/provider-meta/main.tf b/configs/testdata/valid-modules/provider-meta/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/provider-meta/main.tf rename to configs/testdata/valid-modules/provider-meta/main.tf diff --git a/internal/configs/testdata/valid-modules/providers-fqns/main.tf b/configs/testdata/valid-modules/providers-fqns/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/providers-fqns/main.tf rename to configs/testdata/valid-modules/providers-fqns/main.tf diff --git a/internal/configs/testdata/valid-modules/required-providers-after-resource/main.tf b/configs/testdata/valid-modules/required-providers-after-resource/main.tf similarity index 100% rename from internal/configs/testdata/valid-modules/required-providers-after-resource/main.tf rename to configs/testdata/valid-modules/required-providers-after-resource/main.tf diff --git a/internal/configs/testdata/valid-modules/required-providers-after-resource/providers.tf b/configs/testdata/valid-modules/required-providers-after-resource/providers.tf similarity index 100% rename from internal/configs/testdata/valid-modules/required-providers-after-resource/providers.tf rename to configs/testdata/valid-modules/required-providers-after-resource/providers.tf diff --git a/internal/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf b/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf similarity index 100% rename from internal/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf rename to configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf diff --git a/internal/configs/testdata/valid-modules/required-providers-overrides/main.tf b/configs/testdata/valid-modules/required-providers-overrides/main.tf similarity index 100% rename from 
internal/configs/testdata/valid-modules/required-providers-overrides/main.tf rename to configs/testdata/valid-modules/required-providers-overrides/main.tf diff --git a/internal/configs/testdata/valid-modules/required-providers-overrides/providers.tf b/configs/testdata/valid-modules/required-providers-overrides/providers.tf similarity index 100% rename from internal/configs/testdata/valid-modules/required-providers-overrides/providers.tf rename to configs/testdata/valid-modules/required-providers-overrides/providers.tf diff --git a/internal/configs/testdata/warning-files/depends_on.tf b/configs/testdata/warning-files/depends_on.tf similarity index 100% rename from internal/configs/testdata/warning-files/depends_on.tf rename to configs/testdata/warning-files/depends_on.tf diff --git a/internal/configs/testdata/warning-files/provider_ref.tf b/configs/testdata/warning-files/provider_ref.tf similarity index 100% rename from internal/configs/testdata/warning-files/provider_ref.tf rename to configs/testdata/warning-files/provider_ref.tf diff --git a/internal/configs/testdata/warning-files/provisioner_keyword.tf b/configs/testdata/warning-files/provisioner_keyword.tf similarity index 100% rename from internal/configs/testdata/warning-files/provisioner_keyword.tf rename to configs/testdata/warning-files/provisioner_keyword.tf diff --git a/internal/configs/util.go b/configs/util.go similarity index 100% rename from internal/configs/util.go rename to configs/util.go diff --git a/internal/configs/variable_type_hint.go b/configs/variable_type_hint.go similarity index 100% rename from internal/configs/variable_type_hint.go rename to configs/variable_type_hint.go diff --git a/internal/configs/variabletypehint_string.go b/configs/variabletypehint_string.go similarity index 100% rename from internal/configs/variabletypehint_string.go rename to configs/variabletypehint_string.go diff --git a/internal/configs/version_constraint.go b/configs/version_constraint.go similarity index 100% 
rename from internal/configs/version_constraint.go rename to configs/version_constraint.go diff --git a/internal/copy/copy_dir.go b/copy/copy_dir.go similarity index 100% rename from internal/copy/copy_dir.go rename to copy/copy_dir.go diff --git a/internal/copy/copy_dir_test.go b/copy/copy_dir_test.go similarity index 100% rename from internal/copy/copy_dir_test.go rename to copy/copy_dir_test.go diff --git a/internal/copy/copy_file.go b/copy/copy_file.go similarity index 100% rename from internal/copy/copy_file.go rename to copy/copy_file.go diff --git a/internal/dag/dag.go b/dag/dag.go similarity index 99% rename from internal/dag/dag.go rename to dag/dag.go index 362c847f3d9f..d375d5f09363 100644 --- a/internal/dag/dag.go +++ b/dag/dag.go @@ -5,7 +5,7 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/go-multierror" ) diff --git a/internal/dag/dag_test.go b/dag/dag_test.go similarity index 99% rename from internal/dag/dag_test.go rename to dag/dag_test.go index 5b273938e8fe..5a36564a3dad 100644 --- a/internal/dag/dag_test.go +++ b/dag/dag_test.go @@ -10,9 +10,9 @@ import ( "sync" "testing" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" - _ "github.com/hashicorp/terraform/internal/logging" + _ "github.com/hashicorp/terraform/logging" ) func TestMain(m *testing.M) { diff --git a/internal/dag/dot.go b/dag/dot.go similarity index 100% rename from internal/dag/dot.go rename to dag/dot.go diff --git a/internal/dag/dot_test.go b/dag/dot_test.go similarity index 100% rename from internal/dag/dot_test.go rename to dag/dot_test.go diff --git a/internal/dag/edge.go b/dag/edge.go similarity index 100% rename from internal/dag/edge.go rename to dag/edge.go diff --git a/internal/dag/edge_test.go b/dag/edge_test.go similarity index 100% rename from internal/dag/edge_test.go rename to dag/edge_test.go diff --git a/internal/dag/graph.go 
b/dag/graph.go similarity index 100% rename from internal/dag/graph.go rename to dag/graph.go diff --git a/internal/dag/graph_test.go b/dag/graph_test.go similarity index 100% rename from internal/dag/graph_test.go rename to dag/graph_test.go diff --git a/internal/dag/marshal.go b/dag/marshal.go similarity index 100% rename from internal/dag/marshal.go rename to dag/marshal.go diff --git a/internal/dag/marshal_test.go b/dag/marshal_test.go similarity index 100% rename from internal/dag/marshal_test.go rename to dag/marshal_test.go diff --git a/internal/dag/set.go b/dag/set.go similarity index 100% rename from internal/dag/set.go rename to dag/set.go diff --git a/internal/dag/set_test.go b/dag/set_test.go similarity index 100% rename from internal/dag/set_test.go rename to dag/set_test.go diff --git a/internal/dag/tarjan.go b/dag/tarjan.go similarity index 100% rename from internal/dag/tarjan.go rename to dag/tarjan.go diff --git a/internal/dag/tarjan_test.go b/dag/tarjan_test.go similarity index 100% rename from internal/dag/tarjan_test.go rename to dag/tarjan_test.go diff --git a/internal/dag/walk.go b/dag/walk.go similarity index 99% rename from internal/dag/walk.go rename to dag/walk.go index ff8afeac7c13..26b249230f33 100644 --- a/internal/dag/walk.go +++ b/dag/walk.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // Walker is used to walk every vertex of a graph in parallel. 
diff --git a/internal/dag/walk_test.go b/dag/walk_test.go similarity index 99% rename from internal/dag/walk_test.go rename to dag/walk_test.go index fc5844e2e154..5464248b13da 100644 --- a/internal/dag/walk_test.go +++ b/dag/walk_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func TestWalker_basic(t *testing.T) { diff --git a/internal/depsfile/doc.go b/depsfile/doc.go similarity index 100% rename from internal/depsfile/doc.go rename to depsfile/doc.go diff --git a/internal/depsfile/locks.go b/depsfile/locks.go similarity index 99% rename from internal/depsfile/locks.go rename to depsfile/locks.go index 4997c4858f2b..fc84193baa08 100644 --- a/internal/depsfile/locks.go +++ b/depsfile/locks.go @@ -4,8 +4,8 @@ import ( "fmt" "sort" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) // Locks is the top-level type representing the information retained in a diff --git a/internal/depsfile/locks_file.go b/depsfile/locks_file.go similarity index 98% rename from internal/depsfile/locks_file.go rename to depsfile/locks_file.go index e619e06703c6..d9ad3552f861 100644 --- a/internal/depsfile/locks_file.go +++ b/depsfile/locks_file.go @@ -11,10 +11,10 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/replacefile" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/replacefile" + "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/terraform/version" ) diff --git a/internal/depsfile/locks_file_test.go 
b/depsfile/locks_file_test.go similarity index 98% rename from internal/depsfile/locks_file_test.go rename to depsfile/locks_file_test.go index 632aa71c7a07..21c08dda85b5 100644 --- a/internal/depsfile/locks_file_test.go +++ b/depsfile/locks_file_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/tfdiags" ) func TestLoadLocksFromFile(t *testing.T) { diff --git a/internal/depsfile/locks_test.go b/depsfile/locks_test.go similarity index 98% rename from internal/depsfile/locks_test.go rename to depsfile/locks_test.go index ce84d2e2e003..316e406eed36 100644 --- a/internal/depsfile/locks_test.go +++ b/depsfile/locks_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) func TestLocksEqual(t *testing.T) { diff --git a/internal/depsfile/paths.go b/depsfile/paths.go similarity index 100% rename from internal/depsfile/paths.go rename to depsfile/paths.go diff --git a/internal/depsfile/testdata/locks-files/empty.hcl b/depsfile/testdata/locks-files/empty.hcl similarity index 100% rename from internal/depsfile/testdata/locks-files/empty.hcl rename to depsfile/testdata/locks-files/empty.hcl diff --git a/internal/depsfile/testdata/locks-files/invalid-provider-addrs.hcl b/depsfile/testdata/locks-files/invalid-provider-addrs.hcl similarity index 100% rename from internal/depsfile/testdata/locks-files/invalid-provider-addrs.hcl rename to depsfile/testdata/locks-files/invalid-provider-addrs.hcl diff --git a/internal/depsfile/testdata/locks-files/invalid-versions.hcl 
b/depsfile/testdata/locks-files/invalid-versions.hcl similarity index 100% rename from internal/depsfile/testdata/locks-files/invalid-versions.hcl rename to depsfile/testdata/locks-files/invalid-versions.hcl diff --git a/internal/depsfile/testdata/locks-files/unsupported-block.hcl b/depsfile/testdata/locks-files/unsupported-block.hcl similarity index 100% rename from internal/depsfile/testdata/locks-files/unsupported-block.hcl rename to depsfile/testdata/locks-files/unsupported-block.hcl diff --git a/internal/depsfile/testdata/locks-files/valid-provider-locks.hcl b/depsfile/testdata/locks-files/valid-provider-locks.hcl similarity index 100% rename from internal/depsfile/testdata/locks-files/valid-provider-locks.hcl rename to depsfile/testdata/locks-files/valid-provider-locks.hcl diff --git a/internal/depsfile/testing.go b/depsfile/testing.go similarity index 100% rename from internal/depsfile/testing.go rename to depsfile/testing.go diff --git a/internal/didyoumean/name_suggestion.go b/didyoumean/name_suggestion.go similarity index 100% rename from internal/didyoumean/name_suggestion.go rename to didyoumean/name_suggestion.go diff --git a/internal/didyoumean/name_suggestion_test.go b/didyoumean/name_suggestion_test.go similarity index 100% rename from internal/didyoumean/name_suggestion_test.go rename to didyoumean/name_suggestion_test.go diff --git a/internal/e2e/e2e.go b/e2e/e2e.go similarity index 97% rename from internal/e2e/e2e.go rename to e2e/e2e.go index efdd2489a385..8167e412632c 100644 --- a/internal/e2e/e2e.go +++ b/e2e/e2e.go @@ -10,10 +10,10 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + 
"github.com/hashicorp/terraform/states/statefile" ) // Type binary represents the combination of a compiled binary diff --git a/earlyconfig/config.go b/earlyconfig/config.go new file mode 100644 index 000000000000..e5361f38ee03 --- /dev/null +++ b/earlyconfig/config.go @@ -0,0 +1,210 @@ +package earlyconfig + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. 
+ Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *tfconfig.Module + + // CallPos is the source position for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallPos tfconfig.SourcePos + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr addrs.ModuleSource + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// ProviderRequirements searches the full tree of modules under the receiver +// for both explicit and implicit dependencies on providers. +// +// The result is a full manifest of all of the providers that must be available +// in order to work with the receiving configuration. +// +// If the returned diagnostics includes errors then the resulting Requirements +// may be incomplete. +func (c *Config) ProviderRequirements() (getproviders.Requirements, tfdiags.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs) + return reqs, diags +} + +// addProviderRequirements is the main part of the ProviderRequirements +// implementation, gradually mutating a shared requirements object to +// eventually return. 
+func (c *Config) addProviderRequirements(reqs getproviders.Requirements) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // First we'll deal with the requirements directly in _our_ module... + for localName, providerReqs := range c.Module.RequiredProviders { + var fqn addrs.Provider + if source := providerReqs.Source; source != "" { + addr, moreDiags := addrs.ParseProviderSourceString(source) + if moreDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider source address", + fmt.Sprintf("Invalid source %q for provider %q in %s", source, localName, c.Path), + )) + continue + } + fqn = addr + } + if fqn.IsZero() { + fqn = addrs.ImpliedProviderForUnqualifiedType(localName) + } + if _, ok := reqs[fqn]; !ok { + // We'll at least have an unconstrained dependency then, but might + // add to this in the loop below. + reqs[fqn] = nil + } + for _, constraintsStr := range providerReqs.VersionConstraints { + if constraintsStr != "" { + constraints, err := getproviders.ParseVersionConstraints(constraintsStr) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider version constraint", + fmt.Sprintf("Provider %q in %s has invalid version constraint %q: %s.", localName, c.Path, constraintsStr, err), + )) + continue + } + reqs[fqn] = append(reqs[fqn], constraints...) + } + } + } + + // ...and now we'll recursively visit all of the child modules to merge + // in their requirements too. + for _, childConfig := range c.Children { + moreDiags := childConfig.addProviderRequirements(reqs) + diags = diags.Append(moreDiags) + } + + return diags +} + +// ProviderDependencies is a deprecated variant of ProviderRequirements which +// uses the moduledeps models for representation. This is preserved to allow +// a gradual transition over to ProviderRequirements, but note that its +// support for fully-qualified provider addresses has some idiosyncracies. 
+func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var name string + if len(c.Path) > 0 { + name = c.Path[len(c.Path)-1] + } + + ret := &moduledeps.Module{ + Name: name, + } + + providers := make(moduledeps.Providers) + for name, reqs := range c.Module.RequiredProviders { + var fqn addrs.Provider + if source := reqs.Source; source != "" { + addr, parseDiags := addrs.ParseProviderSourceString(source) + if parseDiags.HasErrors() { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid provider source", + Detail: fmt.Sprintf("Invalid source %q for provider", name), + })) + continue + } + fqn = addr + } + if fqn.IsZero() { + fqn = addrs.NewDefaultProvider(name) + } + var constraints version.Constraints + for _, reqStr := range reqs.VersionConstraints { + if reqStr != "" { + constraint, err := version.NewConstraint(reqStr) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, fqn.String()), + })) + continue + } + constraints = append(constraints, constraint...) 
+ } + } + providers[fqn] = moduledeps.ProviderDependency{ + Constraints: discovery.NewConstraints(constraints), + Reason: moduledeps.ProviderDependencyExplicit, + } + } + ret.Providers = providers + + childNames := make([]string, 0, len(c.Children)) + for name := range c.Children { + childNames = append(childNames, name) + } + sort.Strings(childNames) + + for _, name := range childNames { + child, childDiags := c.Children[name].ProviderDependencies() + ret.Children = append(ret.Children, child) + diags = diags.Append(childDiags) + } + + return ret, diags +} diff --git a/earlyconfig/config_build.go b/earlyconfig/config_build.go new file mode 100644 index 000000000000..cd100fe7fa87 --- /dev/null +++ b/earlyconfig/config_build.go @@ -0,0 +1,173 @@ +package earlyconfig + +import ( + "fmt" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := map[string]*Config{} + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. 
+ callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + var vc version.Constraints + haveVersionArg := false + if strings.TrimSpace(call.Version) != "" { + haveVersionArg = true + + var err error + vc, err = version.NewConstraint(call.Version) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), + })) + continue + } + } + + var sourceAddr addrs.ModuleSource + var err error + if haveVersionArg { + sourceAddr, err = addrs.ParseModuleSourceRegistry(call.Source) + } else { + sourceAddr, err = addrs.ParseModuleSource(call.Source) + } + if err != nil { + if haveVersionArg { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid registry module source address", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid source address %q: %s.\n\nTerraform assumed that you intended a module registry source address because you also set the argument \"version\", which applies only to registry modules.", callName, call.Pos.Filename, call.Pos.Line, call.Source, err), + })) + } else { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid source address %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Source, err), + })) + } + // If we didn't have a valid source address then we can't continue + // down the module tree with this one. 
+ continue + } + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: sourceAddr, + VersionConstraints: vc, + Parent: parent, + CallPos: call.Pos, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. + continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallPos: call.Pos, + SourceAddr: sourceAddr, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = diags.Append(modDiags) + + ret[call.Name] = child + } + + return ret, diags +} + +// ModuleRequest is used as part of the ModuleWalker interface used with +// function BuildConfig. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr addrs.ModuleSource + + // VersionConstraint is the version constraint applied to the module in + // configuration. + VersionConstraints version.Constraints + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. 
Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source position for the header of the "module" block + // in configuration that prompted this request. + CallPos tfconfig.SourcePos +} + +// ModuleWalker is an interface used with BuildConfig. +type ModuleWalker interface { + LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) + +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + return f(req) +} diff --git a/earlyconfig/config_test.go b/earlyconfig/config_test.go new file mode 100644 index 000000000000..cd0e85193d6b --- /dev/null +++ b/earlyconfig/config_test.go @@ -0,0 +1,84 @@ +package earlyconfig + +import ( + "log" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestConfigProviderRequirements(t *testing.T) { + cfg := testConfig(t, "testdata/provider-reqs") + + impliedProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "implied", + ) + nullProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "null", + ) + randomProvider := addrs.NewProvider( + 
addrs.DefaultProviderRegistryHost, + "hashicorp", "random", + ) + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + + got, diags := cfg.ProviderRequirements() + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) + } + want := getproviders.Requirements{ + // the nullProvider constraints from the two modules are merged + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + impliedProvider: nil, + happycloudProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func testConfig(t *testing.T, baseDir string) *Config { + rootMod, diags := LoadModule(baseDir) + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) + } + + cfg, diags := BuildConfig(rootMod, ModuleWalkerFunc(testModuleWalkerFunc)) + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) + } + + return cfg +} + +// testModuleWalkerFunc is a simple implementation of ModuleWalkerFunc that +// only understands how to resolve relative filesystem paths, using source +// location information from the call. 
+func testModuleWalkerFunc(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + callFilename := req.CallPos.Filename + sourcePath := req.SourceAddr.String() + finalPath := filepath.Join(filepath.Dir(callFilename), sourcePath) + log.Printf("[TRACE] %s in %s -> %s", sourcePath, callFilename, finalPath) + + newMod, diags := LoadModule(finalPath) + return newMod, version.Must(version.NewVersion("0.0.0")), diags +} diff --git a/earlyconfig/diagnostics.go b/earlyconfig/diagnostics.go new file mode 100644 index 000000000000..fd451f71df37 --- /dev/null +++ b/earlyconfig/diagnostics.go @@ -0,0 +1,82 @@ +package earlyconfig + +import ( + "fmt" + + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { + ret := make(tfdiags.Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = wrapDiagnostic(diag) + } + return ret +} + +func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { + return wrappedDiagnostic{ + d: diag, + } +} + +type wrappedDiagnostic struct { + d tfconfig.Diagnostic +} + +func (d wrappedDiagnostic) Severity() tfdiags.Severity { + switch d.d.Severity { + case tfconfig.DiagError: + return tfdiags.Error + case tfconfig.DiagWarning: + return tfdiags.Warning + default: + // Should never happen since there are no other severities + return 0 + } +} + +func (d wrappedDiagnostic) Description() tfdiags.Description { + // Since the inspect library doesn't produce precise source locations, + // we include the position information as part of the error message text. + // See the comment inside method "Source" for more information. 
+ switch { + case d.d.Pos == nil: + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: d.d.Detail, + } + case d.d.Detail != "": + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), + } + default: + return tfdiags.Description{ + Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), + } + } +} + +func (d wrappedDiagnostic) Source() tfdiags.Source { + // Since the inspect library is constrained by the lowest common denominator + // between legacy HCL and modern HCL, it only returns ranges at whole-line + // granularity, and that isn't sufficient to populate a tfdiags.Source + // and so we'll just omit ranges altogether and include the line number in + // the Description text. + // + // Callers that want to return nicer errors should consider reacting to + // earlyconfig errors by attempting a follow-up parse with the normal + // config loader, which can produce more precise source location + // information. + return tfdiags.Source{} +} + +func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { + return nil +} + +func (d wrappedDiagnostic) ExtraInfo() interface{} { + return nil +} diff --git a/internal/earlyconfig/doc.go b/earlyconfig/doc.go similarity index 100% rename from internal/earlyconfig/doc.go rename to earlyconfig/doc.go diff --git a/earlyconfig/module.go b/earlyconfig/module.go new file mode 100644 index 000000000000..d2d628797ae0 --- /dev/null +++ b/earlyconfig/module.go @@ -0,0 +1,13 @@ +package earlyconfig + +import ( + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadModule loads some top-level metadata for the module in the given +// directory. 
+func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { + mod, diags := tfconfig.LoadModule(dir) + return mod, wrapDiagnostics(diags) +} diff --git a/internal/earlyconfig/testdata/provider-reqs/child/provider-reqs-child.tf b/earlyconfig/testdata/provider-reqs/child/provider-reqs-child.tf similarity index 100% rename from internal/earlyconfig/testdata/provider-reqs/child/provider-reqs-child.tf rename to earlyconfig/testdata/provider-reqs/child/provider-reqs-child.tf diff --git a/internal/earlyconfig/testdata/provider-reqs/provider-reqs-root.tf b/earlyconfig/testdata/provider-reqs/provider-reqs-root.tf similarity index 100% rename from internal/earlyconfig/testdata/provider-reqs/provider-reqs-root.tf rename to earlyconfig/testdata/provider-reqs/provider-reqs-root.tf diff --git a/internal/experiments/doc.go b/experiments/doc.go similarity index 100% rename from internal/experiments/doc.go rename to experiments/doc.go diff --git a/internal/experiments/errors.go b/experiments/errors.go similarity index 100% rename from internal/experiments/errors.go rename to experiments/errors.go diff --git a/internal/experiments/experiment.go b/experiments/experiment.go similarity index 100% rename from internal/experiments/experiment.go rename to experiments/experiment.go diff --git a/internal/experiments/set.go b/experiments/set.go similarity index 100% rename from internal/experiments/set.go rename to experiments/set.go diff --git a/internal/experiments/testing.go b/experiments/testing.go similarity index 100% rename from internal/experiments/testing.go rename to experiments/testing.go diff --git a/internal/getmodules/doc.go b/getmodules/doc.go similarity index 100% rename from internal/getmodules/doc.go rename to getmodules/doc.go diff --git a/internal/getmodules/file_detector.go b/getmodules/file_detector.go similarity index 100% rename from internal/getmodules/file_detector.go rename to getmodules/file_detector.go diff --git a/internal/getmodules/getter.go 
b/getmodules/getter.go similarity index 99% rename from internal/getmodules/getter.go rename to getmodules/getter.go index 82ea599afcb9..70f2cd88df7d 100644 --- a/internal/getmodules/getter.go +++ b/getmodules/getter.go @@ -8,7 +8,7 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/internal/copy" + "github.com/hashicorp/terraform/copy" ) // We configure our own go-getter detector and getter sets here, because diff --git a/internal/getmodules/installer.go b/getmodules/installer.go similarity index 100% rename from internal/getmodules/installer.go rename to getmodules/installer.go diff --git a/internal/getmodules/package.go b/getmodules/package.go similarity index 100% rename from internal/getmodules/package.go rename to getmodules/package.go diff --git a/internal/getmodules/subdir.go b/getmodules/subdir.go similarity index 100% rename from internal/getmodules/subdir.go rename to getmodules/subdir.go diff --git a/internal/getproviders/didyoumean.go b/getproviders/didyoumean.go similarity index 99% rename from internal/getproviders/didyoumean.go rename to getproviders/didyoumean.go index e31ba20e4195..f23fedbebe64 100644 --- a/internal/getproviders/didyoumean.go +++ b/getproviders/didyoumean.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/go-retryablehttp" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // MissingProviderSuggestion takes a provider address that failed installation diff --git a/internal/getproviders/didyoumean_test.go b/getproviders/didyoumean_test.go similarity index 99% rename from internal/getproviders/didyoumean_test.go rename to getproviders/didyoumean_test.go index 18804a9a6f39..85bc4582cbc2 100644 --- a/internal/getproviders/didyoumean_test.go +++ b/getproviders/didyoumean_test.go @@ -5,7 +5,7 @@ import ( "testing" svchost "github.com/hashicorp/terraform-svchost" - 
"github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestMissingProviderSuggestion(t *testing.T) { diff --git a/internal/getproviders/doc.go b/getproviders/doc.go similarity index 100% rename from internal/getproviders/doc.go rename to getproviders/doc.go diff --git a/getproviders/errors.go b/getproviders/errors.go new file mode 100644 index 000000000000..7ff7f39c0a2c --- /dev/null +++ b/getproviders/errors.go @@ -0,0 +1,246 @@ +package getproviders + +import ( + "fmt" + "net/url" + + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/hashicorp/terraform/addrs" +) + +// ErrHostNoProviders is an error type used to indicate that a hostname given +// in a provider address does not support the provider registry protocol. +type ErrHostNoProviders struct { + Hostname svchost.Hostname + + // HasOtherVersionis set to true if the discovery process detected + // declarations of services named "providers" whose version numbers did not + // match any version supported by the current version of Terraform. + // + // If this is set, it's helpful to hint to the user in an error message + // that the provider host may be expecting an older or a newer version + // of Terraform, rather than that it isn't a provider registry host at all. 
+ HasOtherVersion bool +} + +func (err ErrHostNoProviders) Error() string { + switch { + case err.HasOtherVersion: + return fmt.Sprintf("host %s does not support the provider registry protocol required by this Terraform version, but may be compatible with a different Terraform version", err.Hostname.ForDisplay()) + default: + return fmt.Sprintf("host %s does not offer a Terraform provider registry", err.Hostname.ForDisplay()) + } +} + +// ErrHostUnreachable is an error type used to indicate that a hostname +// given in a provider address did not resolve in DNS, did not respond to an +// HTTPS request for service discovery, or otherwise failed to correctly speak +// the service discovery protocol. +type ErrHostUnreachable struct { + Hostname svchost.Hostname + Wrapped error +} + +func (err ErrHostUnreachable) Error() string { + return fmt.Sprintf("could not connect to %s: %s", err.Hostname.ForDisplay(), err.Wrapped.Error()) +} + +// Unwrap returns the underlying error that occurred when trying to reach the +// indicated host. +func (err ErrHostUnreachable) Unwrap() error { + return err.Wrapped +} + +// ErrUnauthorized is an error type used to indicate that a hostname +// given in a provider address returned a "401 Unauthorized" or "403 Forbidden" +// error response when we tried to access it. +type ErrUnauthorized struct { + Hostname svchost.Hostname + + // HaveCredentials is true when the request that failed included some + // credentials, and thus it seems that those credentials were invalid. + // Conversely, HaveCredentials is false if the request did not include + // credentials at all, in which case it seems that credentials must be + // provided. 
+ HaveCredentials bool +} + +func (err ErrUnauthorized) Error() string { + switch { + case err.HaveCredentials: + return fmt.Sprintf("host %s rejected the given authentication credentials", err.Hostname) + default: + return fmt.Sprintf("host %s requires authentication credentials", err.Hostname) + } +} + +// ErrProviderNotFound is an error type used to indicate that requested provider +// was not found in the source(s) included in the Description field. This can be +// used to produce user-friendly error messages. +type ErrProviderNotFound struct { + Provider addrs.Provider + Sources []string +} + +func (err ErrProviderNotFound) Error() string { + return fmt.Sprintf( + "provider %s was not found in any of the search locations", + err.Provider, + ) +} + +// ErrRegistryProviderNotKnown is an error type used to indicate that the hostname +// given in a provider address does appear to be a provider registry but that +// registry does not know about the given provider namespace or type. +// +// A caller serving requests from an end-user should recognize this error type +// and use it to produce user-friendly hints for common errors such as failing +// to specify an explicit source for a provider not in the default namespace +// (one not under registry.terraform.io/hashicorp/). The default error message +// for this type is a direct description of the problem with no such hints, +// because we expect that the caller will have better context to decide what +// hints are appropriate, e.g. by looking at the configuration given by the +// user. +type ErrRegistryProviderNotKnown struct { + Provider addrs.Provider +} + +func (err ErrRegistryProviderNotKnown) Error() string { + return fmt.Sprintf( + "provider registry %s does not have a provider named %s", + err.Provider.Hostname.ForDisplay(), + err.Provider, + ) +} + +// ErrPlatformNotSupported is an error type used to indicate that a particular +// version of a provider isn't available for a particular target platform. 
+// +// This is returned when DownloadLocation encounters a 404 Not Found response +// from the underlying registry, because it presumes that a caller will only +// ask for the DownloadLocation for a version it already found the existence +// of via AvailableVersions. +type ErrPlatformNotSupported struct { + Provider addrs.Provider + Version Version + Platform Platform + + // MirrorURL, if non-nil, is the base URL of the mirror that serviced + // the request in place of the provider's origin registry. MirrorURL + // is nil for a direct query. + MirrorURL *url.URL +} + +func (err ErrPlatformNotSupported) Error() string { + if err.MirrorURL != nil { + return fmt.Sprintf( + "provider mirror %s does not have a package of %s %s for %s", + err.MirrorURL.String(), + err.Provider, + err.Version, + err.Platform, + ) + } + return fmt.Sprintf( + "provider %s %s is not available for %s", + err.Provider, + err.Version, + err.Platform, + ) +} + +// ErrProtocolNotSupported is an error type used to indicate that a particular +// version of a provider is not supported by the current version of Terraform. +// +// Specfically, this is returned when the version's plugin protocol is not supported. +// +// When available, the error will include a suggested version that can be displayed to +// the user. Otherwise it will return UnspecifiedVersion +type ErrProtocolNotSupported struct { + Provider addrs.Provider + Version Version + Suggestion Version +} + +func (err ErrProtocolNotSupported) Error() string { + return fmt.Sprintf( + "provider %s %s is not supported by this version of terraform", + err.Provider, + err.Version, + ) +} + +// ErrQueryFailed is an error type used to indicate that the hostname given +// in a provider address does appear to be a provider registry but that when +// we queried it for metadata for the given provider the server returned an +// unexpected error. 
+// +// This is used for any error responses other than "Not Found", which would +// indicate the absense of a provider and is thus reported using +// ErrProviderNotKnown instead. +type ErrQueryFailed struct { + Provider addrs.Provider + Wrapped error + + // MirrorURL, if non-nil, is the base URL of the mirror that serviced + // the request in place of the provider's origin registry. MirrorURL + // is nil for a direct query. + MirrorURL *url.URL +} + +func (err ErrQueryFailed) Error() string { + if err.MirrorURL != nil { + return fmt.Sprintf( + "failed to query provider mirror %s for %s: %s", + err.MirrorURL.String(), + err.Provider.String(), + err.Wrapped.Error(), + ) + } + return fmt.Sprintf( + "could not query provider registry for %s: %s", + err.Provider.String(), + err.Wrapped.Error(), + ) +} + +// Unwrap returns the underlying error that occurred when trying to reach the +// indicated host. +func (err ErrQueryFailed) Unwrap() error { + return err.Wrapped +} + +// ErrRequestCanceled is an error type used to indicate that an operation +// failed due to being cancelled via the given context.Context object. +// +// This error type doesn't include information about what was cancelled, +// because the expected treatment of this error type is to quickly abort and +// exit with minimal ceremony. +type ErrRequestCanceled struct { +} + +func (err ErrRequestCanceled) Error() string { + return "request canceled" +} + +// ErrIsNotExist returns true if and only if the given error is one of the +// errors from this package that represents an affirmative response that a +// requested object does not exist. +// +// This is as opposed to errors indicating that the source is unavailable +// or misconfigured in some way, where we therefore cannot say for certain +// whether the requested object exists. 
+// +// If a caller needs to take a special action based on something not existing, +// such as falling back on some other source, use this function rather than +// direct type assertions so that the set of possible "not exist" errors can +// grow in future. +func ErrIsNotExist(err error) bool { + switch err.(type) { + case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported: + return true + default: + return false + } +} diff --git a/internal/getproviders/filesystem_mirror_source.go b/getproviders/filesystem_mirror_source.go similarity index 98% rename from internal/getproviders/filesystem_mirror_source.go rename to getproviders/filesystem_mirror_source.go index 118aff208f32..b611a5666137 100644 --- a/internal/getproviders/filesystem_mirror_source.go +++ b/getproviders/filesystem_mirror_source.go @@ -3,7 +3,7 @@ package getproviders import ( "context" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // FilesystemMirrorSource is a source that reads providers and their metadata diff --git a/internal/getproviders/filesystem_mirror_source_test.go b/getproviders/filesystem_mirror_source_test.go similarity index 99% rename from internal/getproviders/filesystem_mirror_source_test.go rename to getproviders/filesystem_mirror_source_test.go index f498b81cc4e3..ea73eefe4a28 100644 --- a/internal/getproviders/filesystem_mirror_source_test.go +++ b/getproviders/filesystem_mirror_source_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestFilesystemMirrorSourceAllAvailablePackages(t *testing.T) { diff --git a/internal/getproviders/filesystem_search.go b/getproviders/filesystem_search.go similarity index 99% rename from internal/getproviders/filesystem_search.go rename to getproviders/filesystem_search.go index 2ae727293fea..690c79f5efb1 100644 --- 
a/internal/getproviders/filesystem_search.go +++ b/getproviders/filesystem_search.go @@ -9,7 +9,7 @@ import ( svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // SearchLocalDirectory performs an immediate, one-off scan of the given base diff --git a/internal/getproviders/filesystem_search_test.go b/getproviders/filesystem_search_test.go similarity index 96% rename from internal/getproviders/filesystem_search_test.go rename to getproviders/filesystem_search_test.go index 37ced6ff5966..8577065569ed 100644 --- a/internal/getproviders/filesystem_search_test.go +++ b/getproviders/filesystem_search_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestSearchLocalDirectory(t *testing.T) { diff --git a/internal/getproviders/hanging_source.go b/getproviders/hanging_source.go similarity index 93% rename from internal/getproviders/hanging_source.go rename to getproviders/hanging_source.go index 388b617013f2..f0ae802a4ebf 100644 --- a/internal/getproviders/hanging_source.go +++ b/getproviders/hanging_source.go @@ -3,7 +3,7 @@ package getproviders import ( "context" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // HangingSource is an implementation of Source which hangs until the given diff --git a/internal/getproviders/hash.go b/getproviders/hash.go similarity index 100% rename from internal/getproviders/hash.go rename to getproviders/hash.go diff --git a/internal/getproviders/hash_test.go b/getproviders/hash_test.go similarity index 100% rename from internal/getproviders/hash_test.go rename to getproviders/hash_test.go diff --git a/internal/getproviders/http_mirror_source.go b/getproviders/http_mirror_source.go similarity index 98% rename from internal/getproviders/http_mirror_source.go rename to 
getproviders/http_mirror_source.go index 82f890a763fc..ccca3b95d6a0 100644 --- a/internal/getproviders/http_mirror_source.go +++ b/getproviders/http_mirror_source.go @@ -17,9 +17,9 @@ import ( svcauth "github.com/hashicorp/terraform-svchost/auth" "golang.org/x/net/idna" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/logging" "github.com/hashicorp/terraform/version" ) diff --git a/internal/getproviders/http_mirror_source_test.go b/getproviders/http_mirror_source_test.go similarity index 99% rename from internal/getproviders/http_mirror_source_test.go rename to getproviders/http_mirror_source_test.go index 3bf8a004aa5d..00aff1462c81 100644 --- a/internal/getproviders/http_mirror_source_test.go +++ b/getproviders/http_mirror_source_test.go @@ -12,7 +12,7 @@ import ( svchost "github.com/hashicorp/terraform-svchost" svcauth "github.com/hashicorp/terraform-svchost/auth" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestHTTPMirrorSource(t *testing.T) { diff --git a/internal/getproviders/memoize_source.go b/getproviders/memoize_source.go similarity index 98% rename from internal/getproviders/memoize_source.go rename to getproviders/memoize_source.go index 2930d5a18d6d..942aa32d395a 100644 --- a/internal/getproviders/memoize_source.go +++ b/getproviders/memoize_source.go @@ -4,7 +4,7 @@ import ( "context" "sync" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // MemoizeSource is a Source that wraps another Source and remembers its diff --git a/internal/getproviders/memoize_source_test.go b/getproviders/memoize_source_test.go similarity index 99% rename from internal/getproviders/memoize_source_test.go rename to getproviders/memoize_source_test.go index 
006602b34554..ea331ea34604 100644 --- a/internal/getproviders/memoize_source_test.go +++ b/getproviders/memoize_source_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestMemoizeSource(t *testing.T) { diff --git a/internal/getproviders/mock_source.go b/getproviders/mock_source.go similarity index 99% rename from internal/getproviders/mock_source.go rename to getproviders/mock_source.go index 930cbe313dde..32a99c1f19b3 100644 --- a/internal/getproviders/mock_source.go +++ b/getproviders/mock_source.go @@ -9,7 +9,7 @@ import ( "io/ioutil" "os" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // MockSource is an in-memory-only, statically-configured source intended for diff --git a/internal/getproviders/multi_source.go b/getproviders/multi_source.go similarity index 99% rename from internal/getproviders/multi_source.go rename to getproviders/multi_source.go index bcec76e8ffb7..c347fbabd672 100644 --- a/internal/getproviders/multi_source.go +++ b/getproviders/multi_source.go @@ -7,7 +7,7 @@ import ( svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // MultiSource is a Source that wraps a series of other sources and combines diff --git a/internal/getproviders/multi_source_test.go b/getproviders/multi_source_test.go similarity index 99% rename from internal/getproviders/multi_source_test.go rename to getproviders/multi_source_test.go index f78fb519c572..027b80875e38 100644 --- a/internal/getproviders/multi_source_test.go +++ b/getproviders/multi_source_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestMultiSourceAvailableVersions(t *testing.T) { diff --git 
a/internal/getproviders/package_authentication.go b/getproviders/package_authentication.go similarity index 100% rename from internal/getproviders/package_authentication.go rename to getproviders/package_authentication.go diff --git a/internal/getproviders/package_authentication_test.go b/getproviders/package_authentication_test.go similarity index 100% rename from internal/getproviders/package_authentication_test.go rename to getproviders/package_authentication_test.go diff --git a/internal/getproviders/public_keys.go b/getproviders/public_keys.go similarity index 100% rename from internal/getproviders/public_keys.go rename to getproviders/public_keys.go diff --git a/internal/getproviders/registry_client.go b/getproviders/registry_client.go similarity index 99% rename from internal/getproviders/registry_client.go rename to getproviders/registry_client.go index 5da2a83ca2b2..b0eec2d04157 100644 --- a/internal/getproviders/registry_client.go +++ b/getproviders/registry_client.go @@ -20,9 +20,9 @@ import ( svchost "github.com/hashicorp/terraform-svchost" svcauth "github.com/hashicorp/terraform-svchost/auth" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/logging" "github.com/hashicorp/terraform/version" ) diff --git a/internal/getproviders/registry_client_test.go b/getproviders/registry_client_test.go similarity index 99% rename from internal/getproviders/registry_client_test.go rename to getproviders/registry_client_test.go index 85fe00aa8f92..17d14f06efe9 100644 --- a/internal/getproviders/registry_client_test.go +++ b/getproviders/registry_client_test.go @@ -16,7 +16,7 @@ import ( "github.com/google/go-cmp/cmp" svchost "github.com/hashicorp/terraform-svchost" disco "github.com/hashicorp/terraform-svchost/disco" - 
"github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestConfigureDiscoveryRetry(t *testing.T) { diff --git a/internal/getproviders/registry_source.go b/getproviders/registry_source.go similarity index 99% rename from internal/getproviders/registry_source.go rename to getproviders/registry_source.go index e48e043f7161..e227438c599a 100644 --- a/internal/getproviders/registry_source.go +++ b/getproviders/registry_source.go @@ -7,7 +7,7 @@ import ( svchost "github.com/hashicorp/terraform-svchost" disco "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // RegistrySource is a Source that knows how to find and install providers from diff --git a/internal/getproviders/registry_source_test.go b/getproviders/registry_source_test.go similarity index 99% rename from internal/getproviders/registry_source_test.go rename to getproviders/registry_source_test.go index d55d1fff1a7a..b180c3b42c13 100644 --- a/internal/getproviders/registry_source_test.go +++ b/getproviders/registry_source_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestSourceAvailableVersions(t *testing.T) { diff --git a/internal/getproviders/source.go b/getproviders/source.go similarity index 89% rename from internal/getproviders/source.go rename to getproviders/source.go index b8543d8efd80..905bec6202eb 100644 --- a/internal/getproviders/source.go +++ b/getproviders/source.go @@ -3,7 +3,7 @@ package getproviders import ( "context" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // A Source can query a particular source for information about providers diff --git 
a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip 
b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta diff --git a/internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random rename to getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random diff --git 
a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy b/getproviders/testdata/filesystem-mirror/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/invalid b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/invalid similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/invalid rename to 
getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/invalid diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta 
b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random rename to getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random diff --git a/internal/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt b/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt similarity index 100% rename from internal/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt rename to getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt diff --git a/internal/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud b/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud similarity index 100% rename from 
internal/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud rename to getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud diff --git a/internal/getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar b/getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar similarity index 100% rename from internal/getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar rename to getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar diff --git a/internal/getproviders/testdata/search-local-directory/symlinks/real/example.net b/getproviders/testdata/search-local-directory/symlinks/real/example.net similarity index 100% rename from internal/getproviders/testdata/search-local-directory/symlinks/real/example.net rename to getproviders/testdata/search-local-directory/symlinks/real/example.net diff --git a/internal/getproviders/testdata/search-local-directory/symlinks/symlink b/getproviders/testdata/search-local-directory/symlinks/symlink similarity index 100% rename from internal/getproviders/testdata/search-local-directory/symlinks/symlink rename to getproviders/testdata/search-local-directory/symlinks/symlink diff --git a/internal/getproviders/types.go b/getproviders/types.go similarity index 99% rename from internal/getproviders/types.go rename to getproviders/types.go index 28b1913d67a2..951d6f0961fd 100644 --- a/internal/getproviders/types.go +++ b/getproviders/types.go @@ -9,7 +9,7 @@ import ( "github.com/apparentlymart/go-versions/versions" "github.com/apparentlymart/go-versions/versions/constraints" - "github.com/hashicorp/terraform/internal/addrs" + 
"github.com/hashicorp/terraform/addrs" ) // Version represents a particular single version of a provider. diff --git a/internal/getproviders/types_test.go b/getproviders/types_test.go similarity index 100% rename from internal/getproviders/types_test.go rename to getproviders/types_test.go diff --git a/grpcwrap/provider.go b/grpcwrap/provider.go new file mode 100644 index 000000000000..9b9719b179be --- /dev/null +++ b/grpcwrap/provider.go @@ -0,0 +1,419 @@ +package grpcwrap + +import ( + "context" + + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfplugin5" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" +) + +// Provider wraps a providers.Interface to implement a grpc ProviderServer. +// This is useful for creating a test binary out of an internal provider +// implementation. +func Provider(p providers.Interface) tfplugin5.ProviderServer { + return &provider{ + provider: p, + schema: p.GetProviderSchema(), + } +} + +type provider struct { + provider providers.Interface + schema providers.GetProviderSchemaResponse +} + +func (p *provider) GetSchema(_ context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { + resp := &tfplugin5.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*tfplugin5.Schema), + DataSourceSchemas: make(map[string]*tfplugin5.Schema), + } + + resp.Provider = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + if p.schema.Provider.Block != nil { + resp.Provider.Block = convert.ConfigSchemaToProto(p.schema.Provider.Block) + } + + resp.ProviderMeta = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + if p.schema.ProviderMeta.Block != nil { + resp.ProviderMeta.Block = convert.ConfigSchemaToProto(p.schema.ProviderMeta.Block) + } + + for typ, res := range p.schema.ResourceTypes { + resp.ResourceSchemas[typ] = 
&tfplugin5.Schema{ + Version: res.Version, + Block: convert.ConfigSchemaToProto(res.Block), + } + } + for typ, dat := range p.schema.DataSources { + resp.DataSourceSchemas[typ] = &tfplugin5.Schema{ + Version: dat.Version, + Block: convert.ConfigSchemaToProto(dat.Block), + } + } + + resp.ServerCapabilities = &tfplugin5.GetProviderSchema_ServerCapabilities{ + PlanDestroy: p.schema.ServerCapabilities.PlanDestroy, + } + + // include any diagnostics from the original GetSchema call + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, p.schema.Diagnostics) + + return resp, nil +} + +func (p *provider) PrepareProviderConfig(_ context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { + resp := &tfplugin5.PrepareProviderConfig_Response{} + ty := p.schema.Provider.Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + prepareResp := p.provider.ValidateProviderConfig(providers.ValidateProviderConfigRequest{ + Config: configVal, + }) + + // the PreparedConfig value is no longer used + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, prepareResp.Diagnostics) + return resp, nil +} + +func (p *provider) ValidateResourceTypeConfig(_ context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + resp := &tfplugin5.ValidateResourceTypeConfig_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provider.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ + TypeName: req.TypeName, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, 
validateResp.Diagnostics) + return resp, nil +} + +func (p *provider) ValidateDataSourceConfig(_ context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + resp := &tfplugin5.ValidateDataSourceConfig_Response{} + ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provider.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ + TypeName: req.TypeName, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) + return resp, nil +} + +func (p *provider) UpgradeResourceState(_ context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { + resp := &tfplugin5.UpgradeResourceState_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + upgradeResp := p.provider.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: req.TypeName, + Version: req.Version, + RawStateJSON: req.RawState.Json, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, upgradeResp.Diagnostics) + if upgradeResp.Diagnostics.HasErrors() { + return resp, nil + } + + dv, err := encodeDynamicValue(upgradeResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = dv + + return resp, nil +} + +func (p *provider) Configure(_ context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { + resp := &tfplugin5.Configure_Response{} + ty := p.schema.Provider.Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + 
configureResp := p.provider.ConfigureProvider(providers.ConfigureProviderRequest{ + TerraformVersion: req.TerraformVersion, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, configureResp.Diagnostics) + return resp, nil +} + +func (p *provider) ReadResource(_ context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { + resp := &tfplugin5.ReadResource_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + stateVal, err := decodeDynamicValue(req.CurrentState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + readResp := p.provider.ReadResource(providers.ReadResourceRequest{ + TypeName: req.TypeName, + PriorState: stateVal, + Private: req.Private, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) + if readResp.Diagnostics.HasErrors() { + return resp, nil + } + resp.Private = readResp.Private + + dv, err := encodeDynamicValue(readResp.NewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = dv + + return resp, nil +} + +func (p *provider) PlanResourceChange(_ context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { + resp := &tfplugin5.PlanResourceChange_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + priorStateVal, err := decodeDynamicValue(req.PriorState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + proposedStateVal, err := decodeDynamicValue(req.ProposedNewState, ty) + if 
err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + planResp := p.provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: req.TypeName, + PriorState: priorStateVal, + ProposedNewState: proposedStateVal, + Config: configVal, + PriorPrivate: req.PriorPrivate, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, planResp.Diagnostics) + if planResp.Diagnostics.HasErrors() { + return resp, nil + } + + resp.PlannedPrivate = planResp.PlannedPrivate + + resp.PlannedState, err = encodeDynamicValue(planResp.PlannedState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, path := range planResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.PathToAttributePath(path)) + } + + return resp, nil +} + +func (p *provider) ApplyResourceChange(_ context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { + resp := &tfplugin5.ApplyResourceChange_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + priorStateVal, err := decodeDynamicValue(req.PriorState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := decodeDynamicValue(req.PlannedState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + configVal, err := decodeDynamicValue(req.Config, ty) + if 
err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + applyResp := p.provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: req.TypeName, + PriorState: priorStateVal, + PlannedState: plannedStateVal, + Config: configVal, + PlannedPrivate: req.PlannedPrivate, + ProviderMeta: metaVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, applyResp.Diagnostics) + if applyResp.Diagnostics.HasErrors() { + return resp, nil + } + resp.Private = applyResp.Private + + resp.NewState, err = encodeDynamicValue(applyResp.NewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p *provider) ImportResourceState(_ context.Context, req *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { + resp := &tfplugin5.ImportResourceState_Response{} + + importResp := p.provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: req.TypeName, + ID: req.Id, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, importResp.Diagnostics) + + for _, res := range importResp.ImportedResources { + ty := p.schema.ResourceTypes[res.TypeName].Block.ImpliedType() + state, err := encodeDynamicValue(res.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + continue + } + + resp.ImportedResources = append(resp.ImportedResources, &tfplugin5.ImportResourceState_ImportedResource{ + TypeName: res.TypeName, + State: state, + Private: res.Private, + }) + } + + return resp, nil +} + +func (p *provider) ReadDataSource(_ context.Context, req *tfplugin5.ReadDataSource_Request) 
(*tfplugin5.ReadDataSource_Response, error) { + resp := &tfplugin5.ReadDataSource_Response{} + ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + readResp := p.provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: req.TypeName, + Config: configVal, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) + if readResp.Diagnostics.HasErrors() { + return resp, nil + } + + resp.State, err = encodeDynamicValue(readResp.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p *provider) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + resp := &tfplugin5.Stop_Response{} + err := p.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +// decode a DynamicValue from either the JSON or MsgPack encoding. +func decodeDynamicValue(v *tfplugin5.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} + +// encode a cty.Value into a DynamicValue msgpack payload. 
+func encodeDynamicValue(v cty.Value, ty cty.Type) (*tfplugin5.DynamicValue, error) { + mp, err := msgpack.Marshal(v, ty) + return &tfplugin5.DynamicValue{ + Msgpack: mp, + }, err +} diff --git a/internal/grpcwrap/provider6.go b/grpcwrap/provider6.go similarity index 98% rename from internal/grpcwrap/provider6.go rename to grpcwrap/provider6.go index af287d0f5444..4a44e688e42c 100644 --- a/internal/grpcwrap/provider6.go +++ b/grpcwrap/provider6.go @@ -3,9 +3,9 @@ package grpcwrap import ( "context" - "github.com/hashicorp/terraform/internal/plugin6/convert" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfplugin6" + "github.com/hashicorp/terraform/plugin6/convert" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfplugin6" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" "github.com/zclconf/go-cty/cty/msgpack" diff --git a/grpcwrap/provisioner.go b/grpcwrap/provisioner.go new file mode 100644 index 000000000000..91f707e27c7f --- /dev/null +++ b/grpcwrap/provisioner.go @@ -0,0 +1,116 @@ +package grpcwrap + +import ( + "context" + "log" + "strings" + "unicode/utf8" + + "github.com/hashicorp/terraform/communicator/shared" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfplugin5" +) + +// New wraps a provisioners.Interface to implement a grpc ProviderServer. +// This is useful for creating a test binary out of an internal provider +// implementation. 
+func Provisioner(p provisioners.Interface) tfplugin5.ProvisionerServer { + return &provisioner{ + provisioner: p, + schema: p.GetSchema().Provisioner, + } +} + +type provisioner struct { + provisioner provisioners.Interface + schema *configschema.Block +} + +func (p *provisioner) GetSchema(_ context.Context, req *tfplugin5.GetProvisionerSchema_Request) (*tfplugin5.GetProvisionerSchema_Response, error) { + resp := &tfplugin5.GetProvisionerSchema_Response{} + + resp.Provisioner = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + + if p.schema != nil { + resp.Provisioner.Block = convert.ConfigSchemaToProto(p.schema) + } + + return resp, nil +} + +func (p *provisioner) ValidateProvisionerConfig(_ context.Context, req *tfplugin5.ValidateProvisionerConfig_Request) (*tfplugin5.ValidateProvisionerConfig_Response, error) { + resp := &tfplugin5.ValidateProvisionerConfig_Response{} + ty := p.schema.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provisioner.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) + return resp, nil +} + +func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request, srv tfplugin5.Provisioner_ProvisionResourceServer) error { + // We send back a diagnostics over the stream if there was a + // provisioner-side problem. 
+ srvResp := &tfplugin5.ProvisionResource_Response{} + + ty := p.schema.ImpliedType() + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + connVal, err := decodeDynamicValue(req.Connection, shared.ConnectionBlockSupersetSchema.ImpliedType()) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + resp := p.provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: configVal, + Connection: connVal, + UIOutput: uiOutput{srv}, + }) + + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, resp.Diagnostics) + srv.Send(srvResp) + return nil +} + +func (p *provisioner) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + resp := &tfplugin5.Stop_Response{} + err := p.provisioner.Stop() + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +// uiOutput implements the terraform.UIOutput interface to adapt the grpc +// stream to the legacy Provisioner.Apply method. 
+type uiOutput struct { + srv tfplugin5.Provisioner_ProvisionResourceServer +} + +func (o uiOutput) Output(s string) { + err := o.srv.Send(&tfplugin5.ProvisionResource_Response{ + Output: strings.ToValidUTF8(s, string(utf8.RuneError)), + }) + if err != nil { + log.Printf("[ERROR] %s", err) + } +} diff --git a/internal/helper/slowmessage/slowmessage.go b/helper/slowmessage/slowmessage.go similarity index 100% rename from internal/helper/slowmessage/slowmessage.go rename to helper/slowmessage/slowmessage.go diff --git a/internal/helper/slowmessage/slowmessage_test.go b/helper/slowmessage/slowmessage_test.go similarity index 100% rename from internal/helper/slowmessage/slowmessage_test.go rename to helper/slowmessage/slowmessage_test.go diff --git a/internal/httpclient/client.go b/httpclient/client.go similarity index 100% rename from internal/httpclient/client.go rename to httpclient/client.go diff --git a/internal/httpclient/client_test.go b/httpclient/client_test.go similarity index 100% rename from internal/httpclient/client_test.go rename to httpclient/client_test.go diff --git a/internal/httpclient/useragent.go b/httpclient/useragent.go similarity index 100% rename from internal/httpclient/useragent.go rename to httpclient/useragent.go diff --git a/internal/httpclient/useragent_test.go b/httpclient/useragent_test.go similarity index 100% rename from internal/httpclient/useragent_test.go rename to httpclient/useragent_test.go diff --git a/internal/initwd/doc.go b/initwd/doc.go similarity index 100% rename from internal/initwd/doc.go rename to initwd/doc.go diff --git a/internal/initwd/from_module.go b/initwd/from_module.go similarity index 97% rename from internal/initwd/from_module.go rename to initwd/from_module.go index 38a4e49b5cee..3af3a627eb3e 100644 --- a/internal/initwd/from_module.go +++ b/initwd/from_module.go @@ -10,15 +10,15 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/internal/copy" - 
"github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/internal/getmodules" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/earlyconfig" + "github.com/hashicorp/terraform/getmodules" version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/modsdir" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" ) const initFromModuleRootCallName = "root" diff --git a/internal/initwd/from_module_test.go b/initwd/from_module_test.go similarity index 97% rename from internal/initwd/from_module_test.go rename to initwd/from_module_test.go index 9714ed3465f7..dddfce312bcc 100644 --- a/internal/initwd/from_module_test.go +++ b/initwd/from_module_test.go @@ -9,11 +9,11 @@ import ( "github.com/google/go-cmp/cmp" version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" ) func TestDirFromModule_registry(t *testing.T) { diff --git a/internal/initwd/load_config.go b/initwd/load_config.go similarity index 91% rename from internal/initwd/load_config.go rename to initwd/load_config.go index 6dc032ba1704..741d8189a89f 100644 --- a/internal/initwd/load_config.go +++ b/initwd/load_config.go @@ -5,9 +5,9 @@ import ( version "github.com/hashicorp/go-version" 
"github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/earlyconfig" + "github.com/hashicorp/terraform/modsdir" + "github.com/hashicorp/terraform/tfdiags" ) // LoadConfig loads a full configuration tree that has previously had all of diff --git a/internal/initwd/module_install.go b/initwd/module_install.go similarity index 98% rename from internal/initwd/module_install.go rename to initwd/module_install.go index 2a1f7b685f3d..411df0e52766 100644 --- a/internal/initwd/module_install.go +++ b/initwd/module_install.go @@ -14,14 +14,14 @@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/internal/getmodules" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/registry/regsrc" - "github.com/hashicorp/terraform/internal/registry/response" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/earlyconfig" + "github.com/hashicorp/terraform/getmodules" + "github.com/hashicorp/terraform/modsdir" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" + "github.com/hashicorp/terraform/tfdiags" ) type ModuleInstaller struct { diff --git a/internal/initwd/module_install_hooks.go b/initwd/module_install_hooks.go similarity index 100% rename from internal/initwd/module_install_hooks.go rename to initwd/module_install_hooks.go diff --git a/internal/initwd/module_install_test.go b/initwd/module_install_test.go similarity index 98% rename 
from internal/initwd/module_install_test.go rename to initwd/module_install_test.go index 6f77c60f60ea..f26b8eb03476 100644 --- a/internal/initwd/module_install_test.go +++ b/initwd/module_install_test.go @@ -16,14 +16,14 @@ import ( "github.com/google/go-cmp/cmp" version "github.com/hashicorp/go-version" svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/tfdiags" - - _ "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" + + _ "github.com/hashicorp/terraform/logging" ) func TestMain(m *testing.M) { diff --git a/internal/initwd/testdata/already-installed/root.tf b/initwd/testdata/already-installed/root.tf similarity index 100% rename from internal/initwd/testdata/already-installed/root.tf rename to initwd/testdata/already-installed/root.tf diff --git a/internal/initwd/testdata/empty/.gitignore b/initwd/testdata/empty/.gitignore similarity index 100% rename from internal/initwd/testdata/empty/.gitignore rename to initwd/testdata/empty/.gitignore diff --git a/internal/initwd/testdata/go-getter-modules/.gitignore b/initwd/testdata/go-getter-modules/.gitignore similarity index 100% rename from internal/initwd/testdata/go-getter-modules/.gitignore rename to initwd/testdata/go-getter-modules/.gitignore diff --git a/internal/initwd/testdata/go-getter-modules/root.tf b/initwd/testdata/go-getter-modules/root.tf similarity index 100% rename from internal/initwd/testdata/go-getter-modules/root.tf rename to 
initwd/testdata/go-getter-modules/root.tf diff --git a/internal/initwd/testdata/invalid-version-constraint-local/.gitignore b/initwd/testdata/invalid-version-constraint-local/.gitignore similarity index 100% rename from internal/initwd/testdata/invalid-version-constraint-local/.gitignore rename to initwd/testdata/invalid-version-constraint-local/.gitignore diff --git a/internal/initwd/testdata/invalid-version-constraint-local/root.tf b/initwd/testdata/invalid-version-constraint-local/root.tf similarity index 100% rename from internal/initwd/testdata/invalid-version-constraint-local/root.tf rename to initwd/testdata/invalid-version-constraint-local/root.tf diff --git a/internal/initwd/testdata/invalid-version-constraint/.gitignore b/initwd/testdata/invalid-version-constraint/.gitignore similarity index 100% rename from internal/initwd/testdata/invalid-version-constraint/.gitignore rename to initwd/testdata/invalid-version-constraint/.gitignore diff --git a/internal/initwd/testdata/invalid-version-constraint/root.tf b/initwd/testdata/invalid-version-constraint/root.tf similarity index 100% rename from internal/initwd/testdata/invalid-version-constraint/root.tf rename to initwd/testdata/invalid-version-constraint/root.tf diff --git a/internal/initwd/testdata/load-module-package-escape/child/package-escape-child.tf b/initwd/testdata/load-module-package-escape/child/package-escape-child.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-escape/child/package-escape-child.tf rename to initwd/testdata/load-module-package-escape/child/package-escape-child.tf diff --git a/internal/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf b/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf rename to 
initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf diff --git a/internal/initwd/testdata/load-module-package-escape/package-escape.tf b/initwd/testdata/load-module-package-escape/package-escape.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-escape/package-escape.tf rename to initwd/testdata/load-module-package-escape/package-escape.tf diff --git a/internal/initwd/testdata/load-module-package-prefix/package-prefix.tf b/initwd/testdata/load-module-package-prefix/package-prefix.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-prefix/package-prefix.tf rename to initwd/testdata/load-module-package-prefix/package-prefix.tf diff --git a/internal/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf b/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf rename to initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf diff --git a/internal/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf b/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf similarity index 100% rename from internal/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf rename to initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf diff --git a/internal/initwd/testdata/local-module-error/child_a/main.tf b/initwd/testdata/local-module-error/child_a/main.tf similarity index 100% rename from internal/initwd/testdata/local-module-error/child_a/main.tf rename to initwd/testdata/local-module-error/child_a/main.tf diff --git a/internal/initwd/testdata/local-module-error/main.tf b/initwd/testdata/local-module-error/main.tf similarity index 100% rename from 
internal/initwd/testdata/local-module-error/main.tf rename to initwd/testdata/local-module-error/main.tf diff --git a/internal/initwd/testdata/local-module-symlink/child_a/child_a.tf b/initwd/testdata/local-module-symlink/child_a/child_a.tf similarity index 100% rename from internal/initwd/testdata/local-module-symlink/child_a/child_a.tf rename to initwd/testdata/local-module-symlink/child_a/child_a.tf diff --git a/internal/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf b/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf similarity index 100% rename from internal/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf rename to initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf diff --git a/internal/initwd/testdata/local-module-symlink/modules/child_a b/initwd/testdata/local-module-symlink/modules/child_a similarity index 100% rename from internal/initwd/testdata/local-module-symlink/modules/child_a rename to initwd/testdata/local-module-symlink/modules/child_a diff --git a/internal/initwd/testdata/local-module-symlink/root.tf b/initwd/testdata/local-module-symlink/root.tf similarity index 100% rename from internal/initwd/testdata/local-module-symlink/root.tf rename to initwd/testdata/local-module-symlink/root.tf diff --git a/internal/initwd/testdata/local-modules/child_a/child_a.tf b/initwd/testdata/local-modules/child_a/child_a.tf similarity index 100% rename from internal/initwd/testdata/local-modules/child_a/child_a.tf rename to initwd/testdata/local-modules/child_a/child_a.tf diff --git a/internal/initwd/testdata/local-modules/child_a/child_b/child_b.tf b/initwd/testdata/local-modules/child_a/child_b/child_b.tf similarity index 100% rename from internal/initwd/testdata/local-modules/child_a/child_b/child_b.tf rename to initwd/testdata/local-modules/child_a/child_b/child_b.tf diff --git a/internal/initwd/testdata/local-modules/root.tf b/initwd/testdata/local-modules/root.tf similarity index 100% rename from 
internal/initwd/testdata/local-modules/root.tf rename to initwd/testdata/local-modules/root.tf diff --git a/internal/initwd/testdata/prerelease-version-constraint-match/root.tf b/initwd/testdata/prerelease-version-constraint-match/root.tf similarity index 100% rename from internal/initwd/testdata/prerelease-version-constraint-match/root.tf rename to initwd/testdata/prerelease-version-constraint-match/root.tf diff --git a/internal/initwd/testdata/prerelease-version-constraint/root.tf b/initwd/testdata/prerelease-version-constraint/root.tf similarity index 100% rename from internal/initwd/testdata/prerelease-version-constraint/root.tf rename to initwd/testdata/prerelease-version-constraint/root.tf diff --git a/internal/initwd/testdata/registry-modules/.gitignore b/initwd/testdata/registry-modules/.gitignore similarity index 100% rename from internal/initwd/testdata/registry-modules/.gitignore rename to initwd/testdata/registry-modules/.gitignore diff --git a/internal/initwd/testdata/registry-modules/root.tf b/initwd/testdata/registry-modules/root.tf similarity index 100% rename from internal/initwd/testdata/registry-modules/root.tf rename to initwd/testdata/registry-modules/root.tf diff --git a/initwd/testing.go b/initwd/testing.go new file mode 100644 index 000000000000..5fe6c768f0fb --- /dev/null +++ b/initwd/testing.go @@ -0,0 +1,74 @@ +package initwd + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests, +// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows +// a test configuration to be loaded in a single step. +// +// If module installation fails, t.Fatal (or similar) is called to halt +// execution of the test, under the assumption that installation failures are +// not expected. 
If installation failures _are_ expected then use +// NewLoaderForTests and work with the loader object directly. If module +// installation succeeds but generates warnings, these warnings are discarded. +// +// If installation succeeds but errors are detected during loading then a +// possibly-incomplete config is returned along with error diagnostics. The +// test run is not aborted in this case, so that the caller can make assertions +// against the returned diagnostics. +// +// As with NewLoaderForTests, a cleanup function is returned which must be +// called before the test completes in order to remove the temporary +// modules directory. +func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) { + t.Helper() + + var diags tfdiags.Diagnostics + + loader, cleanup := configload.NewLoaderForTests(t) + inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) + + _, moreDiags := inst.InstallModules(context.Background(), rootDir, true, ModuleInstallHooksImpl{}) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + return nil, nil, func() {}, diags + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + config, hclDiags := loader.LoadConfig(rootDir) + diags = diags.Append(hclDiags) + return config, loader, cleanup, diags +} + +// MustLoadConfigForTests is a variant of LoadConfigForTests which calls +// t.Fatal (or similar) if there are any errors during loading, and thus +// does not return diagnostics at all. +// +// This is useful for concisely writing tests that don't expect errors at +// all. For tests that expect errors and need to assert against them, use +// LoadConfigForTests instead. 
+func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) { + t.Helper() + + config, loader, cleanup, diags := LoadConfigForTests(t, rootDir) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + } + return config, loader, cleanup +} diff --git a/internal/instances/expander.go b/instances/expander.go similarity index 99% rename from internal/instances/expander.go rename to instances/expander.go index 2c912c897dc8..45b50221fdbc 100644 --- a/internal/instances/expander.go +++ b/instances/expander.go @@ -5,7 +5,7 @@ import ( "sort" "sync" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/instances/expander_test.go b/instances/expander_test.go similarity index 99% rename from internal/instances/expander_test.go rename to instances/expander_test.go index 2e983288929a..a82cf50baaa3 100644 --- a/internal/instances/expander_test.go +++ b/instances/expander_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestExpander(t *testing.T) { diff --git a/internal/instances/expansion_mode.go b/instances/expansion_mode.go similarity index 97% rename from internal/instances/expansion_mode.go rename to instances/expansion_mode.go index 1183e3c768cf..be33934324e8 100644 --- a/internal/instances/expansion_mode.go +++ b/instances/expansion_mode.go @@ -6,7 +6,7 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // expansion is an internal interface used to represent the different diff --git a/internal/instances/instance_key_data.go b/instances/instance_key_data.go similarity index 100% rename from internal/instances/instance_key_data.go rename to instances/instance_key_data.go diff --git a/instances/set.go 
b/instances/set.go new file mode 100644 index 000000000000..714e68ef8ee1 --- /dev/null +++ b/instances/set.go @@ -0,0 +1,51 @@ +package instances + +import ( + "github.com/hashicorp/terraform/addrs" +) + +// Set is a set of instances, intended mainly for the return value of +// Expander.AllInstances, where it therefore represents all of the module +// and resource instances known to the expander. +type Set struct { + // Set currently really just wraps Expander with a reduced API that + // only supports lookups, to make it clear that a holder of a Set should + // not be modifying the expander any further. + exp *Expander +} + +// HasModuleInstance returns true if and only if the set contains the module +// instance with the given address. +func (s Set) HasModuleInstance(want addrs.ModuleInstance) bool { + return s.exp.knowsModuleInstance(want) +} + +// HasModuleCall returns true if and only if the set contains the module +// call with the given address, even if that module call has no instances. +func (s Set) HasModuleCall(want addrs.AbsModuleCall) bool { + return s.exp.knowsModuleCall(want) +} + +// HasResourceInstance returns true if and only if the set contains the resource +// instance with the given address. +// TODO: +func (s Set) HasResourceInstance(want addrs.AbsResourceInstance) bool { + return s.exp.knowsResourceInstance(want) +} + +// HasResource returns true if and only if the set contains the resource with +// the given address, even if that resource has no instances. +// TODO: +func (s Set) HasResource(want addrs.AbsResource) bool { + return s.exp.knowsResource(want) +} + +// InstancesForModule returns all of the module instances that correspond with +// the given static module path. +// +// If there are multiple module calls in the path that have repetition enabled +// then the result is the full expansion of all combinations of all of their +// declared instance keys. 
+func (s Set) InstancesForModule(modAddr addrs.Module) []addrs.ModuleInstance { + return s.exp.expandModule(modAddr, true) +} diff --git a/internal/instances/set_test.go b/instances/set_test.go similarity index 99% rename from internal/instances/set_test.go rename to instances/set_test.go index e255cef1b888..fcb381dcd6db 100644 --- a/internal/instances/set_test.go +++ b/instances/set_test.go @@ -3,7 +3,7 @@ package instances import ( "testing" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/addrs/output_value.go b/internal/addrs/output_value.go deleted file mode 100644 index ff76556b2ac9..000000000000 --- a/internal/addrs/output_value.go +++ /dev/null @@ -1,223 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// OutputValue is the address of an output value, in the context of the module -// that is defining it. -// -// This is related to but separate from ModuleCallOutput, which represents -// a module output from the perspective of its parent module. Since output -// values cannot be represented from the module where they are defined, -// OutputValue is not Referenceable, while ModuleCallOutput is. -type OutputValue struct { - Name string -} - -func (v OutputValue) String() string { - return "output." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: v, - } -} - -// InModule converts the receiver into a config address within the given -// module. -func (v OutputValue) InModule(m Module) ConfigOutputValue { - return ConfigOutputValue{ - Module: m, - OutputValue: v, - } -} - -// AbsOutputValue is the absolute address of an output value within a module instance. 
-// -// This represents an output globally within the namespace of a particular -// configuration. It is related to but separate from ModuleCallOutput, which -// represents a module output from the perspective of its parent module. -type AbsOutputValue struct { - Module ModuleInstance - OutputValue OutputValue -} - -// OutputValue returns the absolute address of an output value of the given -// name within the receiving module instance. -func (m ModuleInstance) OutputValue(name string) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: OutputValue{ - Name: name, - }, - } -} - -func (v AbsOutputValue) Check(t CheckType, i int) Check { - return Check{ - Container: v, - Type: t, - Index: i, - } -} - -func (v AbsOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -func (v AbsOutputValue) Equal(o AbsOutputValue) bool { - return v.OutputValue == o.OutputValue && v.Module.Equal(o.Module) -} - -func (v AbsOutputValue) ConfigOutputValue() ConfigOutputValue { - return ConfigOutputValue{ - Module: v.Module.Module(), - OutputValue: v.OutputValue, - } -} - -func (v AbsOutputValue) checkableSigil() { - // Output values are checkable -} - -func (v AbsOutputValue) ConfigCheckable() ConfigCheckable { - // Output values are declared by "output" blocks in the configuration, - // represented as ConfigOutputValue. 
- return v.ConfigOutputValue() -} - -func (v AbsOutputValue) CheckableKind() CheckableKind { - return CheckableOutputValue -} - -func (v AbsOutputValue) UniqueKey() UniqueKey { - return absOutputValueUniqueKey(v.String()) -} - -type absOutputValueUniqueKey string - -func (k absOutputValueUniqueKey) uniqueKeySigil() {} - -func ParseAbsOutputValue(traversal hcl.Traversal) (AbsOutputValue, tfdiags.Diagnostics) { - path, remain, diags := parseModuleInstancePrefix(traversal) - if diags.HasErrors() { - return AbsOutputValue{}, diags - } - - if len(remain) != 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "An output name is required.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsOutputValue{}, diags - } - - if remain.RootName() != "output" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Output address must start with \"output.\".", - Subject: remain[0].SourceRange().Ptr(), - }) - return AbsOutputValue{}, diags - } - - var name string - switch tt := remain[1].(type) { - case hcl.TraverseAttr: - name = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "An output name is required.", - Subject: remain[1].SourceRange().Ptr(), - }) - return AbsOutputValue{}, diags - } - - return AbsOutputValue{ - Module: path, - OutputValue: OutputValue{ - Name: name, - }, - }, diags -} - -func ParseAbsOutputValueStr(str string) (AbsOutputValue, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsOutputValue{}, diags - } - - addr, addrDiags := ParseAbsOutputValue(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ModuleCallOutput converts an AbsModuleOutput into a 
ModuleCallOutput, -// returning also the module instance that the ModuleCallOutput is relative -// to. -// -// The root module does not have a call, and so this method cannot be used -// with outputs in the root module, and will panic in that case. -func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallInstanceOutput) { - if v.Module.IsRoot() { - panic("ReferenceFromCall used with root module output") - } - - caller, call := v.Module.CallInstance() - return caller, ModuleCallInstanceOutput{ - Call: call, - Name: v.OutputValue.Name, - } -} - -// ConfigOutputValue represents a particular "output" block in the -// configuration, which might have many AbsOutputValue addresses associated -// with it at runtime if it belongs to a module that was called using -// "count" or "for_each". -type ConfigOutputValue struct { - Module Module - OutputValue OutputValue -} - -func (v ConfigOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -func (v ConfigOutputValue) configCheckableSigil() { - // ConfigOutputValue is the ConfigCheckable for AbsOutputValue. -} - -func (v ConfigOutputValue) CheckableKind() CheckableKind { - return CheckableOutputValue -} - -func (v ConfigOutputValue) UniqueKey() UniqueKey { - return configOutputValueUniqueKey(v.String()) -} - -type configOutputValueUniqueKey string - -func (k configOutputValueUniqueKey) uniqueKeySigil() {} diff --git a/internal/addrs/provider.go b/internal/addrs/provider.go deleted file mode 100644 index 7130de1ea108..000000000000 --- a/internal/addrs/provider.go +++ /dev/null @@ -1,205 +0,0 @@ -package addrs - -import ( - "github.com/hashicorp/hcl/v2" - tfaddr "github.com/hashicorp/terraform-registry-address" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Provider encapsulates a single provider type. 
In the future this will be -// extended to include additional fields including Namespace and SourceHost -type Provider = tfaddr.Provider - -// DefaultProviderRegistryHost is the hostname used for provider addresses that do -// not have an explicit hostname. -const DefaultProviderRegistryHost = tfaddr.DefaultProviderRegistryHost - -// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider -// namespace. Built-in provider addresses must also have their namespace set -// to BuiltInProviderNamespace in order to be considered as built-in. -const BuiltInProviderHost = tfaddr.BuiltInProviderHost - -// BuiltInProviderNamespace is the provider namespace used for "built-in" -// providers. Built-in provider addresses must also have their hostname -// set to BuiltInProviderHost in order to be considered as built-in. -// -// The this namespace is literally named "builtin", in the hope that users -// who see FQNs containing this will be able to infer the way in which they are -// special, even if they haven't encountered the concept formally yet. -const BuiltInProviderNamespace = tfaddr.BuiltInProviderNamespace - -// LegacyProviderNamespace is the special string used in the Namespace field -// of type Provider to mark a legacy provider address. This special namespace -// value would normally be invalid, and can be used only when the hostname is -// DefaultRegistryHost because that host owns the mapping from legacy name to -// FQN. -const LegacyProviderNamespace = tfaddr.LegacyProviderNamespace - -func IsDefaultProvider(addr Provider) bool { - return addr.Hostname == DefaultProviderRegistryHost && addr.Namespace == "hashicorp" -} - -// NewProvider constructs a provider address from its parts, and normalizes -// the namespace and type parts to lowercase using unicode case folding rules -// so that resulting addrs.Provider values can be compared using standard -// Go equality rules (==). 
-// -// The hostname is given as a svchost.Hostname, which is required by the -// contract of that type to have already been normalized for equality testing. -// -// This function will panic if the given namespace or type name are not valid. -// When accepting namespace or type values from outside the program, use -// ParseProviderPart first to check that the given value is valid. -func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider { - return tfaddr.NewProvider(hostname, namespace, typeName) -} - -// ImpliedProviderForUnqualifiedType represents the rules for inferring what -// provider FQN a user intended when only a naked type name is available. -// -// For all except the type name "terraform" this returns a so-called "default" -// provider, which is under the registry.terraform.io/hashicorp/ namespace. -// -// As a special case, the string "terraform" maps to -// "terraform.io/builtin/terraform" because that is the more likely user -// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" -// which remains only for compatibility with older Terraform versions. -func ImpliedProviderForUnqualifiedType(typeName string) Provider { - switch typeName { - case "terraform": - // Note for future maintainers: any additional strings we add here - // as implied to be builtin must never also be use as provider names - // in the registry.terraform.io/hashicorp/... namespace, because - // otherwise older versions of Terraform could implicitly select - // the registry name instead of the internal one. - return NewBuiltInProvider(typeName) - default: - return NewDefaultProvider(typeName) - } -} - -// NewDefaultProvider returns the default address of a HashiCorp-maintained, -// Registry-hosted provider. 
-func NewDefaultProvider(name string) Provider { - return tfaddr.Provider{ - Type: MustParseProviderPart(name), - Namespace: "hashicorp", - Hostname: DefaultProviderRegistryHost, - } -} - -// NewBuiltInProvider returns the address of a "built-in" provider. See -// the docs for Provider.IsBuiltIn for more information. -func NewBuiltInProvider(name string) Provider { - return tfaddr.Provider{ - Type: MustParseProviderPart(name), - Namespace: BuiltInProviderNamespace, - Hostname: BuiltInProviderHost, - } -} - -// NewLegacyProvider returns a mock address for a provider. -// This will be removed when ProviderType is fully integrated. -func NewLegacyProvider(name string) Provider { - return Provider{ - // We intentionally don't normalize and validate the legacy names, - // because existing code expects legacy provider names to pass through - // verbatim, even if not compliant with our new naming rules. - Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultProviderRegistryHost, - } -} - -// ParseProviderSourceString parses a value of the form expected in the "source" -// argument of a required_providers entry and returns the corresponding -// fully-qualified provider address. This is intended primarily to parse the -// FQN-like strings returned by terraform-config-inspect. -// -// The following are valid source string formats: -// -// - name -// - namespace/name -// - hostname/namespace/name -func ParseProviderSourceString(str string) (tfaddr.Provider, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - ret, err := tfaddr.ParseProviderSource(str) - if pe, ok := err.(*tfaddr.ParserError); ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: pe.Summary, - Detail: pe.Detail, - }) - return ret, diags - } - - if !ret.HasKnownNamespace() { - ret.Namespace = "hashicorp" - } - - return ret, nil -} - -// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if -// it returns an error. 
-func MustParseProviderSourceString(str string) Provider { - result, diags := ParseProviderSourceString(str) - if diags.HasErrors() { - panic(diags.Err().Error()) - } - return result -} - -// ParseProviderPart processes an addrs.Provider namespace or type string -// provided by an end-user, producing a normalized version if possible or -// an error if the string contains invalid characters. -// -// A provider part is processed in the same way as an individual label in a DNS -// domain name: it is transformed to lowercase per the usual DNS case mapping -// and normalization rules and may contain only letters, digits, and dashes. -// Additionally, dashes may not appear at the start or end of the string. -// -// These restrictions are intended to allow these names to appear in fussy -// contexts such as directory/file names on case-insensitive filesystems, -// repository names on GitHub, etc. We're using the DNS rules in particular, -// rather than some similar rules defined locally, because the hostname part -// of an addrs.Provider is already a hostname and it's ideal to use exactly -// the same case folding and normalization rules for all of the parts. -// -// In practice a provider type string conventionally does not contain dashes -// either. Such names are permitted, but providers with such type names will be -// hard to use because their resource type names will not be able to contain -// the provider type name and thus each resource will need an explicit provider -// address specified. (A real-world example of such a provider is the -// "google-beta" variant of the GCP provider, which has resource types that -// start with the "google_" prefix instead.) -// -// It's valid to pass the result of this function as the argument to a -// subsequent call, in which case the result will be identical. 
-func ParseProviderPart(given string) (string, error) { - return tfaddr.ParseProviderPart(given) -} - -// MustParseProviderPart is a wrapper around ParseProviderPart that panics if -// it returns an error. -func MustParseProviderPart(given string) string { - result, err := ParseProviderPart(given) - if err != nil { - panic(err.Error()) - } - return result -} - -// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string) -func IsProviderPartNormalized(str string) (bool, error) { - normalized, err := ParseProviderPart(str) - if err != nil { - return false, err - } - if str == normalized { - return true, nil - } - return false, nil -} diff --git a/internal/backend/backend.go b/internal/backend/backend.go deleted file mode 100644 index 58e4398a698b..000000000000 --- a/internal/backend/backend.go +++ /dev/null @@ -1,423 +0,0 @@ -// Package backend provides interfaces that the CLI uses to interact with -// Terraform. A backend provides the abstraction that allows the same CLI -// to simultaneously support both local and remote operations for seamlessly -// using Terraform in a team environment. 
-package backend - -import ( - "context" - "errors" - "io/ioutil" - "log" - "os" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" -) - -// DefaultStateName is the name of the default, initial state that every -// backend must have. This state cannot be deleted. -const DefaultStateName = "default" - -var ( - // ErrDefaultWorkspaceNotSupported is returned when an operation does not - // support using the default workspace, but requires a named workspace to - // be selected. - ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" + - "You can create a new workspace with the \"workspace new\" command.") - - // ErrWorkspacesNotSupported is an error returned when a caller attempts - // to perform an operation on a workspace other than "default" for a - // backend that doesn't support multiple workspaces. - // - // The caller can detect this to do special fallback behavior or produce - // a specific, helpful error message. - ErrWorkspacesNotSupported = errors.New("workspaces not supported") -) - -// InitFn is used to initialize a new backend. -type InitFn func() Backend - -// Backend is the minimal interface that must be implemented to enable Terraform. 
-type Backend interface { - // ConfigSchema returns a description of the expected configuration - // structure for the receiving backend. - // - // This method does not have any side-effects for the backend and can - // be safely used before configuring. - ConfigSchema() *configschema.Block - - // PrepareConfig checks the validity of the values in the given - // configuration, and inserts any missing defaults, assuming that its - // structure has already been validated per the schema returned by - // ConfigSchema. - // - // This method does not have any side-effects for the backend and can - // be safely used before configuring. It also does not consult any - // external data such as environment variables, disk files, etc. Validation - // that requires such external data should be deferred until the - // Configure call. - // - // If error diagnostics are returned then the configuration is not valid - // and must not subsequently be passed to the Configure method. - // - // This method may return configuration-contextual diagnostics such - // as tfdiags.AttributeValue, and so the caller should provide the - // necessary context via the diags.InConfigBody method before returning - // diagnostics to the user. - PrepareConfig(cty.Value) (cty.Value, tfdiags.Diagnostics) - - // Configure uses the provided configuration to set configuration fields - // within the backend. - // - // The given configuration is assumed to have already been validated - // against the schema returned by ConfigSchema and passed validation - // via PrepareConfig. - // - // This method may be called only once per backend instance, and must be - // called before all other methods except where otherwise stated. - // - // If error diagnostics are returned, the internal state of the instance - // is undefined and no other methods may be called. - Configure(cty.Value) tfdiags.Diagnostics - - // StateMgr returns the state manager for the given workspace name. 
- // - // If the returned state manager also implements statemgr.Locker then - // it's the caller's responsibility to call Lock and Unlock as appropriate. - // - // If the named workspace doesn't exist, or if it has no state, it will - // be created either immediately on this call or the first time - // PersistState is called, depending on the state manager implementation. - StateMgr(workspace string) (statemgr.Full, error) - - // DeleteWorkspace removes the workspace with the given name if it exists. - // - // DeleteWorkspace cannot prevent deleting a state that is in use. It is - // the responsibility of the caller to hold a Lock for the state manager - // belonging to this workspace before calling this method. - DeleteWorkspace(name string, force bool) error - - // States returns a list of the names of all of the workspaces that exist - // in this backend. - Workspaces() ([]string, error) -} - -// Enhanced implements additional behavior on top of a normal backend. -// -// 'Enhanced' backends are an implementation detail only, and are no longer reflected as an external -// 'feature' of backends. In other words, backends refer to plugins for remote state snapshot -// storage only, and the Enhanced interface here is a necessary vestige of the 'local' and -// remote/cloud backends only. -type Enhanced interface { - Backend - - // Operation performs a Terraform operation such as refresh, plan, apply. - // It is up to the implementation to determine what "performing" means. - // This DOES NOT BLOCK. The context returned as part of RunningOperation - // should be used to block for completion. - // If the state used in the operation can be locked, it is the - // responsibility of the Backend to lock the state for the duration of the - // running operation. - Operation(context.Context, *Operation) (*RunningOperation, error) -} - -// Local implements additional behavior on a Backend that allows local -// operations in addition to remote operations. 
-// -// This enables more behaviors of Terraform that require more data such -// as `console`, `import`, `graph`. These require direct access to -// configurations, variables, and more. Not all backends may support this -// so we separate it out into its own optional interface. -type Local interface { - // LocalRun uses information in the Operation to prepare a set of objects - // needed to start running that operation. - // - // The operation doesn't need a Type set, but it needs various other - // options set. This is a rather odd API that tries to treat all - // operations as the same when they really aren't; see the local and remote - // backend's implementations of this to understand what this actually - // does, because this operation has no well-defined contract aside from - // "whatever it already does". - LocalRun(*Operation) (*LocalRun, statemgr.Full, tfdiags.Diagnostics) -} - -// LocalRun represents the assortment of objects that we can collect or -// calculate from an Operation object, which we can then use for local -// operations. -// -// The operation methods on terraform.Context (Plan, Apply, Import, etc) each -// generate new artifacts which supersede parts of the LocalRun object that -// started the operation, so callers should be careful to use those subsequent -// artifacts instead of the fields of LocalRun where appropriate. The LocalRun -// data intentionally doesn't update as a result of calling methods on Context, -// in order to make data flow explicit. -// -// This type is a weird architectural wart resulting from the overly-general -// way our backend API models operations, whereby we behave as if all -// Terraform operations have the same inputs and outputs even though they -// are actually all rather different. The exact meaning of the fields in -// this type therefore vary depending on which OperationType was passed to -// Local.Context in order to create an object of this type. 
-type LocalRun struct { - // Core is an already-initialized Terraform Core context, ready to be - // used to run operations such as Plan and Apply. - Core *terraform.Context - - // Config is the configuration we're working with, which typically comes - // from either config files directly on local disk (when we're creating - // a plan, or similar) or from a snapshot embedded in a plan file - // (when we're applying a saved plan). - Config *configs.Config - - // InputState is the state that should be used for whatever is the first - // method call to a context created with CoreOpts. When creating a plan - // this will be the previous run state, but when applying a saved plan - // this will be the prior state recorded in that plan. - InputState *states.State - - // PlanOpts are options to pass to a Plan or Plan-like operation. - // - // This is nil when we're applying a saved plan, because the plan itself - // contains enough information about its options to apply it. - PlanOpts *terraform.PlanOpts - - // Plan is a plan loaded from a saved plan file, if our operation is to - // apply that saved plan. - // - // This is nil when we're not applying a saved plan. - Plan *plans.Plan -} - -// An operation represents an operation for Terraform to execute. -// -// Note that not all fields are supported by all backends and can result -// in an error if set. All backend implementations should show user-friendly -// errors explaining any incorrectly set values. For example, the local -// backend doesn't support a PlanId being set. -// -// The operation options are purposely designed to have maximal compatibility -// between Terraform and Terraform Servers (a commercial product offered by -// HashiCorp). Therefore, it isn't expected that other implementation support -// every possible option. The struct here is generalized in order to allow -// even partial implementations to exist in the open, without walling off -// remote functionality 100% behind a commercial wall. 
Anyone can implement -// against this interface and have Terraform interact with it just as it -// would with HashiCorp-provided Terraform Servers. -type Operation struct { - // Type is the operation to perform. - Type OperationType - - // PlanId is an opaque value that backends can use to execute a specific - // plan for an apply operation. - // - // PlanOutBackend is the backend to store with the plan. This is the - // backend that will be used when applying the plan. - PlanId string - PlanRefresh bool // PlanRefresh will do a refresh before a plan - PlanOutPath string // PlanOutPath is the path to save the plan - PlanOutBackend *plans.Backend - - // ConfigDir is the path to the directory containing the configuration's - // root module. - ConfigDir string - - // ConfigLoader is a configuration loader that can be used to load - // configuration from ConfigDir. - ConfigLoader *configload.Loader - - // DependencyLocks represents the locked dependencies associated with - // the configuration directory given in ConfigDir. - // - // Note that if field PlanFile is set then the plan file should contain - // its own dependency locks. The backend is responsible for correctly - // selecting between these two sets of locks depending on whether it - // will be using ConfigDir or PlanFile to get the configuration for - // this operation. - DependencyLocks *depsfile.Locks - - // Hooks can be used to perform actions triggered by various events during - // the operation's lifecycle. - Hooks []terraform.Hook - - // Plan is a plan that was passed as an argument. This is valid for - // plan and apply arguments but may not work for all backends. - PlanFile *planfile.Reader - - // The options below are more self-explanatory and affect the runtime - // behavior of the operation. 
- PlanMode plans.Mode - AutoApprove bool - Targets []addrs.Targetable - ForceReplace []addrs.AbsResourceInstance - Variables map[string]UnparsedVariableValue - - // Some operations use root module variables only opportunistically or - // don't need them at all. If this flag is set, the backend must treat - // all variables as optional and provide an unknown value for any required - // variables that aren't set in order to allow partial evaluation against - // the resulting incomplete context. - // - // This flag is honored only if PlanFile isn't set. If PlanFile is set then - // the variables set in the plan are used instead, and they must be valid. - AllowUnsetVariables bool - - // View implements the logic for all UI interactions. - View views.Operation - - // Input/output/control options. - UIIn terraform.UIInput - UIOut terraform.UIOutput - - // StateLocker is used to lock the state while providing UI feedback to the - // user. This will be replaced by the Backend to update the context. - // - // If state locking is not necessary, this should be set to a no-op - // implementation of clistate.Locker. - StateLocker clistate.Locker - - // Workspace is the name of the workspace that this operation should run - // in, which controls which named state is used. - Workspace string -} - -// HasConfig returns true if and only if the operation has a ConfigDir value -// that refers to a directory containing at least one Terraform configuration -// file. -func (o *Operation) HasConfig() bool { - return o.ConfigLoader.IsConfigDir(o.ConfigDir) -} - -// Config loads the configuration that the operation applies to, using the -// ConfigDir and ConfigLoader fields within the receiving operation. 
-func (o *Operation) Config() (*configs.Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - config, hclDiags := o.ConfigLoader.LoadConfig(o.ConfigDir) - diags = diags.Append(hclDiags) - return config, diags -} - -// ReportResult is a helper for the common chore of setting the status of -// a running operation and showing any diagnostics produced during that -// operation. -// -// If the given diagnostics contains errors then the operation's result -// will be set to backend.OperationFailure. It will be set to -// backend.OperationSuccess otherwise. It will then use o.View.Diagnostics -// to show the given diagnostics before returning. -// -// Callers should feel free to do each of these operations separately in -// more complex cases where e.g. diagnostics are interleaved with other -// output, but terminating immediately after reporting error diagnostics is -// common and can be expressed concisely via this method. -func (o *Operation) ReportResult(op *RunningOperation, diags tfdiags.Diagnostics) { - if diags.HasErrors() { - op.Result = OperationFailure - } else { - op.Result = OperationSuccess - } - if o.View != nil { - o.View.Diagnostics(diags) - } else { - // Shouldn't generally happen, but if it does then we'll at least - // make some noise in the logs to help us spot it. - if len(diags) != 0 { - log.Printf( - "[ERROR] Backend needs to report diagnostics but View is not set:\n%s", - diags.ErrWithWarnings(), - ) - } - } -} - -// RunningOperation is the result of starting an operation. -type RunningOperation struct { - // For implementers of a backend, this context should not wrap the - // passed in context. Otherwise, cancelling the parent context will - // immediately mark this context as "done" but those aren't the semantics - // we want: we want this context to be done only when the operation itself - // is fully done. - context.Context - - // Stop requests the operation to complete early, by calling Stop on all - // the plugins. 
If the process needs to terminate immediately, call Cancel. - Stop context.CancelFunc - - // Cancel is the context.CancelFunc associated with the embedded context, - // and can be called to terminate the operation early. - // Once Cancel is called, the operation should return as soon as possible - // to avoid running operations during process exit. - Cancel context.CancelFunc - - // Result is the exit status of the operation, populated only after the - // operation has completed. - Result OperationResult - - // PlanEmpty is populated after a Plan operation completes to note whether - // a plan is empty or has changes. This is only used in the CLI to determine - // the exit status because the plan value is not available at that point. - PlanEmpty bool - - // State is the final state after the operation completed. Persisting - // this state is managed by the backend. This should only be read - // after the operation completes to avoid read/write races. - State *states.State -} - -// OperationResult describes the result status of an operation. -type OperationResult int - -const ( - // OperationSuccess indicates that the operation completed as expected. - OperationSuccess OperationResult = 0 - - // OperationFailure indicates that the operation encountered some sort - // of error, and thus may have been only partially performed or not - // performed at all. - OperationFailure OperationResult = 1 -) - -func (r OperationResult) ExitStatus() int { - return int(r) -} - -// If the argument is a path, Read loads it and returns the contents, -// otherwise the argument is assumed to be the desired contents and is simply -// returned. 
-func ReadPathOrContents(poc string) (string, error) { - if len(poc) == 0 { - return poc, nil - } - - path := poc - if path[0] == '~' { - var err error - path, err = homedir.Expand(path) - if err != nil { - return path, err - } - } - - if _, err := os.Stat(path); err == nil { - contents, err := ioutil.ReadFile(path) - if err != nil { - return string(contents), err - } - return string(contents), nil - } - - return poc, nil -} diff --git a/internal/backend/cli.go b/internal/backend/cli.go deleted file mode 100644 index 0a73b122fa9c..000000000000 --- a/internal/backend/cli.go +++ /dev/null @@ -1,91 +0,0 @@ -package backend - -import ( - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" -) - -// CLI is an optional interface that can be implemented to be initialized -// with information from the Terraform CLI. If this is implemented, this -// initialization function will be called with data to help interact better -// with a CLI. -// -// This interface was created to improve backend interaction with the -// official Terraform CLI while making it optional for API users to have -// to provide full CLI interaction to every backend. -// -// If you're implementing a Backend, it is acceptable to require CLI -// initialization. In this case, your backend should be coded to error -// on other methods (such as State, Operation) if CLI initialization was not -// done with all required fields. -type CLI interface { - Backend - - // CLIInit is called once with options. The options passed to this - // function may not be modified after calling this since they can be - // read/written at any time by the Backend implementation. - // - // This may be called before or after Configure is called, so if settings - // here affect configurable settings, care should be taken to handle - // whether they should be overwritten or not. 
- CLIInit(*CLIOpts) error -} - -// CLIOpts are the options passed into CLIInit for the CLI interface. -// -// These options represent the functionality the CLI exposes and often -// maps to meta-flags available on every CLI (such as -input). -// -// When implementing a backend, it isn't expected that every option applies. -// Your backend should be documented clearly to explain to end users what -// options have an affect and what won't. In some cases, it may even make sense -// to error in your backend when an option is set so that users don't make -// a critically incorrect assumption about behavior. -type CLIOpts struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. - CLI cli.Ui - CLIColor *colorstring.Colorize - - // Streams describes the low-level streams for Stdout, Stderr and Stdin, - // including some metadata about whether they are terminals. Most output - // should go via the object in field CLI above, but Streams can be useful - // for tailoring the output to fit the attached terminal, for example. - Streams *terminal.Streams - - // StatePath is the local path where state is read from. - // - // StateOutPath is the local path where the state will be written. - // If this is empty, it will default to StatePath. - // - // StateBackupPath is the local path where a backup file will be written. - // If this is empty, no backup will be taken. - StatePath string - StateOutPath string - StateBackupPath string - - // ContextOpts are the base context options to set when initializing a - // Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // Input will ask for necessary input prior to performing any operations. - // - // Validation will perform validation prior to running an operation. 
The - // variable naming doesn't match the style of others since we have a func - // Validate. - Input bool - Validation bool - - // RunningInAutomation indicates that commands are being run by an - // automated system rather than directly at a command prompt. - // - // This is a hint not to produce messages that expect that a user can - // run a follow-up command, perhaps because Terraform is running in - // some sort of workflow automation tool that abstracts away the - // exact commands that are being run. - RunningInAutomation bool -} diff --git a/internal/backend/init/init.go b/internal/backend/init/init.go deleted file mode 100644 index b437b3035147..000000000000 --- a/internal/backend/init/init.go +++ /dev/null @@ -1,143 +0,0 @@ -// Package init contains the list of backends that can be initialized and -// basic helper functions for initializing those backends. -package init - -import ( - "sync" - - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" - backendRemote "github.com/hashicorp/terraform/internal/backend/remote" - backendAzure "github.com/hashicorp/terraform/internal/backend/remote-state/azure" - backendConsul "github.com/hashicorp/terraform/internal/backend/remote-state/consul" - backendCos "github.com/hashicorp/terraform/internal/backend/remote-state/cos" - backendGCS "github.com/hashicorp/terraform/internal/backend/remote-state/gcs" - backendHTTP "github.com/hashicorp/terraform/internal/backend/remote-state/http" - backendInmem "github.com/hashicorp/terraform/internal/backend/remote-state/inmem" - backendKubernetes "github.com/hashicorp/terraform/internal/backend/remote-state/kubernetes" - backendOSS "github.com/hashicorp/terraform/internal/backend/remote-state/oss" - backendPg "github.com/hashicorp/terraform/internal/backend/remote-state/pg" 
- backendS3 "github.com/hashicorp/terraform/internal/backend/remote-state/s3" - backendCloud "github.com/hashicorp/terraform/internal/cloud" -) - -// backends is the list of available backends. This is a global variable -// because backends are currently hardcoded into Terraform and can't be -// modified without recompilation. -// -// To read an available backend, use the Backend function. This ensures -// safe concurrent read access to the list of built-in backends. -// -// Backends are hardcoded into Terraform because the API for backends uses -// complex structures and supporting that over the plugin system is currently -// prohibitively difficult. For those wanting to implement a custom backend, -// they can do so with recompilation. -var backends map[string]backend.InitFn -var backendsLock sync.Mutex - -// RemovedBackends is a record of previously supported backends which have -// since been deprecated and removed. -var RemovedBackends map[string]string - -// Init initializes the backends map with all our hardcoded backends. -func Init(services *disco.Disco) { - backendsLock.Lock() - defer backendsLock.Unlock() - - backends = map[string]backend.InitFn{ - "local": func() backend.Backend { return backendLocal.New() }, - "remote": func() backend.Backend { return backendRemote.New(services) }, - - // Remote State backends. 
- "azurerm": func() backend.Backend { return backendAzure.New() }, - "consul": func() backend.Backend { return backendConsul.New() }, - "cos": func() backend.Backend { return backendCos.New() }, - "gcs": func() backend.Backend { return backendGCS.New() }, - "http": func() backend.Backend { return backendHTTP.New() }, - "inmem": func() backend.Backend { return backendInmem.New() }, - "kubernetes": func() backend.Backend { return backendKubernetes.New() }, - "oss": func() backend.Backend { return backendOSS.New() }, - "pg": func() backend.Backend { return backendPg.New() }, - "s3": func() backend.Backend { return backendS3.New() }, - - // Terraform Cloud 'backend' - // This is an implementation detail only, used for the cloud package - "cloud": func() backend.Backend { return backendCloud.New(services) }, - } - - RemovedBackends = map[string]string{ - "artifactory": `The "artifactory" backend is not supported in Terraform v1.3 or later.`, - "azure": `The "azure" backend name has been removed, please use "azurerm".`, - "etcd": `The "etcd" backend is not supported in Terraform v1.3 or later.`, - "etcdv3": `The "etcdv3" backend is not supported in Terraform v1.3 or later.`, - "manta": `The "manta" backend is not supported in Terraform v1.3 or later.`, - "swift": `The "swift" backend is not supported in Terraform v1.3 or later.`, - } -} - -// Backend returns the initialization factory for the given backend, or -// nil if none exists. -func Backend(name string) backend.InitFn { - backendsLock.Lock() - defer backendsLock.Unlock() - return backends[name] -} - -// Set sets a new backend in the list of backends. If f is nil then the -// backend will be removed from the map. If this backend already exists -// then it will be overwritten. -// -// This method sets this backend globally and care should be taken to do -// this only before Terraform is executing to prevent odd behavior of backends -// changing mid-execution. 
-func Set(name string, f backend.InitFn) { - backendsLock.Lock() - defer backendsLock.Unlock() - - if f == nil { - delete(backends, name) - return - } - - backends[name] = f -} - -// deprecatedBackendShim is used to wrap a backend and inject a deprecation -// warning into the Validate method. -type deprecatedBackendShim struct { - backend.Backend - Message string -} - -// PrepareConfig delegates to the wrapped backend to validate its config -// and then appends shim's deprecation warning. -func (b deprecatedBackendShim) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { - newObj, diags := b.Backend.PrepareConfig(obj) - return newObj, diags.Append(tfdiags.SimpleWarning(b.Message)) -} - -// DeprecateBackend can be used to wrap a backend to retrun a deprecation -// warning during validation. -func deprecateBackend(b backend.Backend, message string) backend.Backend { - // Since a Backend wrapped by deprecatedBackendShim can no longer be - // asserted as an Enhanced or Local backend, disallow those types here - // entirely. If something other than a basic backend.Backend needs to be - // deprecated, we can add that functionality to schema.Backend or the - // backend itself. 
- if _, ok := b.(backend.Enhanced); ok { - panic("cannot use DeprecateBackend on an Enhanced Backend") - } - - if _, ok := b.(backend.Local); ok { - panic("cannot use DeprecateBackend on a Local Backend") - } - - return deprecatedBackendShim{ - Backend: b, - Message: message, - } -} diff --git a/internal/backend/local/backend.go b/internal/backend/local/backend.go deleted file mode 100644 index de0e62d48194..000000000000 --- a/internal/backend/local/backend.go +++ /dev/null @@ -1,489 +0,0 @@ -package local - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "sync" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -const ( - DefaultWorkspaceDir = "terraform.tfstate.d" - DefaultWorkspaceFile = "environment" - DefaultStateFilename = "terraform.tfstate" - DefaultBackupExtension = ".backup" -) - -// Local is an implementation of EnhancedBackend that performs all operations -// locally. This is the "default" backend and implements normal Terraform -// behavior as it is well known. -type Local struct { - // The State* paths are set from the backend config, and may be left blank - // to use the defaults. If the actual paths for the local backend state are - // needed, use the StatePaths method. - // - // StatePath is the local path where state is read from. - // - // StateOutPath is the local path where the state will be written. - // If this is empty, it will default to StatePath. - // - // StateBackupPath is the local path where a backup file will be written. - // Set this to "-" to disable state backup. 
- // - // StateWorkspaceDir is the path to the folder containing data for - // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. - StatePath string - StateOutPath string - StateBackupPath string - StateWorkspaceDir string - - // The OverrideState* paths are set based on per-operation CLI arguments - // and will override what'd be built from the State* fields if non-empty. - // While the interpretation of the State* fields depends on the active - // workspace, the OverrideState* fields are always used literally. - OverrideStatePath string - OverrideStateOutPath string - OverrideStateBackupPath string - - // We only want to create a single instance of a local state, so store them - // here as they're loaded. - states map[string]statemgr.Full - - // Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // OpInput will ask for necessary input prior to performing any operations. - // - // OpValidation will perform validation prior to running an operation. The - // variable naming doesn't match the style of others since we have a func - // Validate. - OpInput bool - OpValidation bool - - // Backend, if non-nil, will use this backend for non-enhanced behavior. - // This allows local behavior with remote state storage. It is a way to - // "upgrade" a non-enhanced backend to an enhanced backend with typical - // behavior. - // - // If this is nil, local performs normal state loading and storage. - Backend backend.Backend - - // opLock locks operations - opLock sync.Mutex -} - -var _ backend.Backend = (*Local)(nil) - -// New returns a new initialized local backend. -func New() *Local { - return NewWithBackend(nil) -} - -// NewWithBackend returns a new local backend initialized with a -// dedicated backend for non-enhanced behavior. 
-func NewWithBackend(backend backend.Backend) *Local { - return &Local{ - Backend: backend, - } -} - -func (b *Local) ConfigSchema() *configschema.Block { - if b.Backend != nil { - return b.Backend.ConfigSchema() - } - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "path": { - Type: cty.String, - Optional: true, - }, - "workspace_dir": { - Type: cty.String, - Optional: true, - }, - }, - } -} - -func (b *Local) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { - if b.Backend != nil { - return b.Backend.PrepareConfig(obj) - } - - var diags tfdiags.Diagnostics - - if val := obj.GetAttr("path"); !val.IsNull() { - p := val.AsString() - if p == "" { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid local state file path", - `The "path" attribute value must not be empty.`, - cty.Path{cty.GetAttrStep{Name: "path"}}, - )) - } - } - - if val := obj.GetAttr("workspace_dir"); !val.IsNull() { - p := val.AsString() - if p == "" { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid local workspace directory path", - `The "workspace_dir" attribute value must not be empty.`, - cty.Path{cty.GetAttrStep{Name: "workspace_dir"}}, - )) - } - } - - return obj, diags -} - -func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics { - if b.Backend != nil { - return b.Backend.Configure(obj) - } - - var diags tfdiags.Diagnostics - - if val := obj.GetAttr("path"); !val.IsNull() { - p := val.AsString() - b.StatePath = p - b.StateOutPath = p - } else { - b.StatePath = DefaultStateFilename - b.StateOutPath = DefaultStateFilename - } - - if val := obj.GetAttr("workspace_dir"); !val.IsNull() { - p := val.AsString() - b.StateWorkspaceDir = p - } else { - b.StateWorkspaceDir = DefaultWorkspaceDir - } - - return diags -} - -func (b *Local) Workspaces() ([]string, error) { - // If we have a backend handling state, defer to that. 
- if b.Backend != nil { - return b.Backend.Workspaces() - } - - // the listing always start with "default" - envs := []string{backend.DefaultStateName} - - entries, err := ioutil.ReadDir(b.stateWorkspaceDir()) - // no error if there's no envs configured - if os.IsNotExist(err) { - return envs, nil - } - if err != nil { - return nil, err - } - - var listed []string - for _, entry := range entries { - if entry.IsDir() { - listed = append(listed, filepath.Base(entry.Name())) - } - } - - sort.Strings(listed) - envs = append(envs, listed...) - - return envs, nil -} - -// DeleteWorkspace removes a workspace. -// -// The "default" workspace cannot be removed. -func (b *Local) DeleteWorkspace(name string, force bool) error { - // If we have a backend handling state, defer to that. - if b.Backend != nil { - return b.Backend.DeleteWorkspace(name, force) - } - - if name == "" { - return errors.New("empty state name") - } - - if name == backend.DefaultStateName { - return errors.New("cannot delete default state") - } - - delete(b.states, name) - return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name)) -} - -func (b *Local) StateMgr(name string) (statemgr.Full, error) { - // If we have a backend handling state, delegate to that. 
- if b.Backend != nil { - return b.Backend.StateMgr(name) - } - - if s, ok := b.states[name]; ok { - return s, nil - } - - if err := b.createState(name); err != nil { - return nil, err - } - - statePath, stateOutPath, backupPath := b.StatePaths(name) - log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath) - - s := statemgr.NewFilesystemBetweenPaths(statePath, stateOutPath) - if backupPath != "" { - s.SetBackupPath(backupPath) - } - - if b.states == nil { - b.states = map[string]statemgr.Full{} - } - b.states[name] = s - return s, nil -} - -// Operation implements backend.Enhanced -// -// This will initialize an in-memory terraform.Context to perform the -// operation within this process. -// -// The given operation parameter will be merged with the ContextOpts on -// the structure with the following rules. If a rule isn't specified and the -// name conflicts, assume that the field is overwritten if set. -func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { - if op.View == nil { - panic("Operation called with nil View") - } - - // Determine the function to call for our operation - var f func(context.Context, context.Context, *backend.Operation, *backend.RunningOperation) - switch op.Type { - case backend.OperationTypeRefresh: - f = b.opRefresh - case backend.OperationTypePlan: - f = b.opPlan - case backend.OperationTypeApply: - f = b.opApply - default: - return nil, fmt.Errorf( - "unsupported operation type: %s\n\n"+ - "This is a bug in Terraform and should be reported. The local backend\n"+ - "is built-in to Terraform and should always support all operations.", - op.Type) - } - - // Lock - b.opLock.Lock() - - // Build our running operation - // the runninCtx is only used to block until the operation returns. 
- runningCtx, done := context.WithCancel(context.Background()) - runningOp := &backend.RunningOperation{ - Context: runningCtx, - } - - // stopCtx wraps the context passed in, and is used to signal a graceful Stop. - stopCtx, stop := context.WithCancel(ctx) - runningOp.Stop = stop - - // cancelCtx is used to cancel the operation immediately, usually - // indicating that the process is exiting. - cancelCtx, cancel := context.WithCancel(context.Background()) - runningOp.Cancel = cancel - - op.StateLocker = op.StateLocker.WithContext(stopCtx) - - // Do it - go func() { - defer logging.PanicHandler() - defer done() - defer stop() - defer cancel() - - defer b.opLock.Unlock() - f(stopCtx, cancelCtx, op, runningOp) - }() - - // Return - return runningOp, nil -} - -// opWait waits for the operation to complete, and a stop signal or a -// cancelation signal. -func (b *Local) opWait( - doneCh <-chan struct{}, - stopCtx context.Context, - cancelCtx context.Context, - tfCtx *terraform.Context, - opStateMgr statemgr.Persister, - view views.Operation) (canceled bool) { - // Wait for the operation to finish or for us to be interrupted so - // we can handle it properly. - select { - case <-stopCtx.Done(): - view.Stopping() - - // try to force a PersistState just in case the process is terminated - // before we can complete. - if err := opStateMgr.PersistState(nil); err != nil { - // We can't error out from here, but warn the user if there was an error. - // If this isn't transient, we will catch it again below, and - // attempt to save the state another way. 
- var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error saving current state", - fmt.Sprintf(earlyStateWriteErrorFmt, err), - )) - view.Diagnostics(diags) - } - - // Stop execution - log.Println("[TRACE] backend/local: waiting for the running operation to stop") - go tfCtx.Stop() - - select { - case <-cancelCtx.Done(): - log.Println("[WARN] running operation was forcefully canceled") - // if the operation was canceled, we need to return immediately - canceled = true - case <-doneCh: - log.Println("[TRACE] backend/local: graceful stop has completed") - } - case <-cancelCtx.Done(): - // this should not be called without first attempting to stop the - // operation - log.Println("[ERROR] running operation canceled without Stop") - canceled = true - case <-doneCh: - } - return -} - -// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as -// configured from the CLI. -func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) { - statePath := b.OverrideStatePath - stateOutPath := b.OverrideStateOutPath - backupPath := b.OverrideStateBackupPath - - isDefault := name == backend.DefaultStateName || name == "" - - baseDir := "" - if !isDefault { - baseDir = filepath.Join(b.stateWorkspaceDir(), name) - } - - if statePath == "" { - if isDefault { - statePath = b.StatePath // s.StatePath applies only to the default workspace, since StateWorkspaceDir is used otherwise - } - if statePath == "" { - statePath = filepath.Join(baseDir, DefaultStateFilename) - } - } - if stateOutPath == "" { - stateOutPath = statePath - } - if backupPath == "" { - backupPath = b.StateBackupPath - } - switch backupPath { - case "-": - backupPath = "" - case "": - backupPath = stateOutPath + DefaultBackupExtension - } - - return statePath, stateOutPath, backupPath -} - -// PathsConflictWith returns true if any state path used by a workspace in -// the receiver is the same as any state path used by the other given -// local 
backend instance. -// -// This should be used when "migrating" from one local backend configuration to -// another in order to avoid deleting the "old" state snapshots if they are -// in the same files as the "new" state snapshots. -func (b *Local) PathsConflictWith(other *Local) bool { - otherPaths := map[string]struct{}{} - otherWorkspaces, err := other.Workspaces() - if err != nil { - // If we can't enumerate the workspaces then we'll conservatively - // assume that paths _do_ overlap, since we can't be certain. - return true - } - for _, name := range otherWorkspaces { - p, _, _ := other.StatePaths(name) - otherPaths[p] = struct{}{} - } - - ourWorkspaces, err := other.Workspaces() - if err != nil { - // If we can't enumerate the workspaces then we'll conservatively - // assume that paths _do_ overlap, since we can't be certain. - return true - } - - for _, name := range ourWorkspaces { - p, _, _ := b.StatePaths(name) - if _, exists := otherPaths[p]; exists { - return true - } - } - return false -} - -// this only ensures that the named directory exists -func (b *Local) createState(name string) error { - if name == backend.DefaultStateName { - return nil - } - - stateDir := filepath.Join(b.stateWorkspaceDir(), name) - s, err := os.Stat(stateDir) - if err == nil && s.IsDir() { - // no need to check for os.IsNotExist, since that is covered by os.MkdirAll - // which will catch the other possible errors as well. - return nil - } - - err = os.MkdirAll(stateDir, 0755) - if err != nil { - return err - } - - return nil -} - -// stateWorkspaceDir returns the directory where state environments are stored. -func (b *Local) stateWorkspaceDir() string { - if b.StateWorkspaceDir != "" { - return b.StateWorkspaceDir - } - - return DefaultWorkspaceDir -} - -const earlyStateWriteErrorFmt = `Error: %s - -Terraform encountered an error attempting to save the state before cancelling the current operation. 
Once the operation is complete another attempt will be made to save the final state.` diff --git a/internal/backend/local/backend_apply.go b/internal/backend/local/backend_apply.go deleted file mode 100644 index 23d72fd97509..000000000000 --- a/internal/backend/local/backend_apply.go +++ /dev/null @@ -1,330 +0,0 @@ -package local - -import ( - "context" - "errors" - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// test hook called between plan+apply during opApply -var testHookStopPlanApply func() - -func (b *Local) opApply( - stopCtx context.Context, - cancelCtx context.Context, - op *backend.Operation, - runningOp *backend.RunningOperation) { - log.Printf("[INFO] backend/local: starting Apply operation") - - var diags, moreDiags tfdiags.Diagnostics - - // If we have a nil module at this point, then set it to an empty tree - // to avoid any potential crashes. - if op.PlanFile == nil && op.PlanMode != plans.DestroyMode && !op.HasConfig() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files", - "Apply requires configuration to be present. Applying without a configuration "+ - "would mark everything for destruction, which is normally not what is desired. 
"+ - "If you would like to destroy everything, run 'terraform destroy' instead.", - )) - op.ReportResult(runningOp, diags) - return - } - - stateHook := new(StateHook) - op.Hooks = append(op.Hooks, stateHook) - - // Get our context - lr, _, opState, contextDiags := b.localRun(op) - diags = diags.Append(contextDiags) - if contextDiags.HasErrors() { - op.ReportResult(runningOp, diags) - return - } - // the state was locked during successful context creation; unlock the state - // when the operation completes - defer func() { - diags := op.StateLocker.Unlock() - if diags.HasErrors() { - op.View.Diagnostics(diags) - runningOp.Result = backend.OperationFailure - } - }() - - // We'll start off with our result being the input state, and replace it - // with the result state only if we eventually complete the apply - // operation. - runningOp.State = lr.InputState - - schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - op.ReportResult(runningOp, diags) - return - } - - var plan *plans.Plan - // If we weren't given a plan, then we refresh/plan - if op.PlanFile == nil { - // Perform the plan - log.Printf("[INFO] backend/local: apply calling Plan") - plan, moreDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - // If Terraform Core generated a partial plan despite the errors - // then we'll make a best effort to render it. Terraform Core - // promises that if it returns a non-nil plan along with errors - // then the plan won't necessarily contain all of the needed - // actions but that any it does include will be properly-formed. - // plan.Errored will be true in this case, which our plan - // renderer can rely on to tailor its messaging. 
- if plan != nil && (len(plan.Changes.Resources) != 0 || len(plan.Changes.Outputs) != 0) { - schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) - // If schema loading returns errors then we'll just give up and - // ignore them to avoid distracting from the plan-time errors we're - // mainly trying to report here. - if !moreDiags.HasErrors() { - op.View.Plan(plan, schemas) - } - } - op.ReportResult(runningOp, diags) - return - } - - trivialPlan := !plan.CanApply() - hasUI := op.UIOut != nil && op.UIIn != nil - mustConfirm := hasUI && !op.AutoApprove && !trivialPlan - op.View.Plan(plan, schemas) - - if testHookStopPlanApply != nil { - testHookStopPlanApply() - } - - // Check if we've been stopped before going through confirmation, or - // skipping confirmation in the case of -auto-approve. - // This can currently happen if a single stop request was received - // during the final batch of resource plan calls, so no operations were - // forced to abort, and no errors were returned from Plan. - if stopCtx.Err() != nil { - diags = diags.Append(errors.New("execution halted")) - runningOp.Result = backend.OperationFailure - op.ReportResult(runningOp, diags) - return - } - - if mustConfirm { - var desc, query string - switch op.PlanMode { - case plans.DestroyMode: - if op.Workspace != "default" { - query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" - } else { - query = "Do you really want to destroy all resources?" - } - desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - case plans.RefreshOnlyMode: - if op.Workspace != "default" { - query = "Would you like to update the Terraform state for \"" + op.Workspace + "\" to reflect these detected changes?" - } else { - query = "Would you like to update the Terraform state to reflect these detected changes?" 
- } - desc = "Terraform will write these changes to the state without modifying any real infrastructure.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - default: - if op.Workspace != "default" { - query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?" - } else { - query = "Do you want to perform these actions?" - } - desc = "Terraform will perform the actions described above.\n" + - "Only 'yes' will be accepted to approve." - } - - // We'll show any accumulated warnings before we display the prompt, - // so the user can consider them when deciding how to answer. - if len(diags) > 0 { - op.View.Diagnostics(diags) - diags = nil // reset so we won't show the same diagnostics again later - } - - v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{ - Id: "approve", - Query: "\n" + query, - Description: desc, - }) - if err != nil { - diags = diags.Append(fmt.Errorf("error asking for approval: %w", err)) - op.ReportResult(runningOp, diags) - return - } - if v != "yes" { - op.View.Cancelled(op.PlanMode) - runningOp.Result = backend.OperationFailure - return - } - } - } else { - plan = lr.Plan - if plan.Errored { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot apply incomplete plan", - "Terraform encountered an error when generating this plan, so it cannot be applied.", - )) - op.ReportResult(runningOp, diags) - return - } - for _, change := range plan.Changes.Resources { - if change.Action != plans.NoOp { - op.View.PlannedChange(change) - } - } - } - - // Set up our hook for continuous state updates - stateHook.StateMgr = opState - - // Start the apply in a goroutine so that we can be interrupted. 
- var applyState *states.State - var applyDiags tfdiags.Diagnostics - doneCh := make(chan struct{}) - go func() { - defer logging.PanicHandler() - defer close(doneCh) - log.Printf("[INFO] backend/local: apply calling Apply") - applyState, applyDiags = lr.Core.Apply(plan, lr.Config) - }() - - if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { - return - } - diags = diags.Append(applyDiags) - - // Even on error with an empty state, the state value should not be nil. - // Return early here to prevent corrupting any existing state. - if diags.HasErrors() && applyState == nil { - log.Printf("[ERROR] backend/local: apply returned nil state") - op.ReportResult(runningOp, diags) - return - } - - // Store the final state - runningOp.State = applyState - err := statemgr.WriteAndPersist(opState, applyState, schemas) - if err != nil { - // Export the state file from the state manager and assign the new - // state. This is needed to preserve the existing serial and lineage. - stateFile := statemgr.Export(opState) - if stateFile == nil { - stateFile = &statefile.File{} - } - stateFile.State = applyState - - diags = diags.Append(b.backupStateForError(stateFile, err, op.View)) - op.ReportResult(runningOp, diags) - return - } - - if applyDiags.HasErrors() { - op.ReportResult(runningOp, diags) - return - } - - // If we've accumulated any warnings along the way then we'll show them - // here just before we show the summary and next steps. If we encountered - // errors then we would've returned early at some other point above. - op.View.Diagnostics(diags) -} - -// backupStateForError is called in a scenario where we're unable to persist the -// state for some reason, and will attempt to save a backup copy of the state -// to local disk to help the user recover. This is a "last ditch effort" sort -// of thing, so we really don't want to end up in this codepath; we should do -// everything we possibly can to get the state saved _somewhere_. 
-func (b *Local) backupStateForError(stateFile *statefile.File, err error, view views.Operation) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to save state", - fmt.Sprintf("Error saving state: %s", err), - )) - - local := statemgr.NewFilesystem("errored.tfstate") - writeErr := local.WriteStateForMigration(stateFile, true) - if writeErr != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create local state file", - fmt.Sprintf("Error creating local state file for recovery: %s", writeErr), - )) - - // To avoid leaving the user with no state at all, our last resort - // is to print the JSON state out onto the terminal. This is an awful - // UX, so we should definitely avoid doing this if at all possible, - // but at least the user has _some_ path to recover if we end up - // here for some reason. - if dumpErr := view.EmergencyDumpState(stateFile); dumpErr != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize state", - fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr), - )) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to persist state to backend", - stateWriteConsoleFallbackError, - )) - return diags - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to persist state to backend", - stateWriteBackedUpError, - )) - - return diags -} - -const stateWriteBackedUpError = `The error shown above has prevented Terraform from writing the updated state to the configured backend. To allow for recovery, the state has been written to the file "errored.tfstate" in the current working directory. - -Running "terraform apply" again at this point will create a forked state, making it harder to recover. 
- -To retry writing this state, use the following command: - terraform state push errored.tfstate -` - -const stateWriteConsoleFallbackError = `The errors shown above prevented Terraform from writing the updated state to -the configured backend and from creating a local backup file. As a fallback, -the raw state data is printed above as a JSON object. - -To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file called errored.tfstate, then run the following command: - terraform state push errored.tfstate -` - -const stateWriteFatalErrorFmt = `Failed to save state after apply. - -Error serializing state: %s - -A catastrophic error has prevented Terraform from persisting the state file or creating a backup. Unfortunately this means that the record of any resources created during this apply has been lost, and such resources may exist outside of Terraform's management. - -For resources that support import, it is possible to recover by manually importing each resource using its id from the target system. - -This is a serious bug in Terraform and should be reported. 
-` diff --git a/internal/backend/local/backend_apply_test.go b/internal/backend/local/backend_apply_test.go deleted file mode 100644 index 493000a802b3..000000000000 --- a/internal/backend/local/backend_apply_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package local - -import ( - "context" - "errors" - "os" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestLocal_applyBasic(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", applyFixtureSchema()) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - "ami": cty.StringVal("bar"), - })} - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("operation failed") - } - - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - - if !p.PlanResourceChangeCalled { - t.Fatal("diff should be called") - } - - if 
!p.ApplyResourceChangeCalled { - t.Fatal("apply should be called") - } - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - ami = bar -`) - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func TestLocal_applyEmptyDir(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})} - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("operation succeeded; want error") - } - - if p.ApplyResourceChangeCalled { - t.Fatal("apply should not be called") - } - - if _, err := os.Stat(b.StateOutPath); err == nil { - t.Fatal("should not exist") - } - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) - - if got, want := done(t).Stderr(), "Error: No configuration files"; !strings.Contains(got, want) { - t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) - } -} - -func TestLocal_applyEmptyDirDestroy(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - op.PlanMode = plans.DestroyMode - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("apply operation failed") - } - - if p.ApplyResourceChangeCalled { - t.Fatal("apply should not be called") - } - - 
checkState(t, b.StateOutPath, ``) - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func TestLocal_applyError(t *testing.T) { - b := TestLocal(t) - - schema := &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - } - p := TestLocalProvider(t, b, "test", schema) - - var lock sync.Mutex - errored := false - p.ApplyResourceChangeFn = func( - r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - - lock.Lock() - defer lock.Unlock() - var diags tfdiags.Diagnostics - - ami := r.Config.GetAttr("ami").AsString() - if !errored && ami == "error" { - errored = true - diags = diags.Append(errors.New("ami error")) - return providers.ApplyResourceChangeResponse{ - Diagnostics: diags, - } - } - return providers.ApplyResourceChangeResponse{ - Diagnostics: diags, - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "ami": cty.StringVal("bar"), - }), - } - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-error") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("operation succeeded; want failure") - } - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/test"] - ami = bar - `) - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) - - if got, want := done(t).Stderr(), "Error: ami error"; !strings.Contains(got, want) { - t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) - } -} - -func TestLocal_applyBackendFail(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", 
applyFixtureSchema()) - - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - "ami": cty.StringVal("bar"), - }), - Diagnostics: tfdiags.Diagnostics.Append(nil, errors.New("error before backend failure")), - } - - wd, err := os.Getwd() - if err != nil { - t.Fatalf("failed to get current working directory") - } - err = os.Chdir(filepath.Dir(b.StatePath)) - if err != nil { - t.Fatalf("failed to set temporary working directory") - } - defer os.Chdir(wd) - - op, configCleanup, done := testOperationApply(t, wd+"/testdata/apply") - defer configCleanup() - - b.Backend = &backendWithFailingState{} - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - output := done(t) - - if run.Result == backend.OperationSuccess { - t.Fatalf("apply succeeded; want error") - } - - diagErr := output.Stderr() - - if !strings.Contains(diagErr, "Error saving state: fake failure") { - t.Fatalf("missing \"fake failure\" message in diags:\n%s", diagErr) - } - - if !strings.Contains(diagErr, "error before backend failure") { - t.Fatalf("missing 'error before backend failure' diagnostic from apply") - } - - // The fallback behavior should've created a file errored.tfstate in the - // current working directory. 
- checkState(t, "errored.tfstate", ` -test_instance.foo: (tainted) - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - ami = bar - `) - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) -} - -func TestLocal_applyRefreshFalse(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState()) - - op, configCleanup, done := testOperationApply(t, "./testdata/plan") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -type backendWithFailingState struct { - Local -} - -func (b *backendWithFailingState) StateMgr(name string) (statemgr.Full, error) { - return &failingState{ - statemgr.NewFilesystem("failing-state.tfstate"), - }, nil -} - -type failingState struct { - *statemgr.Filesystem -} - -func (s failingState) WriteState(state *states.State) error { - return errors.New("fake failure") -} - -func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - - // Many of our tests use an overridden "test" provider that's just in-memory - // inside the test process, not a separate plugin on disk. 
- depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) - - return &backend.Operation{ - Type: backend.OperationTypeApply, - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewNoopLocker(), - View: view, - DependencyLocks: depLocks, - }, configCleanup, done -} - -// applyFixtureSchema returns a schema suitable for processing the -// configuration in testdata/apply . This schema should be -// assigned to a mock provider named "test". -func applyFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - } -} - -func TestApply_applyCanceledAutoApprove(t *testing.T) { - b := TestLocal(t) - - TestLocalProvider(t, b, "test", applyFixtureSchema()) - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - op.AutoApprove = true - defer configCleanup() - defer func() { - output := done(t) - if !strings.Contains(output.Stderr(), "execution halted") { - t.Fatal("expected 'execution halted', got:\n", output.All()) - } - }() - - ctx, cancel := context.WithCancel(context.Background()) - testHookStopPlanApply = cancel - defer func() { - testHookStopPlanApply = nil - }() - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - -} diff --git a/internal/backend/local/backend_plan.go b/internal/backend/local/backend_plan.go deleted file mode 100644 index 52aaad11e04d..000000000000 --- a/internal/backend/local/backend_plan.go +++ /dev/null @@ -1,183 +0,0 @@ -package local - -import ( - "context" - "fmt" - "log" - - 
"github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func (b *Local) opPlan( - stopCtx context.Context, - cancelCtx context.Context, - op *backend.Operation, - runningOp *backend.RunningOperation) { - - log.Printf("[INFO] backend/local: starting Plan operation") - - var diags tfdiags.Diagnostics - - if op.PlanFile != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Can't re-plan a saved plan", - "The plan command was given a saved plan file as its input. This command generates "+ - "a new plan, and so it requires a configuration directory as its argument.", - )) - op.ReportResult(runningOp, diags) - return - } - - // Local planning requires a config, unless we're planning to destroy. - if op.PlanMode != plans.DestroyMode && !op.HasConfig() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files", - "Plan requires configuration to be present. Planning without a configuration would "+ - "mark everything for destruction, which is normally not what is desired. If you "+ - "would like to destroy everything, run plan with the -destroy option. 
Otherwise, "+ - "create a Terraform configuration file (.tf file) and try again.", - )) - op.ReportResult(runningOp, diags) - return - } - - if b.ContextOpts == nil { - b.ContextOpts = new(terraform.ContextOpts) - } - - // Get our context - lr, configSnap, opState, ctxDiags := b.localRun(op) - diags = diags.Append(ctxDiags) - if ctxDiags.HasErrors() { - op.ReportResult(runningOp, diags) - return - } - // the state was locked during succesfull context creation; unlock the state - // when the operation completes - defer func() { - diags := op.StateLocker.Unlock() - if diags.HasErrors() { - op.View.Diagnostics(diags) - runningOp.Result = backend.OperationFailure - } - }() - - // Since planning doesn't immediately change the persisted state, the - // resulting state is always just the input state. - runningOp.State = lr.InputState - - // Perform the plan in a goroutine so we can be interrupted - var plan *plans.Plan - var planDiags tfdiags.Diagnostics - doneCh := make(chan struct{}) - go func() { - defer logging.PanicHandler() - defer close(doneCh) - log.Printf("[INFO] backend/local: plan calling Plan") - plan, planDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) - }() - - if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { - // If we get in here then the operation was cancelled, which is always - // considered to be a failure. - log.Printf("[INFO] backend/local: plan operation was force-cancelled by interrupt") - runningOp.Result = backend.OperationFailure - return - } - log.Printf("[INFO] backend/local: plan operation completed") - - // NOTE: We intentionally don't stop here on errors because we always want - // to try to present a partial plan report and, if the user chose to, - // generate a partial saved plan file for external analysis. - diags = diags.Append(planDiags) - - // Even if there are errors we need to handle anything that may be - // contained within the plan, so only exit if there is no data at all. 
- if plan == nil { - runningOp.PlanEmpty = true - op.ReportResult(runningOp, diags) - return - } - - // Record whether this plan includes any side-effects that could be applied. - runningOp.PlanEmpty = !plan.CanApply() - - // Save the plan to disk - if path := op.PlanOutPath; path != "" { - if op.PlanOutBackend == nil { - // This is always a bug in the operation caller; it's not valid - // to set PlanOutPath without also setting PlanOutBackend. - diags = diags.Append(fmt.Errorf( - "PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)"), - ) - op.ReportResult(runningOp, diags) - return - } - plan.Backend = *op.PlanOutBackend - - // We may have updated the state in the refresh step above, but we - // will freeze that updated state in the plan file for now and - // only write it if this plan is subsequently applied. - plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.PriorState) - - // We also include a file containing the state as it existed before - // we took any action at all, but this one isn't intended to ever - // be saved to the backend (an equivalent snapshot should already be - // there) and so we just use a stub state file header in this case. - // NOTE: This won't be exactly identical to the latest state snapshot - // in the backend because it's still been subject to state upgrading - // to make it consumable by the current Terraform version, and - // intentionally doesn't preserve the header info. 
- prevStateFile := &statefile.File{ - State: plan.PrevRunState, - } - - log.Printf("[INFO] backend/local: writing plan output to: %s", path) - err := planfile.Create(path, planfile.CreateArgs{ - ConfigSnapshot: configSnap, - PreviousRunStateFile: prevStateFile, - StateFile: plannedStateFile, - Plan: plan, - DependencyLocks: op.DependencyLocks, - }) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write plan file", - fmt.Sprintf("The plan file could not be written: %s.", err), - )) - op.ReportResult(runningOp, diags) - return - } - } - - // Render the plan, if we produced one. - // (This might potentially be a partial plan with Errored set to true) - schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - op.ReportResult(runningOp, diags) - return - } - op.View.Plan(plan, schemas) - - // If we've accumulated any diagnostics along the way then we'll show them - // here just before we show the summary and next steps. This can potentially - // include errors, because we intentionally try to show a partial plan - // above even if Terraform Core encountered an error partway through - // creating it. 
- op.ReportResult(runningOp, diags) - - if !runningOp.PlanEmpty { - op.View.PlanNextStep(op.PlanOutPath) - } -} diff --git a/internal/backend/local/backend_plan_test.go b/internal/backend/local/backend_plan_test.go deleted file mode 100644 index 9dbed9953213..000000000000 --- a/internal/backend/local/backend_plan_test.go +++ /dev/null @@ -1,906 +0,0 @@ -package local - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/zclconf/go-cty/cty" -) - -func TestLocal_planBasic(t *testing.T) { - b := TestLocal(t) - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanRefresh = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if !p.PlanResourceChangeCalled { - t.Fatal("PlanResourceChange should be called") - } - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func TestLocal_planInAutomation(t *testing.T) { - b := 
TestLocal(t) - TestLocalProvider(t, b, "test", planFixtureSchema()) - - const msg = `You didn't use the -out option` - - // When we're "in automation" we omit certain text from the plan output. - // However, the responsibility for this omission is in the view, so here we - // test for its presence while the "in automation" setting is false, to - // validate that we are calling the correct view method. - // - // Ideally this test would be replaced by a call-logging mock view, but - // that's future work. - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanRefresh = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if output := done(t).Stdout(); !strings.Contains(output, msg) { - t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output) - } -} - -func TestLocal_planNoConfig(t *testing.T) { - b := TestLocal(t) - TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) - - op, configCleanup, done := testOperationPlan(t, "./testdata/empty") - defer configCleanup() - op.PlanRefresh = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - output := done(t) - - if run.Result == backend.OperationSuccess { - t.Fatal("plan operation succeeded; want failure") - } - - if stderr := output.Stderr(); !strings.Contains(stderr, "No configuration files") { - t.Fatalf("bad: %s", stderr) - } - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) -} - -// This test validates the state lacking behavior when the inner call to -// Context() fails -func TestLocal_plan_context_error(t *testing.T) { - b := TestLocal(t) - - // This is an intentionally-invalid value to make terraform.NewContext fail - // when b.Operation calls 
it. - // NOTE: This test was originally using a provider initialization failure - // as its forced error condition, but terraform.NewContext is no longer - // responsible for checking that. Invalid parallelism is the last situation - // where terraform.NewContext can return error diagnostics, and arguably - // we should be validating this argument at the UI layer anyway, so perhaps - // in future we'll make terraform.NewContext never return errors and then - // this test will become redundant, because its purpose is specifically - // to test that we properly unlock the state if terraform.NewContext - // returns an error. - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - b.ContextOpts.Parallelism = -1 - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - // we coerce a failure in Context() by omitting the provider schema - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationFailure { - t.Fatalf("plan operation succeeded") - } - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) - - if got, want := done(t).Stderr(), "Error: Invalid parallelism value"; !strings.Contains(got, want) { - t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) - } -} - -func TestLocal_planOutputsChanged(t *testing.T) { - b := TestLocal(t) - testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{Name: "changed"}, - }, cty.StringVal("before"), false) - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{Name: "sensitive_before"}, - }, cty.StringVal("before"), true) - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{Name: "sensitive_after"}, 
- }, cty.StringVal("before"), false) - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{Name: "removed"}, // not present in the config fixture - }, cty.StringVal("before"), false) - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{Name: "unchanged"}, - }, cty.StringVal("before"), false) - // NOTE: This isn't currently testing the situation where the new - // value of an output is unknown, because to do that requires there to - // be at least one managed resource Create action in the plan and that - // would defeat the point of this test, which is to ensure that a - // plan containing only output changes is considered "non-empty". - // For now we're not too worried about testing the "new value is - // unknown" situation because that's already common for printing out - // resource changes and we already have many tests for that. - })) - outDir := t.TempDir() - defer os.RemoveAll(outDir) - planPath := filepath.Join(outDir, "plan.tfplan") - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-outputs-changed") - defer configCleanup() - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. 
- Type: "local", - Config: cfgRaw, - } - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - if run.PlanEmpty { - t.Error("plan should not be empty") - } - - expectedOutput := strings.TrimSpace(` -Changes to Outputs: - + added = "after" - ~ changed = "before" -> "after" - - removed = "before" -> null - ~ sensitive_after = (sensitive value) - ~ sensitive_before = (sensitive value) - -You can apply this plan to save these new output values to the Terraform -state, without changing any real infrastructure. -`) - - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Errorf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) - } -} - -// Module outputs should not cause the plan to be rendered -func TestLocal_planModuleOutputsChanged(t *testing.T) { - b := TestLocal(t) - testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue(addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey), - OutputValue: addrs.OutputValue{Name: "changed"}, - }, cty.StringVal("before"), false) - })) - outDir := t.TempDir() - defer os.RemoveAll(outDir) - planPath := filepath.Join(outDir, "plan.tfplan") - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-module-outputs-changed") - defer configCleanup() - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - Type: "local", - Config: cfgRaw, - } - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - if 
!run.PlanEmpty { - t.Fatal("plan should be empty") - } - - expectedOutput := strings.TrimSpace(` -No changes. Your infrastructure matches the configuration. -`) - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) - } -} - -func TestLocal_planTainted(t *testing.T) { - b := TestLocal(t) - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState_tainted()) - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. - Type: "local", - Config: cfgRaw, - } - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - if run.PlanEmpty { - t.Fatal("plan should not be empty") - } - - expectedOutput := `Terraform used the selected providers to generate the following execution -plan. 
Resource actions are indicated with the following symbols: --/+ destroy and then create replacement - -Terraform will perform the following actions: - - # test_instance.foo is tainted, so must be replaced --/+ resource "test_instance" "foo" { - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - } - -Plan: 1 to add, 0 to change, 1 to destroy.` - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput) - } -} - -func TestLocal_planDeposedOnly(t *testing.T) { - b := TestLocal(t) - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - states.DeposedKey("00000000"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{ - "ami": "bar", - "network_interface": [{ - "device_index": 0, - "description": "Main network interface" - }] - }`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - })) - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. 
- Type: "local", - Config: cfgRaw, - } - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - if !p.ReadResourceCalled { - t.Fatal("ReadResource should've been called to refresh the deposed object") - } - if run.PlanEmpty { - t.Fatal("plan should not be empty") - } - - // The deposed object and the current object are distinct, so our - // plan includes separate actions for each of them. This strange situation - // is not common: it should arise only if Terraform fails during - // a create-before-destroy when the create hasn't completed yet but - // in a severe way that prevents the previous object from being restored - // as "current". - // - // However, that situation was more common in some earlier Terraform - // versions where deposed objects were not managed properly, so this - // can arise when upgrading from an older version with deposed objects - // already in the state. - // - // This is one of the few cases where we expose the idea of "deposed" in - // the UI, including the user-unfriendly "deposed key" (00000000 in this - // case) just so that users can correlate this with what they might - // see in `terraform show` and in the subsequent apply output, because - // it's also possible for there to be _multiple_ deposed objects, in the - // unlikely event that create_before_destroy _keeps_ crashing across - // subsequent runs. - expectedOutput := `Terraform used the selected providers to generate the following execution -plan. 
Resource actions are indicated with the following symbols: - + create - - destroy - -Terraform will perform the following actions: - - # test_instance.foo will be created - + resource "test_instance" "foo" { - + ami = "bar" - - + network_interface { - + description = "Main network interface" - + device_index = 0 - } - } - - # test_instance.foo (deposed object 00000000) will be destroyed - # (left over from a partially-failed replacement of this instance) - - resource "test_instance" "foo" { - - ami = "bar" -> null - - - network_interface { - - description = "Main network interface" -> null - - device_index = 0 -> null - } - } - -Plan: 1 to add, 0 to change, 1 to destroy.` - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Fatalf("Unexpected output:\n%s", output) - } -} - -func TestLocal_planTainted_createBeforeDestroy(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState_tainted()) - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cbd") - defer configCleanup() - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. 
- Type: "local", - Config: cfgRaw, - } - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - if run.PlanEmpty { - t.Fatal("plan should not be empty") - } - - expectedOutput := `Terraform used the selected providers to generate the following execution -plan. Resource actions are indicated with the following symbols: -+/- create replacement and then destroy - -Terraform will perform the following actions: - - # test_instance.foo is tainted, so must be replaced -+/- resource "test_instance" "foo" { - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - } - -Plan: 1 to add, 0 to change, 1 to destroy.` - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Fatalf("Unexpected output:\n%s", output) - } -} - -func TestLocal_planRefreshFalse(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState()) - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - - if !run.PlanEmpty { - t.Fatal("plan should be empty") - } - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func TestLocal_planDestroy(t *testing.T) { - b := TestLocal(t) - - TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState()) - - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - - op, configCleanup, done := 
testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanMode = plans.DestroyMode - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. - Type: "local", - Config: cfgRaw, - } - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if run.PlanEmpty { - t.Fatal("plan should not be empty") - } - - plan := testReadPlan(t, planPath) - for _, r := range plan.Changes.Resources { - if r.Action.String() != "Delete" { - t.Fatalf("bad: %#v", r.Action.String()) - } - } - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func TestLocal_planDestroy_withDataSources(t *testing.T) { - b := TestLocal(t) - - TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState_withDataSource()) - - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - - op, configCleanup, done := testOperationPlan(t, "./testdata/destroy-with-ds") - defer configCleanup() - op.PlanMode = plans.DestroyMode - op.PlanRefresh = true - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. 
- Type: "local", - Config: cfgRaw, - } - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if run.PlanEmpty { - t.Fatal("plan should not be empty") - } - - // Data source should still exist in the the plan file - plan := testReadPlan(t, planPath) - if len(plan.Changes.Resources) != 2 { - t.Fatalf("Expected exactly 1 resource for destruction, %d given: %q", - len(plan.Changes.Resources), getAddrs(plan.Changes.Resources)) - } - - // Data source should not be rendered in the output - expectedOutput := `Terraform will perform the following actions: - - # test_instance.foo[0] will be destroyed - - resource "test_instance" "foo" { - - ami = "bar" -> null - - - network_interface { - - description = "Main network interface" -> null - - device_index = 0 -> null - } - } - -Plan: 0 to add, 0 to change, 1 to destroy.` - - if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { - t.Fatalf("Unexpected output:\n%s", output) - } -} - -func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string { - addrs := make([]string, len(resources)) - for i, r := range resources { - addrs[i] = r.Addr.String() - } - return addrs -} - -func TestLocal_planOutPathNoChange(t *testing.T) { - b := TestLocal(t) - TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState()) - - outDir := t.TempDir() - planPath := filepath.Join(outDir, "plan.tfplan") - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanOutPath = planPath - cfg := cty.ObjectVal(map[string]cty.Value{ - "path": cty.StringVal(b.StatePath), - }) - cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) - if err != nil { - t.Fatal(err) - } - op.PlanOutBackend = &plans.Backend{ - // Just a placeholder so that we can generate a valid plan file. 
- Type: "local", - Config: cfgRaw, - } - op.PlanRefresh = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - plan := testReadPlan(t, planPath) - - if !plan.Changes.Empty() { - t.Fatalf("expected empty plan to be written") - } - - if errOutput := done(t).Stderr(); errOutput != "" { - t.Fatalf("unexpected error output:\n%s", errOutput) - } -} - -func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - - // Many of our tests use an overridden "test" provider that's just in-memory - // inside the test process, not a separate plugin on disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) - - return &backend.Operation{ - Type: backend.OperationTypePlan, - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewNoopLocker(), - View: view, - DependencyLocks: depLocks, - }, configCleanup, done -} - -// testPlanState is just a common state that we use for testing plan. 
-func testPlanState() *states.State { - state := states.NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{ - "ami": "bar", - "network_interface": [{ - "device_index": 0, - "description": "Main network interface" - }] - }`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func testPlanState_withDataSource() *states.State { - state := states.NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{ - "ami": "bar", - "network_interface": [{ - "device_index": 0, - "description": "Main network interface" - }] - }`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_ds", - Name: "bar", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{ - "filter": "foo" - }`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func testPlanState_tainted() *states.State { - state := states.NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{ - "ami": "bar", - 
"network_interface": [{ - "device_index": 0, - "description": "Main network interface" - }] - }`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func testReadPlan(t *testing.T, path string) *plans.Plan { - t.Helper() - - p, err := planfile.Open(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer p.Close() - - plan, err := p.ReadPlan() - if err != nil { - t.Fatalf("err: %s", err) - } - - return plan -} - -// planFixtureSchema returns a schema suitable for processing the -// configuration in testdata/plan . This schema should be -// assigned to a mock provider named "test". -func planFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.Number, Optional: true}, - "description": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test_ds": { - Attributes: map[string]*configschema.Attribute{ - "filter": {Type: cty.String, Required: true}, - }, - }, - }, - } -} - -func TestLocal_invalidOptions(t *testing.T) { - b := TestLocal(t) - TestLocalProvider(t, b, "test", planFixtureSchema()) - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - op.PlanRefresh = true - op.PlanMode = plans.RefreshOnlyMode - op.ForceReplace = []addrs.AbsResourceInstance{mustResourceInstanceAddr("test_instance.foo")} - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - <-run.Done() - if run.Result == 
backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - if errOutput := done(t).Stderr(); errOutput == "" { - t.Fatal("expected error output") - } -} diff --git a/internal/backend/local/backend_refresh_test.go b/internal/backend/local/backend_refresh_test.go deleted file mode 100644 index 886c184181c2..000000000000 --- a/internal/backend/local/backend_refresh_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package local - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - - "github.com/zclconf/go-cty/cty" -) - -func TestLocal_refresh(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) - testStateFile(t, b.StatePath, testRefreshState()) - - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") - defer configCleanup() - defer done(t) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - `) - - // the 
backend should be unlocked after a run - assertBackendStateUnlocked(t, b) -} - -func TestLocal_refreshInput(t *testing.T) { - b := TestLocal(t) - - schema := &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - } - - p := TestLocalProvider(t, b, "test", schema) - testStateFile(t, b.StatePath, testRefreshState()) - - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - val := req.Config.GetAttr("value") - if val.IsNull() || val.AsString() != "bar" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect value %#v", val)) - } - - return - } - - // Enable input asking since it is normally disabled by default - b.OpInput = true - b.ContextOpts.UIInput = &terraform.MockUIInput{InputReturnString: "bar"} - - op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-var-unset") - defer configCleanup() - defer done(t) - op.UIIn = b.ContextOpts.UIInput - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - `) -} - -func TestLocal_refreshValidate(t *testing.T) { - b := TestLocal(t) - p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) - testStateFile(t, b.StatePath, 
testRefreshState()) - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - // Enable validation - b.OpValidation = true - - op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") - defer configCleanup() - defer done(t) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - `) -} - -func TestLocal_refreshValidateProviderConfigured(t *testing.T) { - b := TestLocal(t) - - schema := &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - } - - p := TestLocalProvider(t, b, "test", schema) - testStateFile(t, b.StatePath, testRefreshState()) - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - // Enable validation - b.OpValidation = true - - op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-provider-config") - defer configCleanup() - defer done(t) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - if !p.ValidateProviderConfigCalled { - t.Fatal("Validate provider config should be called") - } - - checkState(t, b.StateOutPath, ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] - `) -} - -// This test validates the state lacking behavior when the inner call to -// Context() 
fails -func TestLocal_refresh_context_error(t *testing.T) { - b := TestLocal(t) - testStateFile(t, b.StatePath, testRefreshState()) - op, configCleanup, done := testOperationRefresh(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - // we coerce a failure in Context() by omitting the provider schema - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("operation succeeded; want failure") - } - assertBackendStateUnlocked(t, b) -} - -func TestLocal_refreshEmptyState(t *testing.T) { - b := TestLocal(t) - - p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) - testStateFile(t, b.StatePath, states.NewState()) - - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") - defer configCleanup() - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - - output := done(t) - - if stderr := output.Stderr(); stderr != "" { - t.Fatalf("expected only warning diags, got errors: %s", stderr) - } - if got, want := output.Stdout(), "Warning: Empty or non-existent state"; !strings.Contains(got, want) { - t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) - } - - // the backend should be unlocked after a run - assertBackendStateUnlocked(t, b) -} - -func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - - // Many of our tests use an overridden "test" provider that's just in-memory - // inside the 
test process, not a separate plugin on disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/test")) - - return &backend.Operation{ - Type: backend.OperationTypeRefresh, - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewNoopLocker(), - View: view, - DependencyLocks: depLocks, - }, configCleanup, done -} - -// testRefreshState is just a common state that we use for testing refresh. -func testRefreshState() *states.State { - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - return state -} - -// refreshFixtureSchema returns a schema suitable for processing the -// configuration in testdata/refresh . This schema should be -// assigned to a mock provider named "test". 
-func refreshFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - } -} diff --git a/internal/backend/local/backend_test.go b/internal/backend/local/backend_test.go deleted file mode 100644 index 5484ac79650d..000000000000 --- a/internal/backend/local/backend_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package local - -import ( - "errors" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestLocal_impl(t *testing.T) { - var _ backend.Enhanced = New() - var _ backend.Local = New() - var _ backend.CLI = New() -} - -func TestLocal_backend(t *testing.T) { - testTmpDir(t) - b := New() - backend.TestBackendStates(t, b) - backend.TestBackendStateLocks(t, b, b) -} - -func checkState(t *testing.T, path, expected string) { - t.Helper() - // Read the state - f, err := os.Open(path) - if err != nil { - t.Fatalf("err: %s", err) - } - - state, err := statefile.Read(f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - actual := state.State.String() - expected = strings.TrimSpace(expected) - if actual != expected { - t.Fatalf("state does not match! 
actual:\n%s\n\nexpected:\n%s", actual, expected) - } -} - -func TestLocal_StatePaths(t *testing.T) { - b := New() - - // Test the defaults - path, out, back := b.StatePaths("") - - if path != DefaultStateFilename { - t.Fatalf("expected %q, got %q", DefaultStateFilename, path) - } - - if out != DefaultStateFilename { - t.Fatalf("expected %q, got %q", DefaultStateFilename, out) - } - - dfltBackup := DefaultStateFilename + DefaultBackupExtension - if back != dfltBackup { - t.Fatalf("expected %q, got %q", dfltBackup, back) - } - - // check with env - testEnv := "test_env" - path, out, back = b.StatePaths(testEnv) - - expectedPath := filepath.Join(DefaultWorkspaceDir, testEnv, DefaultStateFilename) - expectedOut := expectedPath - expectedBackup := expectedPath + DefaultBackupExtension - - if path != expectedPath { - t.Fatalf("expected %q, got %q", expectedPath, path) - } - - if out != expectedOut { - t.Fatalf("expected %q, got %q", expectedOut, out) - } - - if back != expectedBackup { - t.Fatalf("expected %q, got %q", expectedBackup, back) - } - -} - -func TestLocal_addAndRemoveStates(t *testing.T) { - testTmpDir(t) - dflt := backend.DefaultStateName - expectedStates := []string{dflt} - - b := New() - states, err := b.Workspaces() - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(states, expectedStates) { - t.Fatalf("expected []string{%q}, got %q", dflt, states) - } - - expectedA := "test_A" - if _, err := b.StateMgr(expectedA); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedStates = append(expectedStates, expectedA) - if !reflect.DeepEqual(states, expectedStates) { - t.Fatalf("expected %q, got %q", expectedStates, states) - } - - expectedB := "test_B" - if _, err := b.StateMgr(expectedB); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedStates = append(expectedStates, expectedB) - if !reflect.DeepEqual(states, expectedStates) 
{ - t.Fatalf("expected %q, got %q", expectedStates, states) - } - - if err := b.DeleteWorkspace(expectedA, true); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedStates = []string{dflt, expectedB} - if !reflect.DeepEqual(states, expectedStates) { - t.Fatalf("expected %q, got %q", expectedStates, states) - } - - if err := b.DeleteWorkspace(expectedB, true); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedStates = []string{dflt} - if !reflect.DeepEqual(states, expectedStates) { - t.Fatalf("expected %q, got %q", expectedStates, states) - } - - if err := b.DeleteWorkspace(dflt, true); err == nil { - t.Fatal("expected error deleting default state") - } -} - -// a local backend which returns sentinel errors for NamedState methods to -// verify it's being called. -type testDelegateBackend struct { - *Local - - // return a sentinel error on these calls - stateErr bool - statesErr bool - deleteErr bool -} - -var errTestDelegateState = errors.New("state called") -var errTestDelegateStates = errors.New("states called") -var errTestDelegateDeleteState = errors.New("delete called") - -func (b *testDelegateBackend) StateMgr(name string) (statemgr.Full, error) { - if b.stateErr { - return nil, errTestDelegateState - } - s := statemgr.NewFilesystem("terraform.tfstate") - return s, nil -} - -func (b *testDelegateBackend) Workspaces() ([]string, error) { - if b.statesErr { - return nil, errTestDelegateStates - } - return []string{"default"}, nil -} - -func (b *testDelegateBackend) DeleteWorkspace(name string, force bool) error { - if b.deleteErr { - return errTestDelegateDeleteState - } - return nil -} - -// verify that the MultiState methods are dispatched to the correct Backend. 
-func TestLocal_multiStateBackend(t *testing.T) { - // assign a separate backend where we can read the state - b := NewWithBackend(&testDelegateBackend{ - stateErr: true, - statesErr: true, - deleteErr: true, - }) - - if _, err := b.StateMgr("test"); err != errTestDelegateState { - t.Fatal("expected errTestDelegateState, got:", err) - } - - if _, err := b.Workspaces(); err != errTestDelegateStates { - t.Fatal("expected errTestDelegateStates, got:", err) - } - - if err := b.DeleteWorkspace("test", true); err != errTestDelegateDeleteState { - t.Fatal("expected errTestDelegateDeleteState, got:", err) - } -} - -// testTmpDir changes into a tmp dir and change back automatically when the test -// and all its subtests complete. -func testTmpDir(t *testing.T) { - tmp := t.TempDir() - - old, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - if err := os.Chdir(tmp); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - // ignore errors and try to clean up - os.Chdir(old) - }) -} diff --git a/internal/backend/local/cli.go b/internal/backend/local/cli.go deleted file mode 100644 index 41d2477c6a91..000000000000 --- a/internal/backend/local/cli.go +++ /dev/null @@ -1,32 +0,0 @@ -package local - -import ( - "log" - - "github.com/hashicorp/terraform/internal/backend" -) - -// backend.CLI impl. 
-func (b *Local) CLIInit(opts *backend.CLIOpts) error { - b.ContextOpts = opts.ContextOpts - b.OpInput = opts.Input - b.OpValidation = opts.Validation - - // configure any new cli options - if opts.StatePath != "" { - log.Printf("[TRACE] backend/local: CLI option -state is overriding state path to %s", opts.StatePath) - b.OverrideStatePath = opts.StatePath - } - - if opts.StateOutPath != "" { - log.Printf("[TRACE] backend/local: CLI option -state-out is overriding state output path to %s", opts.StateOutPath) - b.OverrideStateOutPath = opts.StateOutPath - } - - if opts.StateBackupPath != "" { - log.Printf("[TRACE] backend/local: CLI option -backup is overriding state backup path to %s", opts.StateBackupPath) - b.OverrideStateBackupPath = opts.StateBackupPath - } - - return nil -} diff --git a/internal/backend/local/local_test.go b/internal/backend/local/local_test.go deleted file mode 100644 index e447921e0938..000000000000 --- a/internal/backend/local/local_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package local - -import ( - "flag" - "os" - "testing" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -func TestMain(m *testing.M) { - flag.Parse() - os.Exit(m.Run()) -} diff --git a/internal/backend/local/testing.go b/internal/backend/local/testing.go deleted file mode 100644 index 2958196b1def..000000000000 --- a/internal/backend/local/testing.go +++ /dev/null @@ -1,239 +0,0 @@ -package local - -import ( - "path/filepath" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" -) - -// TestLocal returns a configured Local struct with temporary paths and -// in-memory ContextOpts. 
-// -// No operations will be called on the returned value, so you can still set -// public fields without any locks. -func TestLocal(t *testing.T) *Local { - t.Helper() - tempDir, err := filepath.EvalSymlinks(t.TempDir()) - if err != nil { - t.Fatal(err) - } - - local := New() - local.StatePath = filepath.Join(tempDir, "state.tfstate") - local.StateOutPath = filepath.Join(tempDir, "state.tfstate") - local.StateBackupPath = filepath.Join(tempDir, "state.tfstate.bak") - local.StateWorkspaceDir = filepath.Join(tempDir, "state.tfstate.d") - local.ContextOpts = &terraform.ContextOpts{} - - return local -} - -// TestLocalProvider modifies the ContextOpts of the *Local parameter to -// have a provider with the given name. -func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.ProviderSchema) *terraform.MockProvider { - // Build a mock resource provider for in-memory operations - p := new(terraform.MockProvider) - - if schema == nil { - schema = &terraform.ProviderSchema{} // default schema is empty - } - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: schema.Provider}, - ProviderMeta: providers.Schema{Block: schema.ProviderMeta}, - ResourceTypes: map[string]providers.Schema{}, - DataSources: map[string]providers.Schema{}, - } - for name, res := range schema.ResourceTypes { - p.GetProviderSchemaResponse.ResourceTypes[name] = providers.Schema{ - Block: res, - Version: int64(schema.ResourceTypeSchemaVersions[name]), - } - } - for name, dat := range schema.DataSources { - p.GetProviderSchemaResponse.DataSources[name] = providers.Schema{Block: dat} - } - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // this is a destroy plan, - if req.ProposedNewState.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - rSchema, _ := 
schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName) - if rSchema == nil { - rSchema = &configschema.Block{} // default schema is empty - } - plannedVals := map[string]cty.Value{} - for name, attrS := range rSchema.Attributes { - val := req.ProposedNewState.GetAttr(name) - if attrS.Computed && val.IsNull() { - val = cty.UnknownVal(attrS.Type) - } - plannedVals[name] = val - } - for name := range rSchema.BlockTypes { - // For simplicity's sake we just copy the block attributes over - // verbatim, since this package's mock providers are all relatively - // simple -- we're testing the backend, not esoteric provider features. - plannedVals[name] = req.ProposedNewState.GetAttr(name) - } - - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(plannedVals), - PlannedPrivate: req.PriorPrivate, - } - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{State: req.Config} - } - - // Initialize the opts - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - - // Set up our provider - b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider(name): providers.FactoryFixed(p), - } - - return p - -} - -// TestLocalSingleState is a backend implementation that wraps Local -// and modifies it to only support single states (returns -// ErrWorkspacesNotSupported for multi-state operations). -// -// This isn't an actual use case, this is exported just to provide a -// easy way to test that behavior. -type TestLocalSingleState struct { - *Local -} - -// TestNewLocalSingle is a factory for creating a TestLocalSingleState. -// This function matches the signature required for backend/init. 
-func TestNewLocalSingle() backend.Backend { - return &TestLocalSingleState{Local: New()} -} - -func (b *TestLocalSingleState) Workspaces() ([]string, error) { - return nil, backend.ErrWorkspacesNotSupported -} - -func (b *TestLocalSingleState) DeleteWorkspace(string, bool) error { - return backend.ErrWorkspacesNotSupported -} - -func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, error) { - if name != backend.DefaultStateName { - return nil, backend.ErrWorkspacesNotSupported - } - - return b.Local.StateMgr(name) -} - -// TestLocalNoDefaultState is a backend implementation that wraps -// Local and modifies it to support named states, but not the -// default state. It returns ErrDefaultWorkspaceNotSupported when -// the DefaultStateName is used. -type TestLocalNoDefaultState struct { - *Local -} - -// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState. -// This function matches the signature required for backend/init. -func TestNewLocalNoDefault() backend.Backend { - return &TestLocalNoDefaultState{Local: New()} -} - -func (b *TestLocalNoDefaultState) Workspaces() ([]string, error) { - workspaces, err := b.Local.Workspaces() - if err != nil { - return nil, err - } - - filtered := workspaces[:0] - for _, name := range workspaces { - if name != backend.DefaultStateName { - filtered = append(filtered, name) - } - } - - return filtered, nil -} - -func (b *TestLocalNoDefaultState) DeleteWorkspace(name string, force bool) error { - if name == backend.DefaultStateName { - return backend.ErrDefaultWorkspaceNotSupported - } - return b.Local.DeleteWorkspace(name, force) -} - -func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, error) { - if name == backend.DefaultStateName { - return nil, backend.ErrDefaultWorkspaceNotSupported - } - return b.Local.StateMgr(name) -} - -func testStateFile(t *testing.T, path string, s *states.State) { - stateFile := statemgr.NewFilesystem(path) - stateFile.WriteState(s) -} - -func 
mustProviderConfig(s string) addrs.AbsProviderConfig { - p, diags := addrs.ParseAbsProviderConfigStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return p -} - -func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance { - addr, diags := addrs.ParseAbsResourceInstanceStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} - -// assertBackendStateUnlocked attempts to lock the backend state. Failure -// indicates that the state was indeed locked and therefore this function will -// return true. -func assertBackendStateUnlocked(t *testing.T, b *Local) bool { - t.Helper() - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Errorf("state is already locked: %s", err.Error()) - return false - } - return true -} - -// assertBackendStateLocked attempts to lock the backend state. Failure -// indicates that the state was already locked and therefore this function will -// return false. -func assertBackendStateLocked(t *testing.T, b *Local) bool { - t.Helper() - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - return true - } - t.Error("unexpected success locking state") - return true -} diff --git a/internal/backend/remote-state/azure/backend.go b/internal/backend/remote-state/azure/backend.go deleted file mode 100644 index f21b0ba4e255..000000000000 --- a/internal/backend/remote-state/azure/backend.go +++ /dev/null @@ -1,271 +0,0 @@ -package azure - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" -) - -// New creates a new backend for Azure remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "storage_account_name": { - Type: schema.TypeString, - Required: true, - Description: "The name of the storage account.", - }, - - "container_name": { - Type: schema.TypeString, - Required: true, - Description: "The container name.", - }, - - "key": { - Type: schema.TypeString, - Required: true, - Description: "The blob key.", - }, - - "metadata_host": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOST", ""), - Description: "The Metadata URL which will be used to obtain the Cloud Environment.", - }, - - "environment": { - Type: schema.TypeString, - Optional: true, - Description: "The Azure cloud environment.", - DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"), - }, - - "access_key": { - Type: schema.TypeString, - Optional: true, - Description: "The access key.", - DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), - }, - - "sas_token": { - Type: schema.TypeString, - Optional: true, - Description: "A SAS Token used to interact with the Blob Storage Account.", - DefaultFunc: schema.EnvDefaultFunc("ARM_SAS_TOKEN", ""), - }, - - "snapshot": { - Type: schema.TypeBool, - Optional: true, - Description: "Enable/Disable automatic blob snapshotting", - DefaultFunc: schema.EnvDefaultFunc("ARM_SNAPSHOT", false), - }, - - "resource_group_name": { - Type: schema.TypeString, - Optional: true, - Description: "The resource group name.", - }, - - "client_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Client ID.", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), - }, - - "endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom Endpoint used to access the Azure Resource Manager API's.", - DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""), - }, - - "subscription_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Subscription ID.", - 
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), - }, - - "tenant_id": { - Type: schema.TypeString, - Optional: true, - Description: "The Tenant ID.", - DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), - }, - - // Service Principal (Client Certificate) specific - "client_certificate_password": { - Type: schema.TypeString, - Optional: true, - Description: "The password associated with the Client Certificate specified in `client_certificate_path`", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""), - }, - "client_certificate_path": { - Type: schema.TypeString, - Optional: true, - Description: "The path to the PFX file used as the Client Certificate when authenticating as a Service Principal", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""), - }, - - // Service Principal (Client Secret) specific - "client_secret": { - Type: schema.TypeString, - Optional: true, - Description: "The Client Secret.", - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), - }, - - // Managed Service Identity specific - "use_msi": { - Type: schema.TypeBool, - Optional: true, - Description: "Should Managed Service Identity be used?", - DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false), - }, - "msi_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "The Managed Service Identity Endpoint.", - DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""), - }, - - // OIDC auth specific fields - "use_oidc": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false), - Description: "Allow OIDC to be used for authentication", - }, - "oidc_token": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""), - Description: "A generic JWT token that can be used for OIDC authentication. 
Should not be used in conjunction with `oidc_request_token`.", - }, - "oidc_token_file_path": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""), - Description: "Path to file containing a generic JWT token that can be used for OIDC authentication. Should not be used in conjunction with `oidc_request_token`.", - }, - "oidc_request_url": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""), - Description: "The URL of the OIDC provider from which to request an ID token. Needs to be used in conjunction with `oidc_request_token`. This is meant to be used for Github Actions.", - }, - "oidc_request_token": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""), - Description: "The bearer token to use for the request to the OIDC providers `oidc_request_url` URL to fetch an ID token. Needs to be used in conjunction with `oidc_request_url`. 
This is meant to be used for Github Actions.", - }, - - // Feature Flags - "use_azuread_auth": { - Type: schema.TypeBool, - Optional: true, - Description: "Should Terraform use AzureAD Authentication to access the Blob?", - DefaultFunc: schema.EnvDefaultFunc("ARM_USE_AZUREAD", false), - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - armClient *ArmClient - containerName string - keyName string - accountName string - snapshot bool -} - -type BackendConfig struct { - // Required - StorageAccountName string - - // Optional - AccessKey string - ClientID string - ClientCertificatePassword string - ClientCertificatePath string - ClientSecret string - CustomResourceManagerEndpoint string - MetadataHost string - Environment string - MsiEndpoint string - OIDCToken string - OIDCTokenFilePath string - OIDCRequestURL string - OIDCRequestToken string - ResourceGroupName string - SasToken string - SubscriptionID string - TenantID string - UseMsi bool - UseOIDC bool - UseAzureADAuthentication bool -} - -func (b *Backend) configure(ctx context.Context) error { - if b.containerName != "" { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - b.containerName = data.Get("container_name").(string) - b.accountName = data.Get("storage_account_name").(string) - b.keyName = data.Get("key").(string) - b.snapshot = data.Get("snapshot").(bool) - - config := BackendConfig{ - AccessKey: data.Get("access_key").(string), - ClientID: data.Get("client_id").(string), - ClientCertificatePassword: data.Get("client_certificate_password").(string), - ClientCertificatePath: data.Get("client_certificate_path").(string), - ClientSecret: data.Get("client_secret").(string), - CustomResourceManagerEndpoint: data.Get("endpoint").(string), - MetadataHost: data.Get("metadata_host").(string), - Environment: 
data.Get("environment").(string), - MsiEndpoint: data.Get("msi_endpoint").(string), - OIDCToken: data.Get("oidc_token").(string), - OIDCTokenFilePath: data.Get("oidc_token_file_path").(string), - OIDCRequestURL: data.Get("oidc_request_url").(string), - OIDCRequestToken: data.Get("oidc_request_token").(string), - ResourceGroupName: data.Get("resource_group_name").(string), - SasToken: data.Get("sas_token").(string), - StorageAccountName: data.Get("storage_account_name").(string), - SubscriptionID: data.Get("subscription_id").(string), - TenantID: data.Get("tenant_id").(string), - UseMsi: data.Get("use_msi").(bool), - UseOIDC: data.Get("use_oidc").(bool), - UseAzureADAuthentication: data.Get("use_azuread_auth").(bool), - } - - armClient, err := buildArmClient(context.TODO(), config) - if err != nil { - return err - } - - thingsNeededToLookupAccessKeySpecified := config.AccessKey == "" && config.SasToken == "" && config.ResourceGroupName == "" - if thingsNeededToLookupAccessKeySpecified && !config.UseAzureADAuthentication { - return fmt.Errorf("Either an Access Key / SAS Token or the Resource Group for the Storage Account must be specified - or Azure AD Authentication must be enabled") - } - - b.armClient = armClient - return nil -} diff --git a/internal/backend/remote-state/azure/backend_state.go b/internal/backend/remote-state/azure/backend_state.go deleted file mode 100644 index fbbb855d0091..000000000000 --- a/internal/backend/remote-state/azure/backend_state.go +++ /dev/null @@ -1,167 +0,0 @@ -package azure - -import ( - "context" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" - "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers" -) - -const ( - // This will be used as 
directory name, the odd looking colon is simply to - // reduce the chance of name conflicts with existing objects. - keyEnvPrefix = "env:" -) - -func (b *Backend) Workspaces() ([]string, error) { - prefix := b.keyName + keyEnvPrefix - params := containers.ListBlobsInput{ - Prefix: &prefix, - } - - ctx := context.TODO() - client, err := b.armClient.getContainersClient(ctx) - if err != nil { - return nil, err - } - resp, err := client.ListBlobs(ctx, b.armClient.storageAccountName, b.containerName, params) - if err != nil { - return nil, err - } - - envs := map[string]struct{}{} - for _, obj := range resp.Blobs.Blobs { - key := obj.Name - if strings.HasPrefix(key, prefix) { - name := strings.TrimPrefix(key, prefix) - // we store the state in a key, not a directory - if strings.Contains(name, "/") { - continue - } - - envs[name] = struct{}{} - } - } - - result := []string{backend.DefaultStateName} - for name := range envs { - result = append(result, name) - } - sort.Strings(result[1:]) - return result, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - ctx := context.TODO() - client, err := b.armClient.getBlobClient(ctx) - if err != nil { - return err - } - - if resp, err := client.Delete(ctx, b.armClient.storageAccountName, b.containerName, b.path(name), blobs.DeleteInput{}); err != nil { - if resp.Response.StatusCode != 404 { - return err - } - } - - return nil -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - ctx := context.TODO() - blobClient, err := b.armClient.getBlobClient(ctx) - if err != nil { - return nil, err - } - - client := &RemoteClient{ - giovanniBlobClient: *blobClient, - containerName: b.containerName, - keyName: b.path(name), - accountName: b.accountName, - snapshot: b.snapshot, - } - - stateMgr := &remote.State{Client: client} - - // Grab the value - if err := stateMgr.RefreshState(); err != nil 
{ - return nil, err - } - //if this isn't the default state name, we need to create the object so - //it's listed by States. - if v := stateMgr.State(); v == nil { - // take a lock on this state while we write it - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock azure state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - //if this isn't the default state name, we need to create the object so - //it's listed by States. - if v := stateMgr.State(); v == nil { - // If we have no state, we have to create an empty state - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - } - } - - return stateMgr, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} - -func (b *Backend) path(name string) string { - if name == backend.DefaultStateName { - return b.keyName - } - - return b.keyName + keyEnvPrefix + name -} - -const errStateUnlock = ` -Error unlocking Azure state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. 
-` diff --git a/internal/backend/remote-state/azure/backend_test.go b/internal/backend/remote-state/azure/backend_test.go deleted file mode 100644 index fa5b0a9f1666..000000000000 --- a/internal/backend/remote-state/azure/backend_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package azure - -import ( - "context" - "os" - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/acctest" -) - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackendConfig(t *testing.T) { - // This test just instantiates the client. Shouldn't make any actual - // requests nor incur any costs. - - config := map[string]interface{}{ - "storage_account_name": "tfaccount", - "container_name": "tfcontainer", - "key": "state", - "snapshot": false, - // Access Key must be Base64 - "access_key": "QUNDRVNTX0tFWQ0K", - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - - if b.containerName != "tfcontainer" { - t.Fatalf("Incorrect bucketName was populated") - } - if b.keyName != "state" { - t.Fatalf("Incorrect keyName was populated") - } - if b.snapshot != false { - t.Fatalf("Incorrect snapshot was populated") - } -} - -func TestAccBackendAccessKeyBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - armClient.destroyTestResources(ctx, res) - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - 
"endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendSASTokenBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey) - if err != nil { - t.Fatalf("Error building SAS Token: %+v", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "sas_token": *sasToken, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendOIDCBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "use_oidc": true, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendAzureADAuthBasic(t *testing.T) { - 
testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - res.useAzureADAuth = true - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - armClient.destroyTestResources(ctx, res) - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - "use_azuread_auth": true, - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendManagedServiceIdentityBasic(t *testing.T) { - testAccAzureBackendRunningInAzure(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "use_msi": true, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendServicePrincipalClientCertificateBasic(t *testing.T) { - testAccAzureBackend(t) - - clientCertPassword := os.Getenv("ARM_CLIENT_CERTIFICATE_PASSWORD") - clientCertPath := 
os.Getenv("ARM_CLIENT_CERTIFICATE_PATH") - if clientCertPath == "" { - t.Skip("Skipping since `ARM_CLIENT_CERTIFICATE_PATH` is not specified!") - } - - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_certificate_password": clientCertPassword, - "client_certificate_path": clientCertPath, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendServicePrincipalClientSecretBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": 
os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendServicePrincipalClientSecretCustomEndpoint(t *testing.T) { - testAccAzureBackend(t) - - // this is only applicable for Azure Stack. - endpoint := os.Getenv("ARM_ENDPOINT") - if endpoint == "" { - t.Skip("Skipping as ARM_ENDPOINT isn't configured") - } - - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": endpoint, - })).(*Backend) - - backend.TestBackendStates(t, b) -} - -func TestAccBackendAccessKeyLocked(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - 
"endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStateLocks(t, b1, b2) - backend.TestBackendStateForceUnlock(t, b1, b2) - - backend.TestBackendStateLocksInWS(t, b1, b2, "foo") - backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") -} - -func TestAccBackendServicePrincipalLocked(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), 
- "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - backend.TestBackendStateLocks(t, b1, b2) - backend.TestBackendStateForceUnlock(t, b1, b2) - - backend.TestBackendStateLocksInWS(t, b1, b2, "foo") - backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") -} diff --git a/internal/backend/remote-state/azure/client.go b/internal/backend/remote-state/azure/client.go deleted file mode 100644 index 5d22767954b8..000000000000 --- a/internal/backend/remote-state/azure/client.go +++ /dev/null @@ -1,279 +0,0 @@ -package azure - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "log" - "net/http" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" -) - -const ( - leaseHeader = "x-ms-lease-id" - // Must be lower case - lockInfoMetaKey = "terraformlockid" -) - -type RemoteClient struct { - giovanniBlobClient blobs.Client - accountName string - containerName string - keyName string - leaseID string - snapshot bool -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - options := blobs.GetInput{} - if c.leaseID != "" { - options.LeaseID = &c.leaseID - } - - ctx := context.TODO() - blob, err := c.giovanniBlobClient.Get(ctx, c.accountName, c.containerName, c.keyName, options) - if err != nil { - if blob.Response.IsHTTPStatus(http.StatusNotFound) { - return nil, nil - } - return nil, err - } - - payload := &remote.Payload{ - Data: blob.Contents, - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - getOptions := blobs.GetPropertiesInput{} - setOptions := blobs.SetPropertiesInput{} - putOptions := 
blobs.PutBlockBlobInput{} - - options := blobs.GetInput{} - if c.leaseID != "" { - options.LeaseID = &c.leaseID - getOptions.LeaseID = &c.leaseID - setOptions.LeaseID = &c.leaseID - putOptions.LeaseID = &c.leaseID - } - - ctx := context.TODO() - - if c.snapshot { - snapshotInput := blobs.SnapshotInput{LeaseID: options.LeaseID} - - log.Printf("[DEBUG] Snapshotting existing Blob %q (Container %q / Account %q)", c.keyName, c.containerName, c.accountName) - if _, err := c.giovanniBlobClient.Snapshot(ctx, c.accountName, c.containerName, c.keyName, snapshotInput); err != nil { - return fmt.Errorf("error snapshotting Blob %q (Container %q / Account %q): %+v", c.keyName, c.containerName, c.accountName, err) - } - - log.Print("[DEBUG] Created blob snapshot") - } - - blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, getOptions) - if err != nil { - if blob.StatusCode != 404 { - return err - } - } - - contentType := "application/json" - putOptions.Content = &data - putOptions.ContentType = &contentType - putOptions.MetaData = blob.MetaData - _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putOptions) - - return err -} - -func (c *RemoteClient) Delete() error { - options := blobs.DeleteInput{} - - if c.leaseID != "" { - options.LeaseID = &c.leaseID - } - - ctx := context.TODO() - resp, err := c.giovanniBlobClient.Delete(ctx, c.accountName, c.containerName, c.keyName, options) - if err != nil { - if !resp.IsHTTPStatus(http.StatusNotFound) { - return err - } - } - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) - info.Path = stateName - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - - info.ID = lockID - } - - getLockInfoErr := func(err error) error { - lockInfo, infoErr := c.getLockInfo() - if infoErr != nil { - err = 
multierror.Append(err, infoErr) - } - - return &statemgr.LockError{ - Err: err, - Info: lockInfo, - } - } - - leaseOptions := blobs.AcquireLeaseInput{ - ProposedLeaseID: &info.ID, - LeaseDuration: -1, - } - ctx := context.TODO() - - // obtain properties to see if the blob lease is already in use. If the blob doesn't exist, create it - properties, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{}) - if err != nil { - // error if we had issues getting the blob - if !properties.Response.IsHTTPStatus(http.StatusNotFound) { - return "", getLockInfoErr(err) - } - // if we don't find the blob, we need to build it - - contentType := "application/json" - putGOptions := blobs.PutBlockBlobInput{ - ContentType: &contentType, - } - - _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putGOptions) - if err != nil { - return "", getLockInfoErr(err) - } - } - - // if the blob is already locked then error - if properties.LeaseStatus == blobs.Locked { - return "", getLockInfoErr(fmt.Errorf("state blob is already locked")) - } - - leaseID, err := c.giovanniBlobClient.AcquireLease(ctx, c.accountName, c.containerName, c.keyName, leaseOptions) - if err != nil { - return "", getLockInfoErr(err) - } - - info.ID = leaseID.LeaseID - c.leaseID = leaseID.LeaseID - - if err := c.writeLockInfo(info); err != nil { - return "", err - } - - return info.ID, nil -} - -func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { - options := blobs.GetPropertiesInput{} - if c.leaseID != "" { - options.LeaseID = &c.leaseID - } - - ctx := context.TODO() - blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, options) - if err != nil { - return nil, err - } - - raw := blob.MetaData[lockInfoMetaKey] - if raw == "" { - return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) - } - - data, err := base64.StdEncoding.DecodeString(raw) - if err 
!= nil { - return nil, err - } - - lockInfo := &statemgr.LockInfo{} - err = json.Unmarshal(data, lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} - -// writes info to blob meta data, deletes metadata entry if info is nil -func (c *RemoteClient) writeLockInfo(info *statemgr.LockInfo) error { - ctx := context.TODO() - blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{LeaseID: &c.leaseID}) - if err != nil { - return err - } - if err != nil { - return err - } - - if info == nil { - delete(blob.MetaData, lockInfoMetaKey) - } else { - value := base64.StdEncoding.EncodeToString(info.Marshal()) - blob.MetaData[lockInfoMetaKey] = value - } - - opts := blobs.SetMetaDataInput{ - LeaseID: &c.leaseID, - MetaData: blob.MetaData, - } - - _, err = c.giovanniBlobClient.SetMetaData(ctx, c.accountName, c.containerName, c.keyName, opts) - return err -} - -func (c *RemoteClient) Unlock(id string) error { - lockErr := &statemgr.LockError{} - - lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - c.leaseID = lockInfo.ID - if err := c.writeLockInfo(nil); err != nil { - lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) - return lockErr - } - - ctx := context.TODO() - _, err = c.giovanniBlobClient.ReleaseLease(ctx, c.accountName, c.containerName, c.keyName, id) - if err != nil { - lockErr.Err = err - return lockErr - } - - c.leaseID = "" - - return nil -} diff --git a/internal/backend/remote-state/azure/client_test.go b/internal/backend/remote-state/azure/client_test.go deleted file mode 100644 index 5cc5d6a8467f..000000000000 --- a/internal/backend/remote-state/azure/client_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package azure - 
-import ( - "context" - "os" - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/acctest" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClientAccessKeyBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientManagedServiceIdentityBasic(t *testing.T) { - testAccAzureBackendRunningInAzure(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": 
res.resourceGroup, - "use_msi": true, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientSasTokenBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey) - if err != nil { - t.Fatalf("Error building SAS Token: %+v", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "sas_token": *sasToken, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientServicePrincipalBasic(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": 
res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientAccessKeyLocks(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "access_key": res.storageAccountAccessKey, - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} - -func TestRemoteClientServicePrincipalLocks(t 
*testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "storage_account_name": res.storageAccountName, - "container_name": res.storageContainerName, - "key": res.storageKeyName, - "resource_group_name": res.resourceGroup, - "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), - "tenant_id": os.Getenv("ARM_TENANT_ID"), - "client_id": os.Getenv("ARM_CLIENT_ID"), - "client_secret": os.Getenv("ARM_CLIENT_SECRET"), - "environment": os.Getenv("ARM_ENVIRONMENT"), - "endpoint": os.Getenv("ARM_ENDPOINT"), - })).(*Backend) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} - -func TestPutMaintainsMetaData(t *testing.T) { - testAccAzureBackend(t) - rs := acctest.RandString(4) - res := testResourceNames(rs, "testState") - armClient := buildTestClient(t, res) - - ctx := context.TODO() - err := armClient.buildTestResources(ctx, &res) - 
defer armClient.destroyTestResources(ctx, res) - if err != nil { - t.Fatalf("Error creating Test Resources: %q", err) - } - - headerName := "acceptancetest" - expectedValue := "f3b56bad-33ad-4b93-a600-7a66e9cbd1eb" - - client, err := armClient.getBlobClient(ctx) - if err != nil { - t.Fatalf("Error building Blob Client: %+v", err) - } - - _, err = client.PutBlockBlob(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.PutBlockBlobInput{}) - if err != nil { - t.Fatalf("Error Creating Block Blob: %+v", err) - } - - blobReference, err := client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{}) - if err != nil { - t.Fatalf("Error loading MetaData: %+v", err) - } - - blobReference.MetaData[headerName] = expectedValue - opts := blobs.SetMetaDataInput{ - MetaData: blobReference.MetaData, - } - _, err = client.SetMetaData(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, opts) - if err != nil { - t.Fatalf("Error setting MetaData: %+v", err) - } - - // update the metadata using the Backend - remoteClient := RemoteClient{ - keyName: res.storageKeyName, - containerName: res.storageContainerName, - accountName: res.storageAccountName, - - giovanniBlobClient: *client, - } - - bytes := []byte(acctest.RandString(20)) - err = remoteClient.Put(bytes) - if err != nil { - t.Fatalf("Error putting data: %+v", err) - } - - // Verify it still exists - blobReference, err = client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{}) - if err != nil { - t.Fatalf("Error loading MetaData: %+v", err) - } - - if blobReference.MetaData[headerName] != expectedValue { - t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.MetaData) - } -} diff --git a/internal/backend/remote-state/consul/backend.go b/internal/backend/remote-state/consul/backend.go deleted file mode 100644 
index 884696981350..000000000000 --- a/internal/backend/remote-state/consul/backend.go +++ /dev/null @@ -1,180 +0,0 @@ -package consul - -import ( - "context" - "net" - "strings" - "time" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" -) - -// New creates a new backend for Consul remote state. -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Path to store state in Consul", - }, - - "access_token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Access token for a Consul ACL", - Default: "", // To prevent input - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Address to the Consul Cluster", - Default: "", // To prevent input - }, - - "scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Scheme to communicate to Consul with", - Default: "", // To prevent input - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Datacenter to communicate with", - Default: "", // To prevent input - }, - - "http_auth": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "HTTP Auth in the format of 'username:password'", - Default: "", // To prevent input - }, - - "gzip": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Compress the state data using gzip", - Default: false, - }, - - "lock": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Lock state access", - Default: true, - }, - - "ca_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CACERT", 
""), - }, - - "cert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), - }, - - "key_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A path to a PEM-encoded private key, required if cert_file is specified.", - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - client *consulapi.Client - configData *schema.ResourceData - lock bool -} - -func (b *Backend) configure(ctx context.Context) error { - // Grab the resource data - b.configData = schema.FromContextBackendConfig(ctx) - - // Store the lock information - b.lock = b.configData.Get("lock").(bool) - - data := b.configData - - // Configure the client - config := consulapi.DefaultConfig() - - // replace the default Transport Dialer to reduce the KeepAlive - config.Transport.DialContext = dialContext - - if v, ok := data.GetOk("access_token"); ok && v.(string) != "" { - config.Token = v.(string) - } - if v, ok := data.GetOk("address"); ok && v.(string) != "" { - config.Address = v.(string) - } - if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { - config.Scheme = v.(string) - } - if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { - config.Datacenter = v.(string) - } - - if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { - config.TLSConfig.CAFile = v.(string) - } - if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { - config.TLSConfig.CertFile = v.(string) - } - if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { - config.TLSConfig.KeyFile = v.(string) - } - - if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" { - auth := 
v.(string) - - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &consulapi.HttpBasicAuth{ - Username: username, - Password: password, - } - } - - client, err := consulapi.NewClient(config) - if err != nil { - return err - } - - b.client = client - return nil -} - -// dialContext is the DialContext function for the consul client transport. -// This is stored in a package var to inject a different dialer for tests. -var dialContext = (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 17 * time.Second, -}).DialContext diff --git a/internal/backend/remote-state/consul/backend_state.go b/internal/backend/remote-state/consul/backend_state.go deleted file mode 100644 index 98934f3197b5..000000000000 --- a/internal/backend/remote-state/consul/backend_state.go +++ /dev/null @@ -1,154 +0,0 @@ -package consul - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -const ( - keyEnvPrefix = "-env:" -) - -func (b *Backend) Workspaces() ([]string, error) { - // List our raw path - prefix := b.configData.Get("path").(string) + keyEnvPrefix - keys, _, err := b.client.KV().Keys(prefix, "/", nil) - if err != nil { - return nil, err - } - - // Find the envs, we use a map since we can get duplicates with - // path suffixes. - envs := map[string]struct{}{} - for _, key := range keys { - // Consul should ensure this but it doesn't hurt to check again - if strings.HasPrefix(key, prefix) { - key = strings.TrimPrefix(key, prefix) - - // Ignore anything with a "/" in it since we store the state - // directly in a key not a directory. 
- if idx := strings.IndexRune(key, '/'); idx >= 0 { - continue - } - - envs[key] = struct{}{} - } - } - - result := make([]string, 1, len(envs)+1) - result[0] = backend.DefaultStateName - for k, _ := range envs { - result = append(result, k) - } - - return result, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - // Determine the path of the data - path := b.path(name) - - // Delete it. We just delete it without any locking since - // the DeleteState API is documented as such. - _, err := b.client.KV().Delete(path, nil) - return err -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - // Determine the path of the data - path := b.path(name) - - // Determine whether to gzip or not - gzip := b.configData.Get("gzip").(bool) - - // Build the state client - var stateMgr = &remote.State{ - Client: &RemoteClient{ - Client: b.client, - Path: path, - GZip: gzip, - lockState: b.lock, - }, - } - - if !b.lock { - stateMgr.DisableLocks() - } - - // the default state always exists - if name == backend.DefaultStateName { - return stateMgr, nil - } - - // Grab a lock, we use this to write an empty state if one doesn't - // exist already. We have to write an empty state as a sentinel value - // so States() knows it exists. 
- lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := stateMgr.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock state in Consul: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - - return parent - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - return stateMgr, nil -} - -func (b *Backend) path(name string) string { - path := b.configData.Get("path").(string) - if name != backend.DefaultStateName { - path += fmt.Sprintf("%s%s", keyEnvPrefix, name) - } - - return path -} - -const errStateUnlock = ` -Error unlocking Consul state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. -The Consul backend acquires a lock during initialization to ensure -the minimum required key/values are prepared. 
-` diff --git a/internal/backend/remote-state/consul/backend_test.go b/internal/backend/remote-state/consul/backend_test.go deleted file mode 100644 index 6b6700825aa6..000000000000 --- a/internal/backend/remote-state/consul/backend_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package consul - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/terraform/internal/backend" -) - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func newConsulTestServer(t *testing.T) *testutil.TestServer { - if os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == "" { - t.Skipf("consul server tests require setting TF_ACC or TF_CONSUL_TEST") - } - - srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - c.LogLevel = "warn" - - if !flag.Parsed() { - flag.Parse() - } - - if !testing.Verbose() { - c.Stdout = ioutil.Discard - c.Stderr = ioutil.Discard - } - }) - - if err != nil { - t.Fatalf("failed to create consul test server: %s", err) - } - - srv.WaitForSerfCheck(t) - srv.WaitForLeader(t) - - return srv -} - -func TestBackend(t *testing.T) { - srv := newConsulTestServer(t) - - path := fmt.Sprintf("tf-unit/%s", time.Now().String()) - - // Get the backend. We need two to test locking. - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - // Test - backend.TestBackendStates(t, b1) - backend.TestBackendStateLocks(t, b1, b2) -} - -func TestBackend_lockDisabled(t *testing.T) { - srv := newConsulTestServer(t) - - path := fmt.Sprintf("tf-unit/%s", time.Now().String()) - - // Get the backend. We need two to test locking. 
- b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - "lock": false, - })) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path + "different", // Diff so locking test would fail if it was locking - "lock": false, - })) - - // Test - backend.TestBackendStates(t, b1) - backend.TestBackendStateLocks(t, b1, b2) -} - -func TestBackend_gzip(t *testing.T) { - srv := newConsulTestServer(t) - - // Get the backend - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": fmt.Sprintf("tf-unit/%s", time.Now().String()), - "gzip": true, - })) - - // Test - backend.TestBackendStates(t, b) -} diff --git a/internal/backend/remote-state/consul/client.go b/internal/backend/remote-state/consul/client.go deleted file mode 100644 index 31dfecb0fd38..000000000000 --- a/internal/backend/remote-state/consul/client.go +++ /dev/null @@ -1,682 +0,0 @@ -package consul - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/md5" - "encoding/json" - "errors" - "fmt" - "log" - "strings" - "sync" - "time" - - consulapi "github.com/hashicorp/consul/api" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -const ( - lockSuffix = "/.lock" - lockInfoSuffix = "/.lockinfo" - - // The Session TTL associated with this lock. - lockSessionTTL = "15s" - - // the delay time from when a session is lost to when the - // lock is released by the server - lockDelay = 5 * time.Second - // interval between attempts to reacquire a lost lock - lockReacquireInterval = 2 * time.Second -) - -var lostLockErr = errors.New("consul lock was lost") - -// RemoteClient is a remote client that stores data in Consul. 
-type RemoteClient struct { - Client *consulapi.Client - Path string - GZip bool - - mu sync.Mutex - // lockState is true if we're using locks - lockState bool - - // The index of the last state we wrote. - // If this is > 0, Put will perform a CAS to ensure that the state wasn't - // changed during the operation. This is important even with locks, because - // if the client loses the lock for some reason, then reacquires it, we - // need to make sure that the state was not modified. - modifyIndex uint64 - - consulLock *consulapi.Lock - lockCh <-chan struct{} - - info *statemgr.LockInfo - - // cancel our goroutine which is monitoring the lock to automatically - // reacquire it when possible. - monitorCancel context.CancelFunc - monitorWG sync.WaitGroup - - // sessionCancel cancels the Context use for session.RenewPeriodic, and is - // called when unlocking, or before creating a new lock if the lock is - // lost. - sessionCancel context.CancelFunc -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - c.mu.Lock() - defer c.mu.Unlock() - - kv := c.Client.KV() - - chunked, hash, chunks, pair, err := c.chunkedMode() - if err != nil { - return nil, err - } - if pair == nil { - return nil, nil - } - - c.modifyIndex = pair.ModifyIndex - - var payload []byte - if chunked { - for _, c := range chunks { - pair, _, err := kv.Get(c, nil) - if err != nil { - return nil, err - } - if pair == nil { - return nil, fmt.Errorf("Key %q could not be found", c) - } - payload = append(payload, pair.Value[:]...) 
- } - } else { - payload = pair.Value - } - - // If the payload starts with 0x1f, it's gzip, not json - if len(payload) >= 1 && payload[0] == '\x1f' { - payload, err = uncompressState(payload) - if err != nil { - return nil, err - } - } - - md5 := md5.Sum(payload) - - if hash != "" && fmt.Sprintf("%x", md5) != hash { - return nil, fmt.Errorf("The remote state does not match the expected hash") - } - - return &remote.Payload{ - Data: payload, - MD5: md5[:], - }, nil -} - -func (c *RemoteClient) Put(data []byte) error { - // The state can be stored in 4 different ways, based on the payload size - // and whether the user enabled gzip: - // - single entry mode with plain JSON: a single JSON is stored at - // "tfstate/my_project" - // - single entry mode gzip: the JSON payload is first gziped and stored at - // "tfstate/my_project" - // - chunked mode with plain JSON: the JSON payload is split in pieces and - // stored like so: - // - "tfstate/my_project" -> a JSON payload that contains the path of - // the chunks and an MD5 sum like so: - // { - // "current-hash": "abcdef1234", - // "chunks": [ - // "tfstate/my_project/tfstate.abcdef1234/0", - // "tfstate/my_project/tfstate.abcdef1234/1", - // "tfstate/my_project/tfstate.abcdef1234/2", - // ] - // } - // - "tfstate/my_project/tfstate.abcdef1234/0" -> The first chunk - // - "tfstate/my_project/tfstate.abcdef1234/1" -> The next one - // - ... - // - chunked mode with gzip: the same system but we gziped the JSON payload - // before splitting it in chunks - // - // When overwritting the current state, we need to clean the old chunks if - // we were in chunked mode (no matter whether we need to use chunks for the - // new one). 
To do so based on the 4 possibilities above we look at the - // value at "tfstate/my_project" and if it is: - // - absent then it's a new state and there will be nothing to cleanup, - // - not a JSON payload we were in single entry mode with gzip so there will - // be nothing to cleanup - // - a JSON payload, then we were either single entry mode with plain JSON - // or in chunked mode. To differentiate between the two we look whether a - // "current-hash" key is present in the payload. If we find one we were - // in chunked mode and we will need to remove the old chunks (whether or - // not we were using gzip does not matter in that case). - - c.mu.Lock() - defer c.mu.Unlock() - - kv := c.Client.KV() - - // First we determine what mode we were using and to prepare the cleanup - chunked, hash, _, _, err := c.chunkedMode() - if err != nil { - return err - } - cleanupOldChunks := func() {} - if chunked { - cleanupOldChunks = func() { - // We ignore all errors that can happen here because we already - // saved the new state and there is no way to return a warning to - // the user. We may end up with dangling chunks but there is no way - // to be sure we won't. - path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) - kv.DeleteTree(path, nil) - } - } - - payload := data - if c.GZip { - if compressedState, err := compressState(data); err == nil { - payload = compressedState - } else { - return err - } - } - - // default to doing a CAS - verb := consulapi.KVCAS - - // Assume a 0 index doesn't need a CAS for now, since we are either - // creating a new state or purposely overwriting one. - if c.modifyIndex == 0 { - verb = consulapi.KVSet - } - - // The payload may be too large to store in a single KV entry in Consul. 
We - // could try to determine whether it will fit or not before sending the - // request but since we are using the Transaction API and not the KV API, - // it grows by about a 1/3 when it is base64 encoded plus the overhead of - // the fields specific to the Transaction API. - // Rather than trying to calculate the overhead (which could change from - // one version of Consul to another, and between Consul Community Edition - // and Consul Enterprise), we try to send the whole state in one request, if - // it fails because it is too big we then split it in chunks and send each - // chunk separately. - // When splitting in chunks, we make each chunk 524288 bits, which is the - // default max size for raft. If the user changed it, we still may send - // chunks too big and fail but this is not a setting that should be fiddled - // with anyway. - - store := func(payload []byte) error { - // KV.Put doesn't return the new index, so we use a single operation - // transaction to get the new index with a single request. 
- txOps := consulapi.KVTxnOps{ - &consulapi.KVTxnOp{ - Verb: verb, - Key: c.Path, - Value: payload, - Index: c.modifyIndex, - }, - } - - ok, resp, _, err := kv.Txn(txOps, nil) - if err != nil { - return err - } - // transaction was rolled back - if !ok { - return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors) - } - - if len(resp.Results) != 1 { - // this probably shouldn't happen - return fmt.Errorf("expected on 1 response value, got: %d", len(resp.Results)) - } - - c.modifyIndex = resp.Results[0].ModifyIndex - - // We remove all the old chunks - cleanupOldChunks() - - return nil - } - - if err = store(payload); err == nil { - // The payload was small enough to be stored - return nil - } else if !strings.Contains(err.Error(), "too large") { - // We failed for some other reason, report this to the user - return err - } - - // The payload was too large so we split it in multiple chunks - - md5 := md5.Sum(data) - chunks := split(payload, 524288) - chunkPaths := make([]string, 0) - - // First we write the new chunks - for i, p := range chunks { - path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%x/%d", md5, i) - chunkPaths = append(chunkPaths, path) - _, err := kv.Put(&consulapi.KVPair{ - Key: path, - Value: p, - }, nil) - - if err != nil { - return err - } - } - - // Then we update the link to point to the new chunks - payload, err = json.Marshal(map[string]interface{}{ - "current-hash": fmt.Sprintf("%x", md5), - "chunks": chunkPaths, - }) - if err != nil { - return err - } - return store(payload) -} - -func (c *RemoteClient) Delete() error { - c.mu.Lock() - defer c.mu.Unlock() - - kv := c.Client.KV() - - chunked, hash, _, _, err := c.chunkedMode() - if err != nil { - return err - } - - _, err = kv.Delete(c.Path, nil) - - // If there were chunks we need to remove them - if chunked { - path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) - kv.DeleteTree(path, nil) - } - - return err -} - -func (c 
*RemoteClient) lockPath() string { - // we sanitize the path for the lock as Consul does not like having - // two consecutive slashes for the lock path - return strings.TrimRight(c.Path, "/") -} - -func (c *RemoteClient) putLockInfo(info *statemgr.LockInfo) error { - info.Path = c.Path - info.Created = time.Now().UTC() - - kv := c.Client.KV() - _, err := kv.Put(&consulapi.KVPair{ - Key: c.lockPath() + lockInfoSuffix, - Value: info.Marshal(), - }, nil) - - return err -} - -func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { - path := c.lockPath() + lockInfoSuffix - pair, _, err := c.Client.KV().Get(path, nil) - if err != nil { - return nil, err - } - if pair == nil { - return nil, nil - } - - li := &statemgr.LockInfo{} - err = json.Unmarshal(pair.Value, li) - if err != nil { - return nil, fmt.Errorf("error unmarshaling lock info: %s", err) - } - - return li, nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.lockState { - return "", nil - } - - c.info = info - - // These checks only are to ensure we strictly follow the specification. - // Terraform shouldn't ever re-lock, so provide errors for the 2 possible - // states if this is called. - select { - case <-c.lockCh: - // We had a lock, but lost it. - return "", errors.New("lost consul lock, cannot re-lock") - default: - if c.lockCh != nil { - // we have an active lock already - return "", fmt.Errorf("state %q already locked", c.Path) - } - } - - return c.lock() -} - -// the lock implementation. -// Only to be called while holding Client.mu -func (c *RemoteClient) lock() (string, error) { - // We create a new session here, so it can be canceled when the lock is - // lost or unlocked. 
- lockSession, err := c.createSession() - if err != nil { - return "", err - } - - // store the session ID for correlation with consul logs - c.info.Info = "consul session: " + lockSession - - // A random lock ID has been generated but we override it with the session - // ID as this will make it easier to manually invalidate the session - // if needed. - c.info.ID = lockSession - - opts := &consulapi.LockOptions{ - Key: c.lockPath() + lockSuffix, - Session: lockSession, - - // only wait briefly, so terraform has the choice to fail fast or - // retry as needed. - LockWaitTime: time.Second, - LockTryOnce: true, - - // Don't let the lock monitor give up right away, as it's possible the - // session is still OK. While the session is refreshed at a rate of - // TTL/2, the lock monitor is an idle blocking request and is more - // susceptible to being closed by a lower network layer. - MonitorRetries: 5, - // - // The delay between lock monitor retries. - // While the session has a 15s TTL plus a 5s wait period on a lost - // lock, if we can't get our lock back in 10+ seconds something is - // wrong so we're going to drop the session and start over. - MonitorRetryTime: 2 * time.Second, - } - - c.consulLock, err = c.Client.LockOpts(opts) - if err != nil { - return "", err - } - - lockErr := &statemgr.LockError{} - - lockCh, err := c.consulLock.Lock(make(chan struct{})) - if err != nil { - lockErr.Err = err - return "", lockErr - } - - if lockCh == nil { - lockInfo, e := c.getLockInfo() - if e != nil { - lockErr.Err = e - return "", lockErr - } - - lockErr.Info = lockInfo - - return "", lockErr - } - - c.lockCh = lockCh - - err = c.putLockInfo(c.info) - if err != nil { - if unlockErr := c.unlock(c.info.ID); unlockErr != nil { - err = multierror.Append(err, unlockErr) - } - - return "", err - } - - // Start a goroutine to monitor the lock state. - // If we lose the lock to due communication issues with the consul agent, - // attempt to immediately reacquire the lock. 
Put will verify the integrity - // of the state by using a CAS operation. - ctx, cancel := context.WithCancel(context.Background()) - c.monitorCancel = cancel - c.monitorWG.Add(1) - go func() { - defer c.monitorWG.Done() - select { - case <-c.lockCh: - log.Println("[ERROR] lost consul lock") - for { - c.mu.Lock() - // We lost our lock, so we need to cancel the session too. - // The CancelFunc is only replaced while holding Client.mu, so - // this is safe to call here. This will be replaced by the - // lock() call below. - c.sessionCancel() - - c.consulLock = nil - _, err := c.lock() - c.mu.Unlock() - - if err != nil { - // We failed to get the lock, keep trying as long as - // terraform is running. There may be changes in progress, - // so there's no use in aborting. Either we eventually - // reacquire the lock, or a Put will fail on a CAS. - log.Printf("[ERROR] could not reacquire lock: %s", err) - time.Sleep(lockReacquireInterval) - - select { - case <-ctx.Done(): - return - default: - } - continue - } - - // if the error was nil, the new lock started a new copy of - // this goroutine. - return - } - - case <-ctx.Done(): - return - } - }() - - if testLockHook != nil { - testLockHook() - } - - return c.info.ID, nil -} - -// called after a lock is acquired -var testLockHook func() - -func (c *RemoteClient) createSession() (string, error) { - // create the context first. Even if the session creation fails, we assume - // that the CancelFunc is always callable. 
- ctx, cancel := context.WithCancel(context.Background()) - c.sessionCancel = cancel - - session := c.Client.Session() - se := &consulapi.SessionEntry{ - Name: consulapi.DefaultLockSessionName, - TTL: lockSessionTTL, - LockDelay: lockDelay, - } - - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - - log.Println("[INFO] created consul lock session", id) - - // keep the session renewed - go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) - - return id, nil -} - -func (c *RemoteClient) Unlock(id string) error { - c.mu.Lock() - defer c.mu.Unlock() - - if !c.lockState { - return nil - } - - return c.unlock(id) -} - -// the unlock implementation. -// Only to be called while holding Client.mu -func (c *RemoteClient) unlock(id string) error { - // This method can be called in two circumstances: - // - when the plan apply or destroy operation finishes and the lock needs to be released, - // the watchdog stopped and the session closed - // - when the user calls `terraform force-unlock ` in which case - // we only need to release the lock. - - if c.consulLock == nil || c.lockCh == nil { - // The user called `terraform force-unlock `, we just destroy - // the session which will release the lock, clean the KV store and quit. - - _, err := c.Client.Session().Destroy(id, nil) - if err != nil { - return err - } - // We ignore the errors that may happen during cleanup - kv := c.Client.KV() - kv.Delete(c.lockPath()+lockSuffix, nil) - kv.Delete(c.lockPath()+lockInfoSuffix, nil) - - return nil - } - - // cancel our monitoring goroutine - c.monitorCancel() - - defer func() { - c.consulLock = nil - - // The consul session is only used for this single lock, so cancel it - // after we unlock. - // The session is only created and replaced holding Client.mu, so the - // CancelFunc must be non-nil. 
- c.sessionCancel() - }() - - select { - case <-c.lockCh: - return lostLockErr - default: - } - - kv := c.Client.KV() - - var errs error - - if _, err := kv.Delete(c.lockPath()+lockInfoSuffix, nil); err != nil { - errs = multierror.Append(errs, err) - } - - if err := c.consulLock.Unlock(); err != nil { - errs = multierror.Append(errs, err) - } - - // the monitoring goroutine may be in a select on the lockCh, so we need to - // wait for it to return before changing the value. - c.monitorWG.Wait() - c.lockCh = nil - - // This is only cleanup, and will fail if the lock was immediately taken by - // another client, so we don't report an error to the user here. - c.consulLock.Destroy() - - return errs -} - -func compressState(data []byte) ([]byte, error) { - b := new(bytes.Buffer) - gz := gzip.NewWriter(b) - if _, err := gz.Write(data); err != nil { - return nil, err - } - if err := gz.Flush(); err != nil { - return nil, err - } - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func uncompressState(data []byte) ([]byte, error) { - b := new(bytes.Buffer) - gz, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - b.ReadFrom(gz) - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func split(payload []byte, limit int) [][]byte { - var chunk []byte - chunks := make([][]byte, 0, len(payload)/limit+1) - for len(payload) >= limit { - chunk, payload = payload[:limit], payload[limit:] - chunks = append(chunks, chunk) - } - if len(payload) > 0 { - chunks = append(chunks, payload[:]) - } - return chunks -} - -func (c *RemoteClient) chunkedMode() (bool, string, []string, *consulapi.KVPair, error) { - kv := c.Client.KV() - pair, _, err := kv.Get(c.Path, nil) - if err != nil { - return false, "", nil, pair, err - } - if pair != nil { - var d map[string]interface{} - err = json.Unmarshal(pair.Value, &d) - // If there is an error when unmarshaling the payload, the state has - // 
probably been gziped in single entry mode. - if err == nil { - // If we find the "current-hash" key we were in chunked mode - hash, ok := d["current-hash"] - if ok { - chunks := make([]string, 0) - for _, c := range d["chunks"].([]interface{}) { - chunks = append(chunks, c.(string)) - } - return true, hash.(string), chunks, pair, nil - } - } - } - return false, "", nil, pair, nil -} diff --git a/internal/backend/remote-state/consul/client_test.go b/internal/backend/remote-state/consul/client_test.go deleted file mode 100644 index 2a4acf06b9b2..000000000000 --- a/internal/backend/remote-state/consul/client_test.go +++ /dev/null @@ -1,491 +0,0 @@ -package consul - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math/rand" - "net" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - srv := newConsulTestServer(t) - - testCases := []string{ - fmt.Sprintf("tf-unit/%s", time.Now().String()), - fmt.Sprintf("tf-unit/%s/", time.Now().String()), - } - - for _, path := range testCases { - t.Run(path, func(*testing.T) { - // Get the backend - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - // Grab the client - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Test - remote.TestClient(t, state.(*remote.State).Client) - }) - } -} - -// test the gzip functionality of the client -func TestRemoteClient_gzipUpgrade(t *testing.T) { - srv := newConsulTestServer(t) - - statePath := fmt.Sprintf("tf-unit/%s", time.Now().String()) - - // Get the backend - b := 
backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": statePath, - })) - - // Grab the client - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Test - remote.TestClient(t, state.(*remote.State).Client) - - // create a new backend with gzip - b = backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": statePath, - "gzip": true, - })) - - // Grab the client - state, err = b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Test - remote.TestClient(t, state.(*remote.State).Client) -} - -// TestConsul_largeState tries to write a large payload using the Consul state -// manager, as there is a limit to the size of the values in the KV store it -// will need to be split up before being saved and put back together when read. -func TestConsul_largeState(t *testing.T) { - srv := newConsulTestServer(t) - - path := "tf-unit/test-large-state" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - c := s.(*remote.State).Client.(*RemoteClient) - c.Path = path - - // testPaths fails the test if the keys found at the prefix don't match - // what is expected - testPaths := func(t *testing.T, expected []string) { - kv := c.Client.KV() - pairs, _, err := kv.List(c.Path, nil) - if err != nil { - t.Fatal(err) - } - res := make([]string, 0) - for _, p := range pairs { - res = append(res, p.Key) - } - if !reflect.DeepEqual(res, expected) { - t.Fatalf("Wrong keys: %#v", res) - } - } - - testPayload := func(t *testing.T, data map[string]string, keys []string) { - payload, err := json.Marshal(data) - if err != nil { - t.Fatal(err) - } - err = c.Put(payload) - if err != nil { - t.Fatal("could 
not put payload", err) - } - - remote, err := c.Get() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(payload, remote.Data) { - t.Fatal("the data do not match") - } - - testPaths(t, keys) - } - - // The default limit for the size of the value in Consul is 524288 bytes - testPayload( - t, - map[string]string{ - "foo": strings.Repeat("a", 524288+2), - }, - []string{ - "tf-unit/test-large-state", - "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/0", - "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/1", - }, - ) - - // This payload is just short enough to be stored but will be bigger when - // going through the Transaction API as it will be base64 encoded - testPayload( - t, - map[string]string{ - "foo": strings.Repeat("a", 524288-10), - }, - []string{ - "tf-unit/test-large-state", - "tf-unit/test-large-state/tfstate.4f407ace136a86521fd0d366972fe5c7/0", - }, - ) - - // We try to replace the payload with a small one, the old chunks should be removed - testPayload( - t, - map[string]string{"var": "a"}, - []string{"tf-unit/test-large-state"}, - ) - - // Test with gzip and chunks - b = backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - "gzip": true, - })) - - s, err = b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - c = s.(*remote.State).Client.(*RemoteClient) - c.Path = path - - // We need a long random string so it results in multiple chunks even after - // being gziped - - // We use a fixed seed so the test can be reproductible - rand.Seed(1234) - RandStringRunes := func(n int) string { - var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) - } - - testPayload( - t, - map[string]string{ - "bar": RandStringRunes(5 * (524288 + 2)), - }, - []string{ - "tf-unit/test-large-state", - 
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/0", - "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/1", - "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/2", - "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/3", - }, - ) - - // Deleting the state should remove all chunks - err = c.Delete() - if err != nil { - t.Fatal(err) - } - testPaths(t, []string{}) -} - -func TestConsul_stateLock(t *testing.T) { - srv := newConsulTestServer(t) - - testCases := []string{ - fmt.Sprintf("tf-unit/%s", time.Now().String()), - fmt.Sprintf("tf-unit/%s/", time.Now().String()), - } - - for _, path := range testCases { - t.Run(path, func(*testing.T) { - // create 2 instances to get 2 remote.Clients - sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })).StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })).StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client) - }) - } -} - -func TestConsul_destroyLock(t *testing.T) { - srv := newConsulTestServer(t) - - testCases := []string{ - fmt.Sprintf("tf-unit/%s", time.Now().String()), - fmt.Sprintf("tf-unit/%s/", time.Now().String()), - } - - testLock := func(client *RemoteClient, lockPath string) { - // get the lock val - pair, _, err := client.Client.KV().Get(lockPath, nil) - if err != nil { - t.Fatal(err) - } - if pair != nil { - t.Fatalf("lock key not cleaned up at: %s", pair.Key) - } - } - - for _, path := range testCases { - t.Run(path, func(*testing.T) { - // Get the backend - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - 
// Grab the client - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("err: %s", err) - } - - clientA := s.(*remote.State).Client.(*RemoteClient) - - info := statemgr.NewLockInfo() - id, err := clientA.Lock(info) - if err != nil { - t.Fatal(err) - } - - lockPath := clientA.Path + lockSuffix - - if err := clientA.Unlock(id); err != nil { - t.Fatal(err) - } - - testLock(clientA, lockPath) - - // The release the lock from a second client to test the - // `terraform force-unlock ` functionnality - s, err = b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("err: %s", err) - } - - clientB := s.(*remote.State).Client.(*RemoteClient) - - info = statemgr.NewLockInfo() - id, err = clientA.Lock(info) - if err != nil { - t.Fatal(err) - } - - if err := clientB.Unlock(id); err != nil { - t.Fatal(err) - } - - testLock(clientA, lockPath) - - err = clientA.Unlock(id) - - if err == nil { - t.Fatal("consul lock should have been lost") - } - if err.Error() != "consul lock was lost" { - t.Fatal("got wrong error", err) - } - }) - } -} - -func TestConsul_lostLock(t *testing.T) { - srv := newConsulTestServer(t) - - path := fmt.Sprintf("tf-unit/%s", time.Now().String()) - - // create 2 instances to get 2 remote.Clients - sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })).StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path + "-not-used", - })).StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test-lost-lock" - id, err := sA.Lock(info) - if err != nil { - t.Fatal(err) - } - - reLocked := make(chan struct{}) - testLockHook = func() { - close(reLocked) - testLockHook = nil - } - - // now we use the second client to break the 
lock - kv := sB.(*remote.State).Client.(*RemoteClient).Client.KV() - _, err = kv.Delete(path+lockSuffix, nil) - if err != nil { - t.Fatal(err) - } - - <-reLocked - - if err := sA.Unlock(id); err != nil { - t.Fatal(err) - } -} - -func TestConsul_lostLockConnection(t *testing.T) { - srv := newConsulTestServer(t) - - // create an "unreliable" network by closing all the consul client's - // network connections - conns := &unreliableConns{} - origDialFn := dialContext - defer func() { - dialContext = origDialFn - }() - dialContext = conns.DialContext - - path := fmt.Sprintf("tf-unit/%s", time.Now().String()) - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "address": srv.HTTPAddr, - "path": path, - })) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test-lost-lock-connection" - id, err := s.Lock(info) - if err != nil { - t.Fatal(err) - } - - // kill the connection a few times - for i := 0; i < 3; i++ { - dialed := conns.dialedDone() - // kill any open connections - conns.Kill() - // wait for a new connection to be dialed, and kill it again - <-dialed - } - - if err := s.Unlock(id); err != nil { - t.Fatal("unlock error:", err) - } -} - -type unreliableConns struct { - sync.Mutex - conns []net.Conn - dialCallback func() -} - -func (u *unreliableConns) DialContext(ctx context.Context, netw, addr string) (net.Conn, error) { - u.Lock() - defer u.Unlock() - - dialer := &net.Dialer{} - conn, err := dialer.DialContext(ctx, netw, addr) - if err != nil { - return nil, err - } - - u.conns = append(u.conns, conn) - - if u.dialCallback != nil { - u.dialCallback() - } - - return conn, nil -} - -func (u *unreliableConns) dialedDone() chan struct{} { - u.Lock() - defer u.Unlock() - dialed := make(chan struct{}) - u.dialCallback = func() { - defer close(dialed) - u.dialCallback = nil - } - - return dialed -} - -// Kill these with a deadline, just to 
make sure we don't end up with any EOFs -// that get ignored. -func (u *unreliableConns) Kill() { - u.Lock() - defer u.Unlock() - - for _, conn := range u.conns { - conn.(*net.TCPConn).SetDeadline(time.Now()) - } - u.conns = nil -} diff --git a/internal/backend/remote-state/cos/backend.go b/internal/backend/remote-state/cos/backend.go deleted file mode 100644 index aebf69aabb89..000000000000 --- a/internal/backend/remote-state/cos/backend.go +++ /dev/null @@ -1,335 +0,0 @@ -package cos - -import ( - "context" - "fmt" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" - "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" - sts "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sts/v20180813" - tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" - "github.com/tencentyun/cos-go-sdk-v5" -) - -// Default value from environment variable -const ( - PROVIDER_SECRET_ID = "TENCENTCLOUD_SECRET_ID" - PROVIDER_SECRET_KEY = "TENCENTCLOUD_SECRET_KEY" - PROVIDER_SECURITY_TOKEN = "TENCENTCLOUD_SECURITY_TOKEN" - PROVIDER_REGION = "TENCENTCLOUD_REGION" - PROVIDER_ASSUME_ROLE_ARN = "TENCENTCLOUD_ASSUME_ROLE_ARN" - PROVIDER_ASSUME_ROLE_SESSION_NAME = "TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME" - PROVIDER_ASSUME_ROLE_SESSION_DURATION = "TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION" -) - -// Backend implements "backend".Backend for tencentCloud cos -type Backend struct { - *schema.Backend - credential *common.Credential - - cosContext context.Context - cosClient *cos.Client - tagClient *tag.Client - stsClient *sts.Client - - region string - bucket string - prefix string - key string - encrypt bool - acl string -} - -// New creates a new backend for TencentCloud cos remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "secret_id": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_ID, nil), - Description: "Secret id of Tencent Cloud", - }, - "secret_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_KEY, nil), - Description: "Secret key of Tencent Cloud", - Sensitive: true, - }, - "security_token": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECURITY_TOKEN, nil), - Description: "TencentCloud Security Token of temporary access credentials. It can be sourced from the `TENCENTCLOUD_SECURITY_TOKEN` environment variable. Notice: for supported products, please refer to: [temporary key supported products](https://intl.cloud.tencent.com/document/product/598/10588).", - Sensitive: true, - }, - "region": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_REGION, nil), - Description: "The region of the COS bucket", - InputDefault: "ap-guangzhou", - }, - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the COS bucket", - }, - "prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The directory for saving the state file in bucket", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - prefix := v.(string) - if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { - return nil, []error{fmt.Errorf("prefix must not start with '/' or './'")} - } - return nil, nil - }, - }, - "key": { - Type: schema.TypeString, - Optional: true, - Description: "The path for saving the state file in bucket", - Default: "terraform.tfstate", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { - return nil, []error{fmt.Errorf("key can not start and end with 
'/'")} - } - return nil, nil - }, - }, - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether to enable server side encryption of the state file", - Default: true, - }, - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Object ACL to be applied to the state file", - Default: "private", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - value := v.(string) - if value != "private" && value != "public-read" { - return nil, []error{fmt.Errorf( - "acl value invalid, expected %s or %s, got %s", - "private", "public-read", value)} - } - return nil, nil - }, - }, - "accelerate": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether to enable global Acceleration", - Default: false, - }, - "assume_role": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Description: "The `assume_role` block. If provided, terraform will attempt to assume this role using the supplied credentials.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role_arn": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_ARN, nil), - Description: "The ARN of the role to assume. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_ARN`.", - }, - "session_name": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_SESSION_NAME, nil), - Description: "The session name to use when making the AssumeRole call. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME`.", - }, - "session_duration": { - Type: schema.TypeInt, - Required: true, - DefaultFunc: func() (interface{}, error) { - if v := os.Getenv(PROVIDER_ASSUME_ROLE_SESSION_DURATION); v != "" { - return strconv.Atoi(v) - } - return 7200, nil - }, - ValidateFunc: validateIntegerInRange(0, 43200), - Description: "The duration of the session when making the AssumeRole call. 
Its value ranges from 0 to 43200(seconds), and default is 7200 seconds. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION`.", - }, - "policy": { - Type: schema.TypeString, - Optional: true, - Description: "A more restrictive policy when making the AssumeRole call. Its content must not contains `principal` elements. Notice: more syntax references, please refer to: [policies syntax logic](https://intl.cloud.tencent.com/document/product/598/10603).", - }, - }, - }, - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - - return result -} - -func validateIntegerInRange(min, max int64) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := int64(v.(int)) - if value < min { - errors = append(errors, fmt.Errorf( - "%q cannot be lower than %d: %d", k, min, value)) - } - if value > max { - errors = append(errors, fmt.Errorf( - "%q cannot be higher than %d: %d", k, max, value)) - } - return - } -} - -// configure init cos client -func (b *Backend) configure(ctx context.Context) error { - if b.cosClient != nil { - return nil - } - - b.cosContext = ctx - data := schema.FromContextBackendConfig(b.cosContext) - - b.region = data.Get("region").(string) - b.bucket = data.Get("bucket").(string) - b.prefix = data.Get("prefix").(string) - b.key = data.Get("key").(string) - b.encrypt = data.Get("encrypt").(bool) - b.acl = data.Get("acl").(string) - - var ( - u *url.URL - err error - ) - accelerate := data.Get("accelerate").(bool) - if accelerate { - u, err = url.Parse(fmt.Sprintf("https://%s.cos.accelerate.myqcloud.com", b.bucket)) - } else { - u, err = url.Parse(fmt.Sprintf("https://%s.cos.%s.myqcloud.com", b.bucket, b.region)) - } - if err != nil { - return err - } - - secretId := data.Get("secret_id").(string) - secretKey := data.Get("secret_key").(string) - securityToken := data.Get("security_token").(string) - - // init credential by AKSK & TOKEN - b.credential 
= common.NewTokenCredential(secretId, secretKey, securityToken) - // update credential if assume role exist - err = handleAssumeRole(data, b) - if err != nil { - return err - } - - b.cosClient = cos.NewClient( - &cos.BaseURL{BucketURL: u}, - &http.Client{ - Timeout: 60 * time.Second, - Transport: &cos.AuthorizationTransport{ - SecretID: b.credential.SecretId, - SecretKey: b.credential.SecretKey, - SessionToken: b.credential.Token, - }, - }, - ) - - b.tagClient = b.UseTagClient() - return err -} - -func handleAssumeRole(data *schema.ResourceData, b *Backend) error { - assumeRoleList := data.Get("assume_role").(*schema.Set).List() - if len(assumeRoleList) == 1 { - assumeRole := assumeRoleList[0].(map[string]interface{}) - assumeRoleArn := assumeRole["role_arn"].(string) - assumeRoleSessionName := assumeRole["session_name"].(string) - assumeRoleSessionDuration := assumeRole["session_duration"].(int) - assumeRolePolicy := assumeRole["policy"].(string) - - err := b.updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName, assumeRoleSessionDuration, assumeRolePolicy) - if err != nil { - return err - } - } - return nil -} - -func (b *Backend) updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName string, assumeRoleSessionDuration int, assumeRolePolicy string) error { - // assume role by STS - request := sts.NewAssumeRoleRequest() - request.RoleArn = &assumeRoleArn - request.RoleSessionName = &assumeRoleSessionName - duration := uint64(assumeRoleSessionDuration) - request.DurationSeconds = &duration - if assumeRolePolicy != "" { - policy := url.QueryEscape(assumeRolePolicy) - request.Policy = &policy - } - - response, err := b.UseStsClient().AssumeRole(request) - if err != nil { - return err - } - // update credentials by result of assume role - b.credential = common.NewTokenCredential( - *response.Response.Credentials.TmpSecretId, - *response.Response.Credentials.TmpSecretKey, - *response.Response.Credentials.Token, - ) - - return nil -} - -// UseStsClient 
returns sts client for service -func (b *Backend) UseStsClient() *sts.Client { - if b.stsClient != nil { - return b.stsClient - } - cpf := b.NewClientProfile(300) - b.stsClient, _ = sts.NewClient(b.credential, b.region, cpf) - b.stsClient.WithHttpTransport(&LogRoundTripper{}) - - return b.stsClient -} - -// UseTagClient returns tag client for service -func (b *Backend) UseTagClient() *tag.Client { - if b.tagClient != nil { - return b.tagClient - } - cpf := b.NewClientProfile(300) - cpf.Language = "en-US" - b.tagClient, _ = tag.NewClient(b.credential, b.region, cpf) - return b.tagClient -} - -// NewClientProfile returns a new ClientProfile -func (b *Backend) NewClientProfile(timeout int) *profile.ClientProfile { - cpf := profile.NewClientProfile() - - // all request use method POST - cpf.HttpProfile.ReqMethod = "POST" - // request timeout - cpf.HttpProfile.ReqTimeout = timeout - - return cpf -} diff --git a/internal/backend/remote-state/cos/backend_state.go b/internal/backend/remote-state/cos/backend_state.go deleted file mode 100644 index 46bd3d3957d6..000000000000 --- a/internal/backend/remote-state/cos/backend_state.go +++ /dev/null @@ -1,185 +0,0 @@ -package cos - -import ( - "fmt" - "log" - "path" - "sort" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// Define file suffix -const ( - stateFileSuffix = ".tfstate" - lockFileSuffix = ".tflock" -) - -// Workspaces returns a list of names for the workspaces -func (b *Backend) Workspaces() ([]string, error) { - c, err := b.client("tencentcloud") - if err != nil { - return nil, err - } - - obs, err := c.getBucket(b.prefix) - log.Printf("[DEBUG] list all workspaces, objects: %v, error: %v", obs, err) - if err != nil { - return nil, err - } - - ws := []string{backend.DefaultStateName} - for _, vv := range obs { - // .tfstate - 
if !strings.HasSuffix(vv.Key, stateFileSuffix) { - continue - } - // default worksapce - if path.Join(b.prefix, b.key) == vv.Key { - continue - } - // // - prefix := strings.TrimRight(b.prefix, "/") + "/" - parts := strings.Split(strings.TrimPrefix(vv.Key, prefix), "/") - if len(parts) > 0 && parts[0] != "" { - ws = append(ws, parts[0]) - } - } - - sort.Strings(ws[1:]) - log.Printf("[DEBUG] list all workspaces, workspaces: %v", ws) - - return ws, nil -} - -// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted. -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - log.Printf("[DEBUG] delete workspace, workspace: %v", name) - - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("default state is not allow to delete") - } - - c, err := b.client(name) - if err != nil { - return err - } - - return c.Delete() -} - -// StateMgr manage the state, if the named state not exists, a new file will created -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - log.Printf("[DEBUG] state manager, current workspace: %v", name) - - c, err := b.client(name) - if err != nil { - return nil, err - } - stateMgr := &remote.State{Client: c} - - ws, err := b.Workspaces() - if err != nil { - return nil, err - } - - exists := false - for _, candidate := range ws { - if candidate == name { - exists = true - break - } - } - - if !exists { - log.Printf("[DEBUG] workspace %v not exists", name) - - // take a lock on this state while we write it - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := c.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("Failed to lock cos state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(e error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(unlockErrMsg, err, lockId) - } - return e - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = 
lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - } - - return stateMgr, nil -} - -// client returns a remoteClient for the named state. -func (b *Backend) client(name string) (*remoteClient, error) { - if strings.TrimSpace(name) == "" { - return nil, fmt.Errorf("state name not allow to be empty") - } - - return &remoteClient{ - cosContext: b.cosContext, - cosClient: b.cosClient, - tagClient: b.tagClient, - bucket: b.bucket, - stateFile: b.stateFile(name), - lockFile: b.lockFile(name), - encrypt: b.encrypt, - acl: b.acl, - }, nil -} - -// stateFile returns state file path by name -func (b *Backend) stateFile(name string) string { - if name == backend.DefaultStateName { - return path.Join(b.prefix, b.key) - } - return path.Join(b.prefix, name, b.key) -} - -// lockFile returns lock file path by name -func (b *Backend) lockFile(name string) string { - return b.stateFile(name) + lockFileSuffix -} - -// unlockErrMsg is error msg for unlock failed -const unlockErrMsg = ` -Unlocking the state file on TencentCloud cos backend failed: - -Error message: %v -Lock ID (gen): %s - -You may have to force-unlock this state in order to use it again. -The TencentCloud backend acquires a lock during initialization -to ensure the initial state file is created. 
-` diff --git a/internal/backend/remote-state/cos/backend_test.go b/internal/backend/remote-state/cos/backend_test.go deleted file mode 100644 index eb9038ff35f7..000000000000 --- a/internal/backend/remote-state/cos/backend_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package cos - -import ( - "crypto/md5" - "fmt" - "os" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" -) - -const ( - defaultPrefix = "" - defaultKey = "terraform.tfstate" -) - -// Testing Thanks to GCS - -func TestStateFile(t *testing.T) { - t.Parallel() - - cases := []struct { - prefix string - stateName string - key string - wantStateFile string - wantLockFile string - }{ - {"", "default", "default.tfstate", "default.tfstate", "default.tfstate.tflock"}, - {"", "default", "test.tfstate", "test.tfstate", "test.tfstate.tflock"}, - {"", "dev", "test.tfstate", "dev/test.tfstate", "dev/test.tfstate.tflock"}, - {"terraform/test", "default", "default.tfstate", "terraform/test/default.tfstate", "terraform/test/default.tfstate.tflock"}, - {"terraform/test", "default", "test.tfstate", "terraform/test/test.tfstate", "terraform/test/test.tfstate.tflock"}, - {"terraform/test", "dev", "test.tfstate", "terraform/test/dev/test.tfstate", "terraform/test/dev/test.tfstate.tflock"}, - } - - for _, c := range cases { - t.Run(fmt.Sprintf("%s %s %s", c.prefix, c.key, c.stateName), func(t *testing.T) { - b := &Backend{ - prefix: c.prefix, - key: c.key, - } - if got, want := b.stateFile(c.stateName), c.wantStateFile; got != want { - t.Errorf("wrong state file name\ngot: %s\nwant: %s", got, want) - } - if got, want := b.lockFile(c.stateName), c.wantLockFile; got != want { - t.Errorf("wrong lock file name\ngot: %s\nwant: %s", got, want) - } - }) - } -} - -func TestRemoteClient(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) - defer teardownBackend(t, be) - - ss, err := 
be.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - rs, ok := ss.(*remote.State) - if !ok { - t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) - } - - remote.TestClient(t, rs.Client) -} - -func TestRemoteClientWithPrefix(t *testing.T) { - t.Parallel() - - prefix := "prefix/test" - bucket := bucketName(t) - - be := setupBackend(t, bucket, prefix, defaultKey, false) - defer teardownBackend(t, be) - - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - rs, ok := ss.(*remote.State) - if !ok { - t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) - } - - remote.TestClient(t, rs.Client) -} - -func TestRemoteClientWithEncryption(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be := setupBackend(t, bucket, defaultPrefix, defaultKey, true) - defer teardownBackend(t, be) - - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - rs, ok := ss.(*remote.State) - if !ok { - t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) - } - - remote.TestClient(t, rs.Client) -} - -func TestRemoteLocks(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) - defer teardownBackend(t, be) - - remoteClient := func() (remote.Client, error) { - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - return nil, err - } - - rs, ok := ss.(*remote.State) - if !ok { - return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) - } - - return rs.Client, nil - } - - c0, err := remoteClient() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - c1, err := remoteClient() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - remote.TestRemoteLocks(t, c0, c1) -} - -func TestBackend(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be0 := 
setupBackend(t, bucket, defaultPrefix, defaultKey, false) - defer teardownBackend(t, be0) - - be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, false) - defer teardownBackend(t, be1) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) - backend.TestBackendStateForceUnlock(t, be0, be1) -} - -func TestBackendWithPrefix(t *testing.T) { - t.Parallel() - - prefix := "prefix/test" - bucket := bucketName(t) - - be0 := setupBackend(t, bucket, prefix, defaultKey, false) - defer teardownBackend(t, be0) - - be1 := setupBackend(t, bucket, prefix+"/", defaultKey, false) - defer teardownBackend(t, be1) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) -} - -func TestBackendWithEncryption(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) - defer teardownBackend(t, be0) - - be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) - defer teardownBackend(t, be1) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) -} - -func setupBackend(t *testing.T, bucket, prefix, key string, encrypt bool) backend.Backend { - t.Helper() - - skip := os.Getenv("TF_COS_APPID") == "" - if skip { - t.Skip("This test require setting TF_COS_APPID environment variables") - } - - if os.Getenv(PROVIDER_REGION) == "" { - os.Setenv(PROVIDER_REGION, "ap-guangzhou") - } - - appId := os.Getenv("TF_COS_APPID") - region := os.Getenv(PROVIDER_REGION) - - config := map[string]interface{}{ - "region": region, - "bucket": bucket + appId, - "prefix": prefix, - "key": key, - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)) - be := b.(*Backend) - - c, err := be.client("tencentcloud") - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - err = c.putBucket() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - return b -} - -func teardownBackend(t *testing.T, b backend.Backend) { - 
t.Helper() - - c, err := b.(*Backend).client("tencentcloud") - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - err = c.deleteBucket(true) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } -} - -func bucketName(t *testing.T) string { - unique := fmt.Sprintf("%s-%x", t.Name(), time.Now().UnixNano()) - return fmt.Sprintf("terraform-test-%s-%s", fmt.Sprintf("%x", md5.Sum([]byte(unique)))[:10], "") -} diff --git a/internal/backend/remote-state/cos/client.go b/internal/backend/remote-state/cos/client.go deleted file mode 100644 index 818bc129f5b9..000000000000 --- a/internal/backend/remote-state/cos/client.go +++ /dev/null @@ -1,442 +0,0 @@ -package cos - -import ( - "bytes" - "context" - "crypto/md5" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "strings" - "time" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" - "github.com/tencentyun/cos-go-sdk-v5" -) - -const ( - lockTagKey = "tencentcloud-terraform-lock" -) - -// RemoteClient implements the client of remote state -type remoteClient struct { - cosContext context.Context - cosClient *cos.Client - tagClient *tag.Client - - bucket string - stateFile string - lockFile string - encrypt bool - acl string -} - -// Get returns remote state file -func (c *remoteClient) Get() (*remote.Payload, error) { - log.Printf("[DEBUG] get remote state file %s", c.stateFile) - - exists, data, checksum, err := c.getObject(c.stateFile) - if err != nil { - return nil, err - } - - if !exists { - return nil, nil - } - - payload := &remote.Payload{ - Data: data, - MD5: []byte(checksum), - } - - return payload, nil -} - -// Put put state file to remote -func (c *remoteClient) Put(data []byte) error { - log.Printf("[DEBUG] put remote state file %s", c.stateFile) - - return c.putObject(c.stateFile, data) 
-} - -// Delete delete remote state file -func (c *remoteClient) Delete() error { - log.Printf("[DEBUG] delete remote state file %s", c.stateFile) - - return c.deleteObject(c.stateFile) -} - -// Lock lock remote state file for writing -func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { - log.Printf("[DEBUG] lock remote state file %s", c.lockFile) - - err := c.cosLock(c.bucket, c.lockFile) - if err != nil { - return "", c.lockError(err) - } - defer c.cosUnlock(c.bucket, c.lockFile) - - exists, _, _, err := c.getObject(c.lockFile) - if err != nil { - return "", c.lockError(err) - } - - if exists { - return "", c.lockError(fmt.Errorf("lock file %s exists", c.lockFile)) - } - - info.Path = c.lockFile - data, err := json.Marshal(info) - if err != nil { - return "", c.lockError(err) - } - - check := fmt.Sprintf("%x", md5.Sum(data)) - err = c.putObject(c.lockFile, data) - if err != nil { - return "", c.lockError(err) - } - - return check, nil -} - -// Unlock unlock remote state file -func (c *remoteClient) Unlock(check string) error { - log.Printf("[DEBUG] unlock remote state file %s", c.lockFile) - - info, err := c.lockInfo() - if err != nil { - return c.lockError(err) - } - - if info.ID != check { - return c.lockError(fmt.Errorf("lock id mismatch, %v != %v", info.ID, check)) - } - - err = c.deleteObject(c.lockFile) - if err != nil { - return c.lockError(err) - } - - err = c.cosUnlock(c.bucket, c.lockFile) - if err != nil { - return c.lockError(err) - } - - return nil -} - -// lockError returns statemgr.LockError -func (c *remoteClient) lockError(err error) *statemgr.LockError { - log.Printf("[DEBUG] failed to lock or unlock %s: %v", c.lockFile, err) - - lockErr := &statemgr.LockError{ - Err: err, - } - - info, infoErr := c.lockInfo() - if infoErr != nil { - lockErr.Err = multierror.Append(lockErr.Err, infoErr) - } else { - lockErr.Info = info - } - - return lockErr -} - -// lockInfo returns LockInfo from lock file -func (c *remoteClient) lockInfo() 
(*statemgr.LockInfo, error) { - exists, data, checksum, err := c.getObject(c.lockFile) - if err != nil { - return nil, err - } - - if !exists { - return nil, fmt.Errorf("lock file %s not exists", c.lockFile) - } - - info := &statemgr.LockInfo{} - if err := json.Unmarshal(data, info); err != nil { - return nil, err - } - - info.ID = checksum - - return info, nil -} - -// getObject get remote object -func (c *remoteClient) getObject(cosFile string) (exists bool, data []byte, checksum string, err error) { - rsp, err := c.cosClient.Object.Get(c.cosContext, cosFile, nil) - if rsp == nil { - log.Printf("[DEBUG] getObject %s: error: %v", cosFile, err) - err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) - return - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] getObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) - if err != nil { - if rsp.StatusCode == 404 { - err = nil - } else { - err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) - } - return - } - - checksum = rsp.Header.Get("X-Cos-Meta-Md5") - log.Printf("[DEBUG] getObject %s: checksum: %s", cosFile, checksum) - if len(checksum) != 32 { - err = fmt.Errorf("failed to open file at %v: checksum %s invalid", cosFile, checksum) - return - } - - exists = true - data, err = ioutil.ReadAll(rsp.Body) - log.Printf("[DEBUG] getObject %s: data length: %d", cosFile, len(data)) - if err != nil { - err = fmt.Errorf("failed to open file at %v: %v", cosFile, err) - return - } - - check := fmt.Sprintf("%x", md5.Sum(data)) - log.Printf("[DEBUG] getObject %s: check: %s", cosFile, check) - if check != checksum { - err = fmt.Errorf("failed to open file at %v: checksum mismatch, %s != %s", cosFile, check, checksum) - return - } - - return -} - -// putObject put object to remote -func (c *remoteClient) putObject(cosFile string, data []byte) error { - opt := &cos.ObjectPutOptions{ - ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{ - XCosMetaXXX: &http.Header{ - "X-Cos-Meta-Md5": 
[]string{fmt.Sprintf("%x", md5.Sum(data))}, - }, - }, - ACLHeaderOptions: &cos.ACLHeaderOptions{ - XCosACL: c.acl, - }, - } - - if c.encrypt { - opt.ObjectPutHeaderOptions.XCosServerSideEncryption = "AES256" - } - - r := bytes.NewReader(data) - rsp, err := c.cosClient.Object.Put(c.cosContext, cosFile, r, opt) - if rsp == nil { - log.Printf("[DEBUG] putObject %s: error: %v", cosFile, err) - return fmt.Errorf("failed to save file to %v: %v", cosFile, err) - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] putObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) - if err != nil { - return fmt.Errorf("failed to save file to %v: %v", cosFile, err) - } - - return nil -} - -// deleteObject delete remote object -func (c *remoteClient) deleteObject(cosFile string) error { - rsp, err := c.cosClient.Object.Delete(c.cosContext, cosFile) - if rsp == nil { - log.Printf("[DEBUG] deleteObject %s: error: %v", cosFile, err) - return fmt.Errorf("failed to delete file %v: %v", cosFile, err) - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] deleteObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) - if rsp.StatusCode == 404 { - return nil - } - - if err != nil { - return fmt.Errorf("failed to delete file %v: %v", cosFile, err) - } - - return nil -} - -// getBucket list bucket by prefix -func (c *remoteClient) getBucket(prefix string) (obs []cos.Object, err error) { - fs, rsp, err := c.cosClient.Bucket.Get(c.cosContext, &cos.BucketGetOptions{Prefix: prefix}) - if rsp == nil { - log.Printf("[DEBUG] getBucket %s/%s: error: %v", c.bucket, prefix, err) - err = fmt.Errorf("bucket %s not exists", c.bucket) - return - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] getBucket %s/%s: code: %d, error: %v", c.bucket, prefix, rsp.StatusCode, err) - if rsp.StatusCode == 404 { - err = fmt.Errorf("bucket %s not exists", c.bucket) - return - } - - if err != nil { - return - } - - return fs.Contents, nil -} - -// putBucket create cos bucket -func (c *remoteClient) putBucket() 
error { - rsp, err := c.cosClient.Bucket.Put(c.cosContext, nil) - if rsp == nil { - log.Printf("[DEBUG] putBucket %s: error: %v", c.bucket, err) - return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err) - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] putBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) - if rsp.StatusCode == 409 { - return nil - } - - if err != nil { - return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err) - } - - return nil -} - -// deleteBucket delete cos bucket -func (c *remoteClient) deleteBucket(recursive bool) error { - if recursive { - obs, err := c.getBucket("") - if err != nil { - if strings.Contains(err.Error(), "not exists") { - return nil - } - log.Printf("[DEBUG] deleteBucket %s: empty bucket error: %v", c.bucket, err) - return fmt.Errorf("failed to empty bucket %v: %v", c.bucket, err) - } - for _, v := range obs { - c.deleteObject(v.Key) - } - } - - rsp, err := c.cosClient.Bucket.Delete(c.cosContext) - if rsp == nil { - log.Printf("[DEBUG] deleteBucket %s: error: %v", c.bucket, err) - return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err) - } - defer rsp.Body.Close() - - log.Printf("[DEBUG] deleteBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) - if rsp.StatusCode == 404 { - return nil - } - - if err != nil { - return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err) - } - - return nil -} - -// cosLock lock cos for writing -func (c *remoteClient) cosLock(bucket, cosFile string) error { - log.Printf("[DEBUG] lock cos file %s:%s", bucket, cosFile) - - cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) - lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath))) - - return c.CreateTag(lockTagKey, lockTagValue) -} - -// cosUnlock unlock cos writing -func (c *remoteClient) cosUnlock(bucket, cosFile string) error { - log.Printf("[DEBUG] unlock cos file %s:%s", bucket, cosFile) - - cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) - lockTagValue := fmt.Sprintf("%x", 
md5.Sum([]byte(cosPath))) - - var err error - for i := 0; i < 30; i++ { - tagExists, err := c.CheckTag(lockTagKey, lockTagValue) - - if err != nil { - return err - } - - if !tagExists { - return nil - } - - err = c.DeleteTag(lockTagKey, lockTagValue) - if err == nil { - return nil - } - time.Sleep(1 * time.Second) - } - - return err -} - -// CheckTag checks if tag key:value exists -func (c *remoteClient) CheckTag(key, value string) (exists bool, err error) { - request := tag.NewDescribeTagsRequest() - request.TagKey = &key - request.TagValue = &value - - response, err := c.tagClient.DescribeTags(request) - log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) - if err != nil { - return - } - - if len(response.Response.Tags) == 0 { - return - } - - tagKey := response.Response.Tags[0].TagKey - tagValue := response.Response.Tags[0].TagValue - - exists = key == *tagKey && value == *tagValue - - return -} - -// CreateTag create tag by key and value -func (c *remoteClient) CreateTag(key, value string) error { - request := tag.NewCreateTagRequest() - request.TagKey = &key - request.TagValue = &value - - _, err := c.tagClient.CreateTag(request) - log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) - if err != nil { - return fmt.Errorf("failed to create tag: %s -> %s: %s", key, value, err) - } - - return nil -} - -// DeleteTag create tag by key and value -func (c *remoteClient) DeleteTag(key, value string) error { - request := tag.NewDeleteTagRequest() - request.TagKey = &key - request.TagValue = &value - - _, err := c.tagClient.DeleteTag(request) - log.Printf("[DEBUG] delete tag %s:%s: error: %v", key, value, err) - if err != nil { - return fmt.Errorf("failed to delete tag: %s -> %s: %s", key, value, err) - } - - return nil -} diff --git a/internal/backend/remote-state/gcs/backend.go b/internal/backend/remote-state/gcs/backend.go deleted file mode 100644 index 5c6d119238e5..000000000000 --- a/internal/backend/remote-state/gcs/backend.go +++ 
/dev/null @@ -1,249 +0,0 @@ -// Package gcs implements remote storage of state on Google Cloud Storage (GCS). -package gcs - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "os" - "strings" - - "cloud.google.com/go/storage" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "golang.org/x/oauth2" - "google.golang.org/api/impersonate" - "google.golang.org/api/option" -) - -// Backend implements "backend".Backend for GCS. -// Input(), Validate() and Configure() are implemented by embedding *schema.Backend. -// State(), DeleteState() and States() are implemented explicitly. -type Backend struct { - *schema.Backend - - storageClient *storage.Client - storageContext context.Context - - bucketName string - prefix string - - encryptionKey []byte - kmsKeyName string -} - -func New() backend.Backend { - b := &Backend{} - b.Backend = &schema.Backend{ - ConfigureFunc: b.configure, - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the Google Cloud Storage bucket", - }, - - "prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The directory where state files will be saved inside the bucket", - }, - - "credentials": { - Type: schema.TypeString, - Optional: true, - Description: "Google Cloud JSON Account Key", - Default: "", - }, - - "access_token": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OAUTH_ACCESS_TOKEN", - }, nil), - Description: "An OAuth2 token used for GCP authentication", - }, - - "impersonate_service_account": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BACKEND_IMPERSONATE_SERVICE_ACCOUNT", - "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", - }, nil), - Description: "The service account to impersonate for all Google 
API Calls", - }, - - "impersonate_service_account_delegates": { - Type: schema.TypeList, - Optional: true, - Description: "The delegation chain for the impersonated service account", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "encryption_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ENCRYPTION_KEY", - }, nil), - Description: "A 32 byte base64 encoded 'customer supplied encryption key' used when reading and writing state files in the bucket.", - ConflictsWith: []string{"kms_encryption_key"}, - }, - - "kms_encryption_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_KMS_ENCRYPTION_KEY", - }, nil), - Description: "A Cloud KMS key ('customer managed encryption key') used when reading and writing state files in the bucket. Format should be 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}'.", - ConflictsWith: []string{"encryption_key"}, - }, - - "storage_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BACKEND_STORAGE_CUSTOM_ENDPOINT", - "GOOGLE_STORAGE_CUSTOM_ENDPOINT", - }, nil), - }, - }, - } - - return b -} - -func (b *Backend) configure(ctx context.Context) error { - if b.storageClient != nil { - return nil - } - - // ctx is a background context with the backend config added. - // Since no context is passed to remoteClient.Get(), .Lock(), etc. but - // one is required for calling the GCP API, we're holding on to this - // context here and re-use it later. 
- b.storageContext = ctx - - data := schema.FromContextBackendConfig(b.storageContext) - - b.bucketName = data.Get("bucket").(string) - b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/") - if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") { - b.prefix = b.prefix + "/" - } - - var opts []option.ClientOption - var credOptions []option.ClientOption - - // Add credential source - var creds string - var tokenSource oauth2.TokenSource - - if v, ok := data.GetOk("access_token"); ok { - tokenSource = oauth2.StaticTokenSource(&oauth2.Token{ - AccessToken: v.(string), - }) - } else if v, ok := data.GetOk("credentials"); ok { - creds = v.(string) - } else if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { - creds = v - } else { - creds = os.Getenv("GOOGLE_CREDENTIALS") - } - - if tokenSource != nil { - credOptions = append(credOptions, option.WithTokenSource(tokenSource)) - } else if creds != "" { - - // to mirror how the provider works, we accept the file path or the contents - contents, err := backend.ReadPathOrContents(creds) - if err != nil { - return fmt.Errorf("Error loading credentials: %s", err) - } - - if !json.Valid([]byte(contents)) { - return fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") - } - - credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) - } - - // Service Account Impersonation - if v, ok := data.GetOk("impersonate_service_account"); ok { - ServiceAccount := v.(string) - var delegates []string - - if v, ok := data.GetOk("impersonate_service_account_delegates"); ok { - d := v.([]interface{}) - if len(delegates) > 0 { - delegates = make([]string, 0, len(d)) - } - for _, delegate := range d { - delegates = append(delegates, delegate.(string)) - } - } - - ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ - TargetPrincipal: ServiceAccount, - Scopes: []string{storage.ScopeReadWrite}, - Delegates: delegates, - }, credOptions...) 
- - if err != nil { - return err - } - - opts = append(opts, option.WithTokenSource(ts)) - - } else { - opts = append(opts, credOptions...) - } - - opts = append(opts, option.WithUserAgent(httpclient.UserAgentString())) - - // Custom endpoint for storage API - if storageEndpoint, ok := data.GetOk("storage_custom_endpoint"); ok { - endpoint := option.WithEndpoint(storageEndpoint.(string)) - opts = append(opts, endpoint) - } - client, err := storage.NewClient(b.storageContext, opts...) - if err != nil { - return fmt.Errorf("storage.NewClient() failed: %v", err) - } - - b.storageClient = client - - // Customer-supplied encryption - key := data.Get("encryption_key").(string) - if key != "" { - kc, err := backend.ReadPathOrContents(key) - if err != nil { - return fmt.Errorf("Error loading encryption key: %s", err) - } - - // The GCS client expects a customer supplied encryption key to be - // passed in as a 32 byte long byte slice. The byte slice is base64 - // encoded before being passed to the API. We take a base64 encoded key - // to remain consistent with the GCS docs. 
- // https://cloud.google.com/storage/docs/encryption#customer-supplied - // https://github.com/GoogleCloudPlatform/google-cloud-go/blob/def681/storage/storage.go#L1181 - k, err := base64.StdEncoding.DecodeString(kc) - if err != nil { - return fmt.Errorf("Error decoding encryption key: %s", err) - } - b.encryptionKey = k - } - - // Customer-managed encryption - kmsName := data.Get("kms_encryption_key").(string) - if kmsName != "" { - b.kmsKeyName = kmsName - } - - return nil -} diff --git a/internal/backend/remote-state/gcs/backend_state.go b/internal/backend/remote-state/gcs/backend_state.go deleted file mode 100644 index 1f1aa8f1b841..000000000000 --- a/internal/backend/remote-state/gcs/backend_state.go +++ /dev/null @@ -1,155 +0,0 @@ -package gcs - -import ( - "fmt" - "path" - "sort" - "strings" - - "cloud.google.com/go/storage" - "google.golang.org/api/iterator" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -const ( - stateFileSuffix = ".tfstate" - lockFileSuffix = ".tflock" -) - -// Workspaces returns a list of names for the workspaces found on GCS. The default -// state is always returned as the first element in the slice. 
-func (b *Backend) Workspaces() ([]string, error) { - states := []string{backend.DefaultStateName} - - bucket := b.storageClient.Bucket(b.bucketName) - objs := bucket.Objects(b.storageContext, &storage.Query{ - Delimiter: "/", - Prefix: b.prefix, - }) - for { - attrs, err := objs.Next() - if err == iterator.Done { - break - } - if err != nil { - return nil, fmt.Errorf("querying Cloud Storage failed: %v", err) - } - - name := path.Base(attrs.Name) - if !strings.HasSuffix(name, stateFileSuffix) { - continue - } - st := strings.TrimSuffix(name, stateFileSuffix) - - if st != backend.DefaultStateName { - states = append(states, st) - } - } - - sort.Strings(states[1:]) - return states, nil -} - -// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted. -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName { - return fmt.Errorf("cowardly refusing to delete the %q state", name) - } - - c, err := b.client(name) - if err != nil { - return err - } - - return c.Delete() -} - -// client returns a remoteClient for the named state. -func (b *Backend) client(name string) (*remoteClient, error) { - if name == "" { - return nil, fmt.Errorf("%q is not a valid state name", name) - } - - return &remoteClient{ - storageContext: b.storageContext, - storageClient: b.storageClient, - bucketName: b.bucketName, - stateFilePath: b.stateFile(name), - lockFilePath: b.lockFile(name), - encryptionKey: b.encryptionKey, - kmsKeyName: b.kmsKeyName, - }, nil -} - -// StateMgr reads and returns the named state from GCS. If the named state does -// not yet exist, a new state file is created. 
-func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - c, err := b.client(name) - if err != nil { - return nil, err - } - - st := &remote.State{Client: c} - - // Grab the value - if err := st.RefreshState(); err != nil { - return nil, err - } - - // If we have no state, we have to create an empty state - if v := st.State(); v == nil { - - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := st.Lock(lockInfo) - if err != nil { - return nil, err - } - - // Local helper function so we can call it multiple places - unlock := func(baseErr error) error { - if err := st.Unlock(lockID); err != nil { - const unlockErrMsg = `%v - Additionally, unlocking the state file on Google Cloud Storage failed: - - Error message: %q - Lock ID (gen): %v - Lock file URL: %v - - You may have to force-unlock this state in order to use it again. - The GCloud backend acquires a lock during initialization to ensure - the initial state file is created.` - return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) - } - - return baseErr - } - - if err := st.WriteState(states.NewState()); err != nil { - return nil, unlock(err) - } - if err := st.PersistState(nil); err != nil { - return nil, unlock(err) - } - - // Unlock, the state should now be initialized - if err := unlock(nil); err != nil { - return nil, err - } - - } - - return st, nil -} - -func (b *Backend) stateFile(name string) string { - return path.Join(b.prefix, name+stateFileSuffix) -} - -func (b *Backend) lockFile(name string) string { - return path.Join(b.prefix, name+lockFileSuffix) -} diff --git a/internal/backend/remote-state/gcs/backend_test.go b/internal/backend/remote-state/gcs/backend_test.go deleted file mode 100644 index 9e8ca077c319..000000000000 --- a/internal/backend/remote-state/gcs/backend_test.go +++ /dev/null @@ -1,441 +0,0 @@ -package gcs - -import ( - "context" - "encoding/json" - "fmt" - "log" - "os" - "strings" - "testing" - "time" - - kms 
"cloud.google.com/go/kms/apiv1" - "cloud.google.com/go/storage" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/states/remote" - "google.golang.org/api/option" - kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" -) - -const ( - noPrefix = "" - noEncryptionKey = "" - noKmsKeyName = "" -) - -// See https://cloud.google.com/storage/docs/using-encryption-keys#generating_your_own_encryption_key -const encryptionKey = "yRyCOikXi1ZDNE0xN3yiFsJjg7LGimoLrGFcLZgQoVk=" - -// KMS key ring name and key name are hardcoded here and re-used because key rings (and keys) cannot be deleted -// Test code asserts their presence and creates them if they're absent. They're not deleted at the end of tests. -// See: https://cloud.google.com/kms/docs/faq#cannot_delete -const ( - keyRingName = "tf-gcs-backend-acc-tests" - keyName = "tf-test-key-1" - kmsRole = "roles/cloudkms.cryptoKeyEncrypterDecrypter" // GCS service account needs this binding on the created key -) - -var keyRingLocation = os.Getenv("GOOGLE_REGION") - -func TestStateFile(t *testing.T) { - t.Parallel() - - cases := []struct { - prefix string - name string - wantStateFile string - wantLockFile string - }{ - {"state", "default", "state/default.tfstate", "state/default.tflock"}, - {"state", "test", "state/test.tfstate", "state/test.tflock"}, - {"state", "test", "state/test.tfstate", "state/test.tflock"}, - {"state", "test", "state/test.tfstate", "state/test.tflock"}, - } - for _, c := range cases { - b := &Backend{ - prefix: c.prefix, - } - - if got := b.stateFile(c.name); got != c.wantStateFile { - t.Errorf("stateFile(%q) = %q, want %q", c.name, got, c.wantStateFile) - } - - if got := b.lockFile(c.name); got != c.wantLockFile { - t.Errorf("lockFile(%q) = %q, want %q", c.name, got, c.wantLockFile) - } - } -} - -func TestRemoteClient(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - be := setupBackend(t, 
bucket, noPrefix, noEncryptionKey, noKmsKeyName) - defer teardownBackend(t, be, noPrefix) - - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) - } - - rs, ok := ss.(*remote.State) - if !ok { - t.Fatalf("be.StateMgr(): got a %T, want a *remote.State", ss) - } - - remote.TestClient(t, rs.Client) -} -func TestRemoteClientWithEncryption(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - be := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) - defer teardownBackend(t, be, noPrefix) - - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) - } - - rs, ok := ss.(*remote.State) - if !ok { - t.Fatalf("be.StateMgr(): got a %T, want a *remote.State", ss) - } - - remote.TestClient(t, rs.Client) -} - -func TestRemoteLocks(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - be := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) - defer teardownBackend(t, be, noPrefix) - - remoteClient := func() (remote.Client, error) { - ss, err := be.StateMgr(backend.DefaultStateName) - if err != nil { - return nil, err - } - - rs, ok := ss.(*remote.State) - if !ok { - return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) - } - - return rs.Client, nil - } - - c0, err := remoteClient() - if err != nil { - t.Fatalf("remoteClient(0) = %v", err) - } - c1, err := remoteClient() - if err != nil { - t.Fatalf("remoteClient(1) = %v", err) - } - - remote.TestRemoteLocks(t, c0, c1) -} - -func TestBackend(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) - defer teardownBackend(t, be0, noPrefix) - - be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) - backend.TestBackendStateForceUnlock(t, 
be0, be1) -} - -func TestBackendWithPrefix(t *testing.T) { - t.Parallel() - - prefix := "test/prefix" - bucket := bucketName(t) - - be0 := setupBackend(t, bucket, prefix, noEncryptionKey, noKmsKeyName) - defer teardownBackend(t, be0, prefix) - - be1 := setupBackend(t, bucket, prefix+"/", noEncryptionKey, noKmsKeyName) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) -} -func TestBackendWithCustomerSuppliedEncryption(t *testing.T) { - t.Parallel() - - bucket := bucketName(t) - - be0 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) - defer teardownBackend(t, be0, noPrefix) - - be1 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) -} - -func TestBackendWithCustomerManagedKMSEncryption(t *testing.T) { - t.Parallel() - - projectID := os.Getenv("GOOGLE_PROJECT") - bucket := bucketName(t) - - // Taken from global variables in test file - kmsDetails := map[string]string{ - "project": projectID, - "location": keyRingLocation, - "ringName": keyRingName, - "keyName": keyName, - } - - kmsName := setupKmsKey(t, kmsDetails) - - be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) - defer teardownBackend(t, be0, noPrefix) - - be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) - - backend.TestBackendStates(t, be0) - backend.TestBackendStateLocks(t, be0, be1) -} - -// setupBackend returns a new GCS backend. -func setupBackend(t *testing.T, bucket, prefix, key, kmsName string) backend.Backend { - t.Helper() - - projectID := os.Getenv("GOOGLE_PROJECT") - if projectID == "" || os.Getenv("TF_ACC") == "" { - t.Skip("This test creates a bucket in GCS and populates it. 
" + - "Since this may incur costs, it will only run if " + - "the TF_ACC and GOOGLE_PROJECT environment variables are set.") - } - - config := map[string]interface{}{ - "bucket": bucket, - "prefix": prefix, - } - // Only add encryption keys to config if non-zero value set - // If not set here, default values are supplied in `TestBackendConfig` by `PrepareConfig` function call - if len(key) > 0 { - config["encryption_key"] = key - } - if len(kmsName) > 0 { - config["kms_encryption_key"] = kmsName - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)) - be := b.(*Backend) - - // create the bucket if it doesn't exist - bkt := be.storageClient.Bucket(bucket) - _, err := bkt.Attrs(be.storageContext) - if err != nil { - if err != storage.ErrBucketNotExist { - t.Fatal(err) - } - - attrs := &storage.BucketAttrs{ - Location: os.Getenv("GOOGLE_REGION"), - } - err := bkt.Create(be.storageContext, projectID, attrs) - if err != nil { - t.Fatal(err) - } - } - - return b -} - -// setupKmsKey asserts that a KMS key chain and key exist and necessary IAM bindings are in place -// If the key ring or key do not exist they are created and permissions are given to the GCS Service account -func setupKmsKey(t *testing.T, keyDetails map[string]string) string { - t.Helper() - - projectID := os.Getenv("GOOGLE_PROJECT") - if projectID == "" || os.Getenv("TF_ACC") == "" { - t.Skip("This test creates a KMS key ring and key in Cloud KMS. " + - "Since this may incur costs, it will only run if " + - "the TF_ACC and GOOGLE_PROJECT environment variables are set.") - } - - // KMS Client - ctx := context.Background() - opts, err := testGetClientOptions(t) - if err != nil { - e := fmt.Errorf("testGetClientOptions() failed: %s", err) - t.Fatal(e) - } - c, err := kms.NewKeyManagementClient(ctx, opts...) 
- if err != nil { - e := fmt.Errorf("kms.NewKeyManagementClient() failed: %v", err) - t.Fatal(e) - } - defer c.Close() - - // Get KMS key ring, create if doesn't exist - reqGetKeyRing := &kmspb.GetKeyRingRequest{ - Name: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), - } - var keyRing *kmspb.KeyRing - keyRing, err = c.GetKeyRing(ctx, reqGetKeyRing) - if err != nil { - if !strings.Contains(err.Error(), "NotFound") { - // Handle unexpected error that isn't related to the key ring not being made yet - t.Fatal(err) - } - // Create key ring that doesn't exist - t.Logf("Cloud KMS key ring `%s` not found: creating key ring", - fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), - ) - reqCreateKeyRing := &kmspb.CreateKeyRingRequest{ - Parent: fmt.Sprintf("projects/%s/locations/%s", keyDetails["project"], keyDetails["location"]), - KeyRingId: keyDetails["ringName"], - } - keyRing, err = c.CreateKeyRing(ctx, reqCreateKeyRing) - if err != nil { - t.Fatal(err) - } - t.Logf("Cloud KMS key ring `%s` created successfully", keyRing.Name) - } - - // Get KMS key, create if doesn't exist (and give GCS service account permission to use) - reqGetKey := &kmspb.GetCryptoKeyRequest{ - Name: fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), - } - var key *kmspb.CryptoKey - key, err = c.GetCryptoKey(ctx, reqGetKey) - if err != nil { - if !strings.Contains(err.Error(), "NotFound") { - // Handle unexpected error that isn't related to the key not being made yet - t.Fatal(err) - } - // Create key that doesn't exist - t.Logf("Cloud KMS key `%s` not found: creating key", - fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), - ) - reqCreateKey := &kmspb.CreateCryptoKeyRequest{ - Parent: keyRing.Name, - CryptoKeyId: keyDetails["keyName"], - CryptoKey: &kmspb.CryptoKey{ - Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT, - 
}, - } - key, err = c.CreateCryptoKey(ctx, reqCreateKey) - if err != nil { - t.Fatal(err) - } - t.Logf("Cloud KMS key `%s` created successfully", key.Name) - } - - // Get GCS Service account email, check has necessary permission on key - // Note: we cannot reuse the backend's storage client (like in the setupBackend function) - // because the KMS key needs to exist before the backend buckets are made in the test. - sc, err := storage.NewClient(ctx, opts...) //reuse opts from KMS client - if err != nil { - e := fmt.Errorf("storage.NewClient() failed: %v", err) - t.Fatal(e) - } - defer sc.Close() - gcsServiceAccount, err := sc.ServiceAccount(ctx, keyDetails["project"]) - if err != nil { - t.Fatal(err) - } - - // Assert Cloud Storage service account has permission to use this key. - member := fmt.Sprintf("serviceAccount:%s", gcsServiceAccount) - iamHandle := c.ResourceIAM(key.Name) - policy, err := iamHandle.Policy(ctx) - if err != nil { - t.Fatal(err) - } - if ok := policy.HasRole(member, kmsRole); !ok { - // Add the missing permissions - t.Logf("Granting GCS service account %s %s role on key %s", gcsServiceAccount, kmsRole, key.Name) - policy.Add(member, kmsRole) - err = iamHandle.SetPolicy(ctx, policy) - if err != nil { - t.Fatal(err) - } - } - return key.Name -} - -// teardownBackend deletes all states from be except the default state. -func teardownBackend(t *testing.T, be backend.Backend, prefix string) { - t.Helper() - gcsBE, ok := be.(*Backend) - if !ok { - t.Fatalf("be is a %T, want a *gcsBackend", be) - } - ctx := gcsBE.storageContext - - bucket := gcsBE.storageClient.Bucket(gcsBE.bucketName) - objs := bucket.Objects(ctx, nil) - - for o, err := objs.Next(); err == nil; o, err = objs.Next() { - if err := bucket.Object(o.Name).Delete(ctx); err != nil { - log.Printf("Error trying to delete object: %s %s\n\n", o.Name, err) - } else { - log.Printf("Object deleted: %s", o.Name) - } - } - - // Delete the bucket itself. 
- if err := bucket.Delete(ctx); err != nil { - t.Errorf("deleting bucket %q failed, manual cleanup may be required: %v", gcsBE.bucketName, err) - } -} - -// bucketName returns a valid bucket name for this test. -func bucketName(t *testing.T) string { - name := fmt.Sprintf("tf-%x-%s", time.Now().UnixNano(), t.Name()) - - // Bucket names must contain 3 to 63 characters. - if len(name) > 63 { - name = name[:63] - } - - return strings.ToLower(name) -} - -// getClientOptions returns the []option.ClientOption needed to configure Google API clients -// that are required in acceptance tests but are not part of the gcs backend itself -func testGetClientOptions(t *testing.T) ([]option.ClientOption, error) { - t.Helper() - - var creds string - if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { - creds = v - } else { - creds = os.Getenv("GOOGLE_CREDENTIALS") - } - if creds == "" { - t.Skip("This test required credentials to be supplied via" + - "the GOOGLE_CREDENTIALS or GOOGLE_BACKEND_CREDENTIALS environment variables.") - } - - var opts []option.ClientOption - var credOptions []option.ClientOption - - contents, err := backend.ReadPathOrContents(creds) - if err != nil { - return nil, fmt.Errorf("error loading credentials: %s", err) - } - if !json.Valid([]byte(contents)) { - return nil, fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") - } - credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) - opts = append(opts, credOptions...) 
- opts = append(opts, option.WithUserAgent(httpclient.UserAgentString())) - - return opts, nil -} diff --git a/internal/backend/remote-state/gcs/client.go b/internal/backend/remote-state/gcs/client.go deleted file mode 100644 index b91eaf350755..000000000000 --- a/internal/backend/remote-state/gcs/client.go +++ /dev/null @@ -1,190 +0,0 @@ -package gcs - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "strconv" - - "cloud.google.com/go/storage" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - "golang.org/x/net/context" -) - -// remoteClient is used by "state/remote".State to read and write -// blobs representing state. -// Implements "state/remote".ClientLocker -type remoteClient struct { - storageContext context.Context - storageClient *storage.Client - bucketName string - stateFilePath string - lockFilePath string - encryptionKey []byte - kmsKeyName string -} - -func (c *remoteClient) Get() (payload *remote.Payload, err error) { - stateFileReader, err := c.stateFile().NewReader(c.storageContext) - if err != nil { - if err == storage.ErrObjectNotExist { - return nil, nil - } else { - return nil, fmt.Errorf("Failed to open state file at %v: %v", c.stateFileURL(), err) - } - } - defer stateFileReader.Close() - - stateFileContents, err := ioutil.ReadAll(stateFileReader) - if err != nil { - return nil, fmt.Errorf("Failed to read state file from %v: %v", c.stateFileURL(), err) - } - - stateFileAttrs, err := c.stateFile().Attrs(c.storageContext) - if err != nil { - return nil, fmt.Errorf("Failed to read state file attrs from %v: %v", c.stateFileURL(), err) - } - - result := &remote.Payload{ - Data: stateFileContents, - MD5: stateFileAttrs.MD5, - } - - return result, nil -} - -func (c *remoteClient) Put(data []byte) error { - err := func() error { - stateFileWriter := c.stateFile().NewWriter(c.storageContext) - if len(c.kmsKeyName) > 0 { - 
stateFileWriter.KMSKeyName = c.kmsKeyName - } - if _, err := stateFileWriter.Write(data); err != nil { - return err - } - return stateFileWriter.Close() - }() - if err != nil { - return fmt.Errorf("Failed to upload state to %v: %v", c.stateFileURL(), err) - } - - return nil -} - -func (c *remoteClient) Delete() error { - if err := c.stateFile().Delete(c.storageContext); err != nil { - return fmt.Errorf("Failed to delete state file %v: %v", c.stateFileURL(), err) - } - - return nil -} - -// Lock writes to a lock file, ensuring file creation. Returns the generation -// number, which must be passed to Unlock(). -func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { - // update the path we're using - // we can't set the ID until the info is written - info.Path = c.lockFileURL() - - infoJson, err := json.Marshal(info) - if err != nil { - return "", err - } - - lockFile := c.lockFile() - w := lockFile.If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext) - err = func() error { - if _, err := w.Write(infoJson); err != nil { - return err - } - return w.Close() - }() - - if err != nil { - return "", c.lockError(fmt.Errorf("writing %q failed: %v", c.lockFileURL(), err)) - } - - info.ID = strconv.FormatInt(w.Attrs().Generation, 10) - - return info.ID, nil -} - -func (c *remoteClient) Unlock(id string) error { - gen, err := strconv.ParseInt(id, 10, 64) - if err != nil { - return fmt.Errorf("Lock ID should be numerical value, got '%s'", id) - } - - if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil { - return c.lockError(err) - } - - return nil -} - -func (c *remoteClient) lockError(err error) *statemgr.LockError { - lockErr := &statemgr.LockError{ - Err: err, - } - - info, infoErr := c.lockInfo() - if infoErr != nil { - lockErr.Err = multierror.Append(lockErr.Err, infoErr) - } else { - lockErr.Info = info - } - return lockErr -} - -// lockInfo reads the lock file, parses its contents and 
returns the parsed -// LockInfo struct. -func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) { - r, err := c.lockFile().NewReader(c.storageContext) - if err != nil { - return nil, err - } - defer r.Close() - - rawData, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - info := &statemgr.LockInfo{} - if err := json.Unmarshal(rawData, info); err != nil { - return nil, err - } - - // We use the Generation as the ID, so overwrite the ID in the json. - // This can't be written into the Info, since the generation isn't known - // until it's written. - attrs, err := c.lockFile().Attrs(c.storageContext) - if err != nil { - return nil, err - } - info.ID = strconv.FormatInt(attrs.Generation, 10) - - return info, nil -} - -func (c *remoteClient) stateFile() *storage.ObjectHandle { - h := c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath) - if len(c.encryptionKey) > 0 { - return h.Key(c.encryptionKey) - } - return h -} - -func (c *remoteClient) stateFileURL() string { - return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath) -} - -func (c *remoteClient) lockFile() *storage.ObjectHandle { - return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath) -} - -func (c *remoteClient) lockFileURL() string { - return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath) -} diff --git a/internal/backend/remote-state/http/backend.go b/internal/backend/remote-state/http/backend.go deleted file mode 100644 index 2048294cc36a..000000000000 --- a/internal/backend/remote-state/http/backend.go +++ /dev/null @@ -1,257 +0,0 @@ -package http - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "log" - "net/http" - "net/url" - "time" - - "github.com/hashicorp/go-retryablehttp" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/states/remote" - 
"github.com/hashicorp/terraform/internal/states/statemgr" -) - -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_ADDRESS", nil), - Description: "The address of the REST endpoint", - }, - "update_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UPDATE_METHOD", "POST"), - Description: "HTTP method to use when updating state", - }, - "lock_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_ADDRESS", nil), - Description: "The address of the lock REST endpoint", - }, - "unlock_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_ADDRESS", nil), - Description: "The address of the unlock REST endpoint", - }, - "lock_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_METHOD", "LOCK"), - Description: "The HTTP method to use when locking", - }, - "unlock_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_METHOD", "UNLOCK"), - Description: "The HTTP method to use when unlocking", - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_USERNAME", nil), - Description: "The username for HTTP basic authentication", - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_PASSWORD", nil), - Description: "The password for HTTP basic authentication", - }, - "skip_cert_verification": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to skip TLS verification.", - }, - "retry_max": &schema.Schema{ 
- Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_MAX", 2), - Description: "The number of HTTP request retries.", - }, - "retry_wait_min": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MIN", 1), - Description: "The minimum time in seconds to wait between HTTP request attempts.", - }, - "retry_wait_max": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MAX", 30), - Description: "The maximum time in seconds to wait between HTTP request attempts.", - }, - "client_ca_certificate_pem": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CA_CERTIFICATE_PEM", ""), - Description: "A PEM-encoded CA certificate chain used by the client to verify server certificates during TLS authentication.", - }, - "client_certificate_pem": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CERTIFICATE_PEM", ""), - Description: "A PEM-encoded certificate used by the server to verify the client during mutual TLS (mTLS) authentication.", - }, - "client_private_key_pem": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_PRIVATE_KEY_PEM", ""), - Description: "A PEM-encoded private key, required if client_certificate_pem is specified.", - }, - }, - } - - b := &Backend{Backend: s} - b.Backend.ConfigureFunc = b.configure - return b -} - -type Backend struct { - *schema.Backend - - client *httpClient -} - -// configureTLS configures TLS when needed; if there are no conditions requiring TLS, no change is made. 
-func (b *Backend) configureTLS(client *retryablehttp.Client, data *schema.ResourceData) error { - // If there are no conditions needing to configure TLS, leave the client untouched - skipCertVerification := data.Get("skip_cert_verification").(bool) - clientCACertificatePem := data.Get("client_ca_certificate_pem").(string) - clientCertificatePem := data.Get("client_certificate_pem").(string) - clientPrivateKeyPem := data.Get("client_private_key_pem").(string) - if !skipCertVerification && clientCACertificatePem == "" && clientCertificatePem == "" && clientPrivateKeyPem == "" { - return nil - } - if clientCertificatePem != "" && clientPrivateKeyPem == "" { - return fmt.Errorf("client_certificate_pem is set but client_private_key_pem is not") - } - if clientPrivateKeyPem != "" && clientCertificatePem == "" { - return fmt.Errorf("client_private_key_pem is set but client_certificate_pem is not") - } - - // TLS configuration is needed; create an object and configure it - var tlsConfig tls.Config - client.HTTPClient.Transport.(*http.Transport).TLSClientConfig = &tlsConfig - - if skipCertVerification { - // ignores TLS verification - tlsConfig.InsecureSkipVerify = true - } - if clientCACertificatePem != "" { - // trust servers based on a CA - tlsConfig.RootCAs = x509.NewCertPool() - if !tlsConfig.RootCAs.AppendCertsFromPEM([]byte(clientCACertificatePem)) { - return errors.New("failed to append certs") - } - } - if clientCertificatePem != "" && clientPrivateKeyPem != "" { - // attach a client certificate to the TLS handshake (aka mTLS) - certificate, err := tls.X509KeyPair([]byte(clientCertificatePem), []byte(clientPrivateKeyPem)) - if err != nil { - return fmt.Errorf("cannot load client certificate: %w", err) - } - tlsConfig.Certificates = []tls.Certificate{certificate} - } - - return nil -} - -func (b *Backend) configure(ctx context.Context) error { - data := schema.FromContextBackendConfig(ctx) - - address := data.Get("address").(string) - updateURL, err := 
url.Parse(address) - if err != nil { - return fmt.Errorf("failed to parse address URL: %s", err) - } - if updateURL.Scheme != "http" && updateURL.Scheme != "https" { - return fmt.Errorf("address must be HTTP or HTTPS") - } - - updateMethod := data.Get("update_method").(string) - - var lockURL *url.URL - if v, ok := data.GetOk("lock_address"); ok && v.(string) != "" { - var err error - lockURL, err = url.Parse(v.(string)) - if err != nil { - return fmt.Errorf("failed to parse lockAddress URL: %s", err) - } - if lockURL.Scheme != "http" && lockURL.Scheme != "https" { - return fmt.Errorf("lockAddress must be HTTP or HTTPS") - } - } - - lockMethod := data.Get("lock_method").(string) - - var unlockURL *url.URL - if v, ok := data.GetOk("unlock_address"); ok && v.(string) != "" { - var err error - unlockURL, err = url.Parse(v.(string)) - if err != nil { - return fmt.Errorf("failed to parse unlockAddress URL: %s", err) - } - if unlockURL.Scheme != "http" && unlockURL.Scheme != "https" { - return fmt.Errorf("unlockAddress must be HTTP or HTTPS") - } - } - - unlockMethod := data.Get("unlock_method").(string) - - rClient := retryablehttp.NewClient() - rClient.RetryMax = data.Get("retry_max").(int) - rClient.RetryWaitMin = time.Duration(data.Get("retry_wait_min").(int)) * time.Second - rClient.RetryWaitMax = time.Duration(data.Get("retry_wait_max").(int)) * time.Second - rClient.Logger = log.New(logging.LogOutput(), "", log.Flags()) - if err = b.configureTLS(rClient, data); err != nil { - return err - } - - b.client = &httpClient{ - URL: updateURL, - UpdateMethod: updateMethod, - - LockURL: lockURL, - LockMethod: lockMethod, - UnlockURL: unlockURL, - UnlockMethod: unlockMethod, - - Username: data.Get("username").(string), - Password: data.Get("password").(string), - - // accessible only for testing use - Client: rClient, - } - return nil -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - if name != backend.DefaultStateName { - return nil, 
backend.ErrWorkspacesNotSupported - } - - return &remote.State{Client: b.client}, nil -} - -func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrWorkspacesNotSupported -} - -func (b *Backend) DeleteWorkspace(string, bool) error { - return backend.ErrWorkspacesNotSupported -} diff --git a/internal/backend/remote-state/http/backend_test.go b/internal/backend/remote-state/http/backend_test.go deleted file mode 100644 index 9f32273cbac7..000000000000 --- a/internal/backend/remote-state/http/backend_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package http - -import ( - "os" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/backend" -) - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestHTTPClientFactory(t *testing.T) { - // defaults - - conf := map[string]cty.Value{ - "address": cty.StringVal("http://127.0.0.1:8888/foo"), - } - b := backend.TestBackendConfig(t, New(), configs.SynthBody("synth", conf)).(*Backend) - client := b.client - - if client == nil { - t.Fatal("Unexpected failure, address") - } - if client.URL.String() != "http://127.0.0.1:8888/foo" { - t.Fatalf("Expected address \"%s\", got \"%s\"", conf["address"], client.URL.String()) - } - if client.UpdateMethod != "POST" { - t.Fatalf("Expected update_method \"%s\", got \"%s\"", "POST", client.UpdateMethod) - } - if client.LockURL != nil || client.LockMethod != "LOCK" { - t.Fatal("Unexpected lock_address or lock_method") - } - if client.UnlockURL != nil || client.UnlockMethod != "UNLOCK" { - t.Fatal("Unexpected unlock_address or unlock_method") - } - if client.Username != "" || client.Password != "" { - t.Fatal("Unexpected username or password") - } - - // custom - conf = map[string]cty.Value{ - "address": cty.StringVal("http://127.0.0.1:8888/foo"), - "update_method": cty.StringVal("BLAH"), - "lock_address": 
cty.StringVal("http://127.0.0.1:8888/bar"), - "lock_method": cty.StringVal("BLIP"), - "unlock_address": cty.StringVal("http://127.0.0.1:8888/baz"), - "unlock_method": cty.StringVal("BLOOP"), - "username": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "retry_max": cty.StringVal("999"), - "retry_wait_min": cty.StringVal("15"), - "retry_wait_max": cty.StringVal("150"), - } - - b = backend.TestBackendConfig(t, New(), configs.SynthBody("synth", conf)).(*Backend) - client = b.client - - if client == nil { - t.Fatal("Unexpected failure, update_method") - } - if client.UpdateMethod != "BLAH" { - t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) - } - if client.LockURL.String() != conf["lock_address"].AsString() || client.LockMethod != "BLIP" { - t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", client.LockURL.String(), - conf["lock_address"].AsString(), client.LockMethod, conf["lock_method"]) - } - if client.UnlockURL.String() != conf["unlock_address"].AsString() || client.UnlockMethod != "BLOOP" { - t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), - conf["unlock_address"].AsString(), client.UnlockMethod, conf["unlock_method"]) - } - if client.Username != "user" || client.Password != "pass" { - t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], - client.Password, conf["password"]) - } - if client.Client.RetryMax != 999 { - t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) - } - if client.Client.RetryWaitMin != 15*time.Second { - t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) - } - if client.Client.RetryWaitMax != 150*time.Second { - t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) - } -} - -func TestHTTPClientFactoryWithEnv(t 
*testing.T) { - // env - conf := map[string]string{ - "address": "http://127.0.0.1:8888/foo", - "update_method": "BLAH", - "lock_address": "http://127.0.0.1:8888/bar", - "lock_method": "BLIP", - "unlock_address": "http://127.0.0.1:8888/baz", - "unlock_method": "BLOOP", - "username": "user", - "password": "pass", - "retry_max": "999", - "retry_wait_min": "15", - "retry_wait_max": "150", - } - - defer testWithEnv(t, "TF_HTTP_ADDRESS", conf["address"])() - defer testWithEnv(t, "TF_HTTP_UPDATE_METHOD", conf["update_method"])() - defer testWithEnv(t, "TF_HTTP_LOCK_ADDRESS", conf["lock_address"])() - defer testWithEnv(t, "TF_HTTP_UNLOCK_ADDRESS", conf["unlock_address"])() - defer testWithEnv(t, "TF_HTTP_LOCK_METHOD", conf["lock_method"])() - defer testWithEnv(t, "TF_HTTP_UNLOCK_METHOD", conf["unlock_method"])() - defer testWithEnv(t, "TF_HTTP_USERNAME", conf["username"])() - defer testWithEnv(t, "TF_HTTP_PASSWORD", conf["password"])() - defer testWithEnv(t, "TF_HTTP_RETRY_MAX", conf["retry_max"])() - defer testWithEnv(t, "TF_HTTP_RETRY_WAIT_MIN", conf["retry_wait_min"])() - defer testWithEnv(t, "TF_HTTP_RETRY_WAIT_MAX", conf["retry_wait_max"])() - - b := backend.TestBackendConfig(t, New(), nil).(*Backend) - client := b.client - - if client == nil { - t.Fatal("Unexpected failure, EnvDefaultFunc") - } - if client.UpdateMethod != "BLAH" { - t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) - } - if client.LockURL.String() != conf["lock_address"] || client.LockMethod != "BLIP" { - t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", client.LockURL.String(), - conf["lock_address"], client.LockMethod, conf["lock_method"]) - } - if client.UnlockURL.String() != conf["unlock_address"] || client.UnlockMethod != "BLOOP" { - t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), - conf["unlock_address"], client.UnlockMethod, conf["unlock_method"]) - } - if 
client.Username != "user" || client.Password != "pass" { - t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], - client.Password, conf["password"]) - } - if client.Client.RetryMax != 999 { - t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) - } - if client.Client.RetryWaitMin != 15*time.Second { - t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) - } - if client.Client.RetryWaitMax != 150*time.Second { - t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) - } -} - -// testWithEnv sets an environment variable and returns a deferable func to clean up -func testWithEnv(t *testing.T, key string, value string) func() { - if err := os.Setenv(key, value); err != nil { - t.Fatalf("err: %v", err) - } - - return func() { - if err := os.Unsetenv(key); err != nil { - t.Fatalf("err: %v", err) - } - } -} diff --git a/internal/backend/remote-state/http/client.go b/internal/backend/remote-state/http/client.go deleted file mode 100644 index 71668c0a2d07..000000000000 --- a/internal/backend/remote-state/http/client.go +++ /dev/null @@ -1,256 +0,0 @@ -package http - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// httpClient is a remote client that stores data in Consul or HTTP REST. 
-type httpClient struct { - // Update & Retrieve - URL *url.URL - UpdateMethod string - - // Locking - LockURL *url.URL - LockMethod string - UnlockURL *url.URL - UnlockMethod string - - // HTTP - Client *retryablehttp.Client - Username string - Password string - - lockID string - jsonLockInfo []byte -} - -func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what string) (*http.Response, error) { - // If we have data we need a reader - var reader io.Reader = nil - if data != nil { - reader = bytes.NewReader(*data) - } - - // Create the request - req, err := retryablehttp.NewRequest(method, url.String(), reader) - if err != nil { - return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) - } - // Set up basic auth - if c.Username != "" { - req.SetBasicAuth(c.Username, c.Password) - } - - // Work with data/body - if data != nil { - req.Header.Set("Content-Type", "application/json") - req.ContentLength = int64(len(*data)) - - // Generate the MD5 - hash := md5.Sum(*data) - b64 := base64.StdEncoding.EncodeToString(hash[:]) - req.Header.Set("Content-MD5", b64) - } - - // Make the request - resp, err := c.Client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to %s: %v", what, err) - } - - return resp, nil -} - -func (c *httpClient) Lock(info *statemgr.LockInfo) (string, error) { - if c.LockURL == nil { - return "", nil - } - c.lockID = "" - - jsonLockInfo := info.Marshal() - resp, err := c.httpRequest(c.LockMethod, c.LockURL, &jsonLockInfo, "lock") - if err != nil { - return "", err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - c.lockID = info.ID - c.jsonLockInfo = jsonLockInfo - return info.ID, nil - case http.StatusUnauthorized: - return "", fmt.Errorf("HTTP remote state endpoint requires auth") - case http.StatusForbidden: - return "", fmt.Errorf("HTTP remote state endpoint invalid auth") - case http.StatusConflict, http.StatusLocked: - defer resp.Body.Close() - body, err := 
ioutil.ReadAll(resp.Body) - if err != nil { - return "", &statemgr.LockError{ - Info: info, - Err: fmt.Errorf("HTTP remote state already locked, failed to read body"), - } - } - existing := statemgr.LockInfo{} - err = json.Unmarshal(body, &existing) - if err != nil { - return "", &statemgr.LockError{ - Info: info, - Err: fmt.Errorf("HTTP remote state already locked, failed to unmarshal body"), - } - } - return "", &statemgr.LockError{ - Info: info, - Err: fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID), - } - default: - return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } -} - -func (c *httpClient) Unlock(id string) error { - if c.UnlockURL == nil { - return nil - } - - resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, &c.jsonLockInfo, "unlock") - if err != nil { - return err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - return nil - default: - return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } -} - -func (c *httpClient) Get() (*remote.Payload, error) { - resp, err := c.httpRequest("GET", c.URL, nil, "get state") - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // Handle the common status codes - switch resp.StatusCode { - case http.StatusOK: - // Handled after - case http.StatusNoContent: - return nil, nil - case http.StatusNotFound: - return nil, nil - case http.StatusUnauthorized: - return nil, fmt.Errorf("HTTP remote state endpoint requires auth") - case http.StatusForbidden: - return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") - case http.StatusInternalServerError: - return nil, fmt.Errorf("HTTP remote state internal server error") - default: - return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) - } - - // Read in the body - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, resp.Body); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - // Create the 
payload - payload := &remote.Payload{ - Data: buf.Bytes(), - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - // Check for the MD5 - if raw := resp.Header.Get("Content-MD5"); raw != "" { - md5, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, fmt.Errorf( - "Failed to decode Content-MD5 '%s': %s", raw, err) - } - - payload.MD5 = md5 - } else { - // Generate the MD5 - hash := md5.Sum(payload.Data) - payload.MD5 = hash[:] - } - - return payload, nil -} - -func (c *httpClient) Put(data []byte) error { - // Copy the target URL - base := *c.URL - - if c.lockID != "" { - query := base.Query() - query.Set("ID", c.lockID) - base.RawQuery = query.Encode() - } - - /* - // Set the force query parameter if needed - if force { - values := base.Query() - values.Set("force", "true") - base.RawQuery = values.Encode() - } - */ - - var method string = "POST" - if c.UpdateMethod != "" { - method = c.UpdateMethod - } - resp, err := c.httpRequest(method, &base, &data, "upload state") - if err != nil { - return err - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK, http.StatusCreated, http.StatusNoContent: - return nil - default: - return fmt.Errorf("HTTP error: %d", resp.StatusCode) - } -} - -func (c *httpClient) Delete() error { - // Make the request - resp, err := c.httpRequest("DELETE", c.URL, nil, "delete state") - if err != nil { - return err - } - defer resp.Body.Close() - - // Handle the error codes - switch resp.StatusCode { - case http.StatusOK: - return nil - default: - return fmt.Errorf("HTTP error: %d", resp.StatusCode) - } -} diff --git a/internal/backend/remote-state/http/client_test.go b/internal/backend/remote-state/http/client_test.go deleted file mode 100644 index c8bd121c73dd..000000000000 --- a/internal/backend/remote-state/http/client_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package http - -import ( - "bytes" - "fmt" - "io" - 
"net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/terraform/internal/states/remote" -) - -func TestHTTPClient_impl(t *testing.T) { - var _ remote.Client = new(httpClient) - var _ remote.ClientLocker = new(httpClient) -} - -func TestHTTPClient(t *testing.T) { - handler := new(testHTTPHandler) - ts := httptest.NewServer(http.HandlerFunc(handler.Handle)) - defer ts.Close() - - url, err := url.Parse(ts.URL) - if err != nil { - t.Fatalf("Parse: %s", err) - } - - // Test basic get/update - client := &httpClient{URL: url, Client: retryablehttp.NewClient()} - remote.TestClient(t, client) - - // test just a single PUT - p := &httpClient{ - URL: url, - UpdateMethod: "PUT", - Client: retryablehttp.NewClient(), - } - remote.TestClient(t, p) - - // Test locking and alternative UpdateMethod - a := &httpClient{ - URL: url, - UpdateMethod: "PUT", - LockURL: url, - LockMethod: "LOCK", - UnlockURL: url, - UnlockMethod: "UNLOCK", - Client: retryablehttp.NewClient(), - } - b := &httpClient{ - URL: url, - UpdateMethod: "PUT", - LockURL: url, - LockMethod: "LOCK", - UnlockURL: url, - UnlockMethod: "UNLOCK", - Client: retryablehttp.NewClient(), - } - remote.TestRemoteLocks(t, a, b) - - // test a WebDAV-ish backend - davhandler := new(testHTTPHandler) - ts = httptest.NewServer(http.HandlerFunc(davhandler.HandleWebDAV)) - defer ts.Close() - - url, err = url.Parse(ts.URL) - client = &httpClient{ - URL: url, - UpdateMethod: "PUT", - Client: retryablehttp.NewClient(), - } - if err != nil { - t.Fatalf("Parse: %s", err) - } - - remote.TestClient(t, client) // first time through: 201 - remote.TestClient(t, client) // second time, with identical data: 204 - - // test a broken backend - brokenHandler := new(testBrokenHTTPHandler) - brokenHandler.handler = new(testHTTPHandler) - ts = httptest.NewServer(http.HandlerFunc(brokenHandler.Handle)) - defer ts.Close() - - url, err = url.Parse(ts.URL) - if err != nil { - 
t.Fatalf("Parse: %s", err) - } - client = &httpClient{URL: url, Client: retryablehttp.NewClient()} - remote.TestClient(t, client) -} - -type testHTTPHandler struct { - Data []byte - Locked bool -} - -func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET": - w.Write(h.Data) - case "PUT": - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, r.Body); err != nil { - w.WriteHeader(500) - } - w.WriteHeader(201) - h.Data = buf.Bytes() - case "POST": - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, r.Body); err != nil { - w.WriteHeader(500) - } - h.Data = buf.Bytes() - case "LOCK": - if h.Locked { - w.WriteHeader(423) - } else { - h.Locked = true - } - case "UNLOCK": - h.Locked = false - case "DELETE": - h.Data = nil - w.WriteHeader(200) - default: - w.WriteHeader(500) - w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) - } -} - -// mod_dav-ish behavior -func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET": - w.Write(h.Data) - case "PUT": - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, r.Body); err != nil { - w.WriteHeader(500) - } - if reflect.DeepEqual(h.Data, buf.Bytes()) { - h.Data = buf.Bytes() - w.WriteHeader(204) - } else { - h.Data = buf.Bytes() - w.WriteHeader(201) - } - case "DELETE": - h.Data = nil - w.WriteHeader(200) - default: - w.WriteHeader(500) - w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) - } -} - -type testBrokenHTTPHandler struct { - lastRequestWasBroken bool - handler *testHTTPHandler -} - -func (h *testBrokenHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { - if h.lastRequestWasBroken { - h.lastRequestWasBroken = false - h.handler.Handle(w, r) - } else { - h.lastRequestWasBroken = true - w.WriteHeader(500) - } -} diff --git a/internal/backend/remote-state/inmem/backend.go b/internal/backend/remote-state/inmem/backend.go deleted file mode 100644 index 215dcc41be5e..000000000000 --- 
a/internal/backend/remote-state/inmem/backend.go +++ /dev/null @@ -1,208 +0,0 @@ -package inmem - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - statespkg "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// we keep the states and locks in package-level variables, so that they can be -// accessed from multiple instances of the backend. This better emulates -// backend instances accessing a single remote data store. -var ( - states stateMap - locks lockMap -) - -func init() { - Reset() -} - -// Reset clears out all existing state and lock data. -// This is used to initialize the package during init, as well as between -// tests. -func Reset() { - states = stateMap{ - m: map[string]*remote.State{}, - } - - locks = lockMap{ - m: map[string]*statemgr.LockInfo{}, - } -} - -// New creates a new backend for Inmem remote state. 
-func New() backend.Backend { - // Set the schema - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "lock_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "initializes the state in a locked configuration", - }, - }, - } - backend := &Backend{Backend: s} - backend.Backend.ConfigureFunc = backend.configure - return backend -} - -type Backend struct { - *schema.Backend -} - -func (b *Backend) configure(ctx context.Context) error { - states.Lock() - defer states.Unlock() - - defaultClient := &RemoteClient{ - Name: backend.DefaultStateName, - } - - states.m[backend.DefaultStateName] = &remote.State{ - Client: defaultClient, - } - - // set the default client lock info per the test config - data := schema.FromContextBackendConfig(ctx) - if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { - info := statemgr.NewLockInfo() - info.ID = v.(string) - info.Operation = "test" - info.Info = "test config" - - locks.lock(backend.DefaultStateName, info) - } - - return nil -} - -func (b *Backend) Workspaces() ([]string, error) { - states.Lock() - defer states.Unlock() - - var workspaces []string - - for s := range states.m { - workspaces = append(workspaces, s) - } - - sort.Strings(workspaces) - return workspaces, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - states.Lock() - defer states.Unlock() - - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - delete(states.m, name) - return nil -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - states.Lock() - defer states.Unlock() - - s := states.m[name] - if s == nil { - s = &remote.State{ - Client: &RemoteClient{ - Name: name, - }, - } - states.m[name] = s - - // to most closely replicate other implementations, we are going to - // take a lock and create a new state if it doesn't exist. 
- lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := s.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock inmem state: %s", err) - } - defer s.Unlock(lockID) - - // If we have no state, we have to create an empty state - if v := s.State(); v == nil { - if err := s.WriteState(statespkg.NewState()); err != nil { - return nil, err - } - if err := s.PersistState(nil); err != nil { - return nil, err - } - } - } - - return s, nil -} - -type stateMap struct { - sync.Mutex - m map[string]*remote.State -} - -// Global level locks for inmem backends. -type lockMap struct { - sync.Mutex - m map[string]*statemgr.LockInfo -} - -func (l *lockMap) lock(name string, info *statemgr.LockInfo) (string, error) { - l.Lock() - defer l.Unlock() - - lockInfo := l.m[name] - if lockInfo != nil { - lockErr := &statemgr.LockError{ - Info: lockInfo, - } - - lockErr.Err = errors.New("state locked") - // make a copy of the lock info to avoid any testing shenanigans - *lockErr.Info = *lockInfo - return "", lockErr - } - - info.Created = time.Now().UTC() - l.m[name] = info - - return info.ID, nil -} - -func (l *lockMap) unlock(name, id string) error { - l.Lock() - defer l.Unlock() - - lockInfo := l.m[name] - - if lockInfo == nil { - return errors.New("state not locked") - } - - lockErr := &statemgr.LockError{ - Info: &statemgr.LockInfo{}, - } - - if id != lockInfo.ID { - lockErr.Err = errors.New("invalid lock id") - *lockErr.Info = *lockInfo - return lockErr - } - - delete(l.m, name) - return nil -} diff --git a/internal/backend/remote-state/inmem/backend_test.go b/internal/backend/remote-state/inmem/backend_test.go deleted file mode 100644 index b7e9a555a906..000000000000 --- a/internal/backend/remote-state/inmem/backend_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package inmem - -import ( - "flag" - "os" - "testing" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/internal/backend" - statespkg 
"github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -func TestMain(m *testing.M) { - flag.Parse() - os.Exit(m.Run()) -} - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackendConfig(t *testing.T) { - defer Reset() - testID := "test_lock_id" - - config := map[string]interface{}{ - "lock_id": testID, - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - c := s.(*remote.State).Client.(*RemoteClient) - if c.Name != backend.DefaultStateName { - t.Fatal("client name is not configured") - } - - if err := locks.unlock(backend.DefaultStateName, testID); err != nil { - t.Fatalf("default state should have been locked: %s", err) - } -} - -func TestBackend(t *testing.T) { - defer Reset() - b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) - backend.TestBackendStates(t, b) -} - -func TestBackendLocked(t *testing.T) { - defer Reset() - b1 := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) - b2 := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).(*Backend) - - backend.TestBackendStateLocks(t, b1, b2) -} - -// use the this backen to test the remote.State implementation -func TestRemoteState(t *testing.T) { - defer Reset() - b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()) - - workspace := "workspace" - - // create a new workspace in this backend - s, err := b.StateMgr(workspace) - if err != nil { - t.Fatal(err) - } - - // force overwriting the remote state - newState := statespkg.NewState() - - if err := s.WriteState(newState); err != nil { - t.Fatal(err) - } - - if err := s.PersistState(nil); err != nil { - t.Fatal(err) - } - - if err := s.RefreshState(); err != nil { - t.Fatal(err) - } -} diff --git 
a/internal/backend/remote-state/inmem/client.go b/internal/backend/remote-state/inmem/client.go deleted file mode 100644 index 5f404567fd37..000000000000 --- a/internal/backend/remote-state/inmem/client.go +++ /dev/null @@ -1,47 +0,0 @@ -package inmem - -import ( - "crypto/md5" - - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// RemoteClient is a remote client that stores data in memory for testing. -type RemoteClient struct { - Data []byte - MD5 []byte - Name string -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - if c.Data == nil { - return nil, nil - } - - return &remote.Payload{ - Data: c.Data, - MD5: c.MD5, - }, nil -} - -func (c *RemoteClient) Put(data []byte) error { - md5 := md5.Sum(data) - - c.Data = data - c.MD5 = md5[:] - return nil -} - -func (c *RemoteClient) Delete() error { - c.Data = nil - c.MD5 = nil - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - return locks.lock(c.Name, info) -} -func (c *RemoteClient) Unlock(id string) error { - return locks.unlock(c.Name, id) -} diff --git a/internal/backend/remote-state/inmem/client_test.go b/internal/backend/remote-state/inmem/client_test.go deleted file mode 100644 index a9fb56b6e1cc..000000000000 --- a/internal/backend/remote-state/inmem/client_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package inmem - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - defer Reset() - b := backend.TestBackendConfig(t, New(), hcl.EmptyBody()) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, s.(*remote.State).Client) -} - -func 
TestInmemLocks(t *testing.T) { - defer Reset() - s, err := backend.TestBackendConfig(t, New(), hcl.EmptyBody()).StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s.(*remote.State).Client, s.(*remote.State).Client) -} diff --git a/internal/backend/remote-state/kubernetes/backend.go b/internal/backend/remote-state/kubernetes/backend.go deleted file mode 100644 index 907cda9e246e..000000000000 --- a/internal/backend/remote-state/kubernetes/backend.go +++ /dev/null @@ -1,405 +0,0 @@ -package kubernetes - -import ( - "bytes" - "context" - "fmt" - "log" - "os" - "path/filepath" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/hashicorp/terraform/version" - "github.com/mitchellh/go-homedir" - k8sSchema "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -// Modified from github.com/terraform-providers/terraform-provider-kubernetes - -const ( - noConfigError = ` - -[Kubernetes backend] Neither service_account nor load_config_file were set to true, -this could cause issues connecting to your Kubernetes cluster. -` -) - -var ( - secretResource = k8sSchema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "secrets", - } -) - -// New creates a new backend for kubernetes remote state. -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "secret_suffix": { - Type: schema.TypeString, - Required: true, - Description: "Suffix used when creating the secret. 
The secret will be named in the format: `tfstate-{workspace}-{secret_suffix}`.", - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Map of additional labels to be applied to the secret.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_NAMESPACE", "default"), - Description: "Namespace to store the secret in.", - }, - "in_cluster_config": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_IN_CLUSTER_CONFIG", false), - Description: "Used to authenticate to the cluster from inside a pod.", - }, - "load_config_file": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_LOAD_CONFIG_FILE", true), - Description: "Load local kubeconfig.", - }, - "host": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_HOST", ""), - Description: "The hostname (in form of URI) of Kubernetes master.", - }, - "username": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_USER", ""), - Description: "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - }, - "password": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_PASSWORD", ""), - Description: "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - }, - "insecure": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_INSECURE", false), - Description: "Whether server should be accessed without verifying the TLS certificate.", - }, - "client_certificate": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_CERT_DATA", ""), - Description: "PEM-encoded client certificate for TLS authentication.", - }, - "client_key": { - Type: schema.TypeString, - 
Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_KEY_DATA", ""), - Description: "PEM-encoded client certificate key for TLS authentication.", - }, - "cluster_ca_certificate": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLUSTER_CA_CERT_DATA", ""), - Description: "PEM-encoded root certificates bundle for TLS authentication.", - }, - "config_paths": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Description: "A list of paths to kube config files. Can be set with KUBE_CONFIG_PATHS environment variable.", - }, - "config_path": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CONFIG_PATH", ""), - Description: "Path to the kube config file. Can be set with KUBE_CONFIG_PATH environment variable.", - }, - "config_context": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX", ""), - }, - "config_context_auth_info": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_AUTH_INFO", ""), - Description: "", - }, - "config_context_cluster": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_CLUSTER", ""), - Description: "", - }, - "token": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_TOKEN", ""), - Description: "Token to authentifcate a service account.", - }, - "exec": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_version": { - Type: schema.TypeString, - Required: true, - }, - "command": { - Type: schema.TypeString, - Required: true, - }, - "env": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "args": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - Description: 
"Use a credential plugin to authenticate.", - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - kubernetesSecretClient dynamic.ResourceInterface - kubernetesLeaseClient coordinationv1.LeaseInterface - config *restclient.Config - namespace string - labels map[string]string - nameSuffix string -} - -func (b Backend) KubernetesSecretClient() (dynamic.ResourceInterface, error) { - if b.kubernetesSecretClient != nil { - return b.kubernetesSecretClient, nil - } - - client, err := dynamic.NewForConfig(b.config) - if err != nil { - return nil, fmt.Errorf("Failed to configure: %s", err) - } - - b.kubernetesSecretClient = client.Resource(secretResource).Namespace(b.namespace) - return b.kubernetesSecretClient, nil -} - -func (b Backend) KubernetesLeaseClient() (coordinationv1.LeaseInterface, error) { - if b.kubernetesLeaseClient != nil { - return b.kubernetesLeaseClient, nil - } - - client, err := kubernetes.NewForConfig(b.config) - if err != nil { - return nil, err - } - - b.kubernetesLeaseClient = client.CoordinationV1().Leases(b.namespace) - return b.kubernetesLeaseClient, nil -} - -func (b *Backend) configure(ctx context.Context) error { - if b.config != nil { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - - cfg, err := getInitialConfig(data) - if err != nil { - return err - } - - // Overriding with static configuration - cfg.UserAgent = fmt.Sprintf("HashiCorp/1.0 Terraform/%s", version.String()) - - if v, ok := data.GetOk("host"); ok { - cfg.Host = v.(string) - } - if v, ok := data.GetOk("username"); ok { - cfg.Username = v.(string) - } - if v, ok := data.GetOk("password"); ok { - cfg.Password = v.(string) - } - if v, ok := data.GetOk("insecure"); ok { - cfg.Insecure = v.(bool) - } - if v, ok := data.GetOk("cluster_ca_certificate"); ok { - cfg.CAData = 
bytes.NewBufferString(v.(string)).Bytes() - } - if v, ok := data.GetOk("client_certificate"); ok { - cfg.CertData = bytes.NewBufferString(v.(string)).Bytes() - } - if v, ok := data.GetOk("client_key"); ok { - cfg.KeyData = bytes.NewBufferString(v.(string)).Bytes() - } - if v, ok := data.GetOk("token"); ok { - cfg.BearerToken = v.(string) - } - - if v, ok := data.GetOk("labels"); ok { - labels := map[string]string{} - for k, vv := range v.(map[string]interface{}) { - labels[k] = vv.(string) - } - b.labels = labels - } - - ns := data.Get("namespace").(string) - b.namespace = ns - b.nameSuffix = data.Get("secret_suffix").(string) - b.config = cfg - - return nil -} - -func getInitialConfig(data *schema.ResourceData) (*restclient.Config, error) { - var cfg *restclient.Config - var err error - - inCluster := data.Get("in_cluster_config").(bool) - if inCluster { - cfg, err = restclient.InClusterConfig() - if err != nil { - return nil, err - } - } else { - cfg, err = tryLoadingConfigFile(data) - if err != nil { - return nil, err - } - } - - if cfg == nil { - cfg = &restclient.Config{} - } - return cfg, err -} - -func tryLoadingConfigFile(d *schema.ResourceData) (*restclient.Config, error) { - loader := &clientcmd.ClientConfigLoadingRules{} - - configPaths := []string{} - if v, ok := d.Get("config_path").(string); ok && v != "" { - configPaths = []string{v} - } else if v, ok := d.Get("config_paths").([]interface{}); ok && len(v) > 0 { - for _, p := range v { - configPaths = append(configPaths, p.(string)) - } - } else if v := os.Getenv("KUBE_CONFIG_PATHS"); v != "" { - configPaths = filepath.SplitList(v) - } - - expandedPaths := []string{} - for _, p := range configPaths { - path, err := homedir.Expand(p) - if err != nil { - log.Printf("[DEBUG] Could not expand path: %s", err) - return nil, err - } - log.Printf("[DEBUG] Using kubeconfig: %s", path) - expandedPaths = append(expandedPaths, path) - } - - if len(expandedPaths) == 1 { - loader.ExplicitPath = expandedPaths[0] - } 
else { - loader.Precedence = expandedPaths - } - - overrides := &clientcmd.ConfigOverrides{} - ctxSuffix := "; default context" - - ctx, ctxOk := d.GetOk("config_context") - authInfo, authInfoOk := d.GetOk("config_context_auth_info") - cluster, clusterOk := d.GetOk("config_context_cluster") - if ctxOk || authInfoOk || clusterOk { - ctxSuffix = "; overriden context" - if ctxOk { - overrides.CurrentContext = ctx.(string) - ctxSuffix += fmt.Sprintf("; config ctx: %s", overrides.CurrentContext) - log.Printf("[DEBUG] Using custom current context: %q", overrides.CurrentContext) - } - - overrides.Context = clientcmdapi.Context{} - if authInfoOk { - overrides.Context.AuthInfo = authInfo.(string) - ctxSuffix += fmt.Sprintf("; auth_info: %s", overrides.Context.AuthInfo) - } - if clusterOk { - overrides.Context.Cluster = cluster.(string) - ctxSuffix += fmt.Sprintf("; cluster: %s", overrides.Context.Cluster) - } - log.Printf("[DEBUG] Using overidden context: %#v", overrides.Context) - } - - if v, ok := d.GetOk("exec"); ok { - exec := &clientcmdapi.ExecConfig{} - if spec, ok := v.([]interface{})[0].(map[string]interface{}); ok { - exec.APIVersion = spec["api_version"].(string) - exec.Command = spec["command"].(string) - exec.Args = expandStringSlice(spec["args"].([]interface{})) - for kk, vv := range spec["env"].(map[string]interface{}) { - exec.Env = append(exec.Env, clientcmdapi.ExecEnvVar{Name: kk, Value: vv.(string)}) - } - } else { - return nil, fmt.Errorf("Failed to parse exec") - } - overrides.AuthInfo.Exec = exec - } - - cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides) - cfg, err := cc.ClientConfig() - if err != nil { - if pathErr, ok := err.(*os.PathError); ok && os.IsNotExist(pathErr.Err) { - log.Printf("[INFO] Unable to load config file as it doesn't exist at %q", pathErr.Path) - return nil, nil - } - return nil, fmt.Errorf("Failed to initialize kubernetes configuration: %s", err) - } - - log.Printf("[INFO] Successfully initialized 
config") - return cfg, nil -} - -func expandStringSlice(s []interface{}) []string { - result := make([]string, len(s), len(s)) - for k, v := range s { - // Handle the Terraform parser bug which turns empty strings in lists to nil. - if v == nil { - result[k] = "" - } else { - result[k] = v.(string) - } - } - return result -} diff --git a/internal/backend/remote-state/kubernetes/backend_state.go b/internal/backend/remote-state/kubernetes/backend_state.go deleted file mode 100644 index 56009bc34692..000000000000 --- a/internal/backend/remote-state/kubernetes/backend_state.go +++ /dev/null @@ -1,170 +0,0 @@ -package kubernetes - -import ( - "context" - "errors" - "fmt" - "sort" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Workspaces returns a list of names for the workspaces found in k8s. The default -// workspace is always returned as the first element in the slice. 
-func (b *Backend) Workspaces() ([]string, error) { - secretClient, err := b.KubernetesSecretClient() - if err != nil { - return nil, err - } - - secrets, err := secretClient.List( - context.Background(), - metav1.ListOptions{ - LabelSelector: tfstateKey + "=true", - }, - ) - if err != nil { - return nil, err - } - - // Use a map so there aren't duplicate workspaces - m := make(map[string]struct{}) - for _, secret := range secrets.Items { - sl := secret.GetLabels() - ws, ok := sl[tfstateWorkspaceKey] - if !ok { - continue - } - - key, ok := sl[tfstateSecretSuffixKey] - if !ok { - continue - } - - // Make sure it isn't default and the key matches - if ws != backend.DefaultStateName && key == b.nameSuffix { - m[ws] = struct{}{} - } - } - - states := []string{backend.DefaultStateName} - for k := range m { - states = append(states, k) - } - - sort.Strings(states[1:]) - return states, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - client, err := b.remoteClient(name) - if err != nil { - return err - } - - return client.Delete() -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - c, err := b.remoteClient(name) - if err != nil { - return nil, err - } - - stateMgr := &remote.State{Client: c} - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockID, err := stateMgr.Lock(lockInfo) - if err != nil { - return nil, err - } - - secretName, err := c.createSecretName() - if err != nil { - return nil, err - } - - // Local helper function so we can call it multiple places - unlock := func(baseErr error) error { - if err := stateMgr.Unlock(lockID); err != nil { - const unlockErrMsg = `%v - Additionally, unlocking the 
state in Kubernetes failed: - - Error message: %q - Lock ID (gen): %v - Secret Name: %v - - You may have to force-unlock this state in order to use it again. - The Kubernetes backend acquires a lock during initialization to ensure - the initial state file is created.` - return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, secretName) - } - - return baseErr - } - - if err := stateMgr.WriteState(states.NewState()); err != nil { - return nil, unlock(err) - } - if err := stateMgr.PersistState(nil); err != nil { - return nil, unlock(err) - } - - // Unlock, the state should now be initialized - if err := unlock(nil); err != nil { - return nil, err - } - - } - - return stateMgr, nil -} - -// get a remote client configured for this state -func (b *Backend) remoteClient(name string) (*RemoteClient, error) { - if name == "" { - return nil, errors.New("missing state name") - } - - secretClient, err := b.KubernetesSecretClient() - if err != nil { - return nil, err - } - - leaseClient, err := b.KubernetesLeaseClient() - if err != nil { - return nil, err - } - - client := &RemoteClient{ - kubernetesSecretClient: secretClient, - kubernetesLeaseClient: leaseClient, - namespace: b.namespace, - labels: b.labels, - nameSuffix: b.nameSuffix, - workspace: name, - } - - return client, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} diff --git a/internal/backend/remote-state/kubernetes/backend_test.go b/internal/backend/remote-state/kubernetes/backend_test.go deleted file mode 100644 index e24689f0fb1c..000000000000 --- a/internal/backend/remote-state/kubernetes/backend_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package kubernetes - -import ( - "context" - "fmt" - "math/rand" - "os" - "sync" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/statemgr" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - secretSuffix = "test-state" -) - -var namespace string - -// 
verify that we are doing ACC tests or the k8s tests specifically -func testACC(t *testing.T) { - skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_K8S_TEST") == "" - if skip { - t.Log("k8s backend tests require setting TF_ACC or TF_K8S_TEST") - t.Skip() - } - - ns := os.Getenv("KUBE_NAMESPACE") - - if ns != "" { - namespace = ns - } else { - namespace = "default" - } - - cleanupK8sResources(t) -} - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackend(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - // Test - backend.TestBackendStates(t, b1) -} - -func TestBackendLocks(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - // Get the backend. We need two to test locking. - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - // Test - backend.TestBackendStateLocks(t, b1, b2) - backend.TestBackendStateForceUnlock(t, b1, b2) -} - -func TestBackendLocksSoak(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - clientCount := 100 - lockAttempts := 100 - - lockers := []statemgr.Locker{} - for i := 0; i < clientCount; i++ { - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("Error creating state manager: %v", err) - } - - lockers = append(lockers, s.(statemgr.Locker)) - } - - wg := sync.WaitGroup{} - for i, l := range lockers { - wg.Add(1) - go func(locker statemgr.Locker, n int) { - defer wg.Done() - - li := statemgr.NewLockInfo() - li.Operation = "test" - li.Who = fmt.Sprintf("client-%v", n) - - 
for i := 0; i < lockAttempts; i++ { - id, err := locker.Lock(li) - if err != nil { - continue - } - - // hold onto the lock for a little bit - time.Sleep(time.Duration(rand.Intn(10)) * time.Microsecond) - - err = locker.Unlock(id) - if err != nil { - t.Errorf("failed to unlock: %v", err) - } - } - }(l, i) - } - - wg.Wait() -} - -func cleanupK8sResources(t *testing.T) { - ctx := context.Background() - // Get a backend to use the k8s client - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - b := b1.(*Backend) - - sClient, err := b.KubernetesSecretClient() - if err != nil { - t.Fatal(err) - } - - // Delete secrets - opts := metav1.ListOptions{LabelSelector: tfstateKey + "=true"} - secrets, err := sClient.List(ctx, opts) - if err != nil { - t.Fatal(err) - } - - delProp := metav1.DeletePropagationBackground - delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} - var errs []error - - for _, secret := range secrets.Items { - labels := secret.GetLabels() - key, ok := labels[tfstateSecretSuffixKey] - if !ok { - continue - } - - if key == secretSuffix { - err = sClient.Delete(ctx, secret.GetName(), delOps) - if err != nil { - errs = append(errs, err) - } - } - } - - leaseClient, err := b.KubernetesLeaseClient() - if err != nil { - t.Fatal(err) - } - - // Delete leases - leases, err := leaseClient.List(ctx, opts) - if err != nil { - t.Fatal(err) - } - - for _, lease := range leases.Items { - labels := lease.GetLabels() - key, ok := labels[tfstateSecretSuffixKey] - if !ok { - continue - } - - if key == secretSuffix { - err = leaseClient.Delete(ctx, lease.GetName(), delOps) - if err != nil { - errs = append(errs, err) - } - } - } - - if len(errs) > 0 { - t.Fatal(errs) - } -} diff --git a/internal/backend/remote-state/kubernetes/client.go b/internal/backend/remote-state/kubernetes/client.go deleted file mode 100644 index 12447c36e0c7..000000000000 --- 
a/internal/backend/remote-state/kubernetes/client.go +++ /dev/null @@ -1,413 +0,0 @@ -package kubernetes - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/client-go/dynamic" - _ "k8s.io/client-go/plugin/pkg/client/auth" // Import to initialize client auth plugins. - "k8s.io/utils/pointer" - - coordinationv1 "k8s.io/api/coordination/v1" - coordinationclientv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" -) - -const ( - tfstateKey = "tfstate" - tfstateSecretSuffixKey = "tfstateSecretSuffix" - tfstateWorkspaceKey = "tfstateWorkspace" - tfstateLockInfoAnnotation = "app.terraform.io/lock-info" - managedByKey = "app.kubernetes.io/managed-by" -) - -type RemoteClient struct { - kubernetesSecretClient dynamic.ResourceInterface - kubernetesLeaseClient coordinationclientv1.LeaseInterface - namespace string - labels map[string]string - nameSuffix string - workspace string -} - -func (c *RemoteClient) Get() (payload *remote.Payload, err error) { - secretName, err := c.createSecretName() - if err != nil { - return nil, err - } - secret, err := c.kubernetesSecretClient.Get(context.Background(), secretName, metav1.GetOptions{}) - if err != nil { - if k8serrors.IsNotFound(err) { - return nil, nil - } - return nil, err - } - - secretData := getSecretData(secret) - stateRaw, ok := secretData[tfstateKey] - if !ok { - // The secret exists but there is no state in it - return nil, nil - } - - stateRawString := stateRaw.(string) - - state, err := uncompressState(stateRawString) - if err != nil { - return nil, err - } - - md5 := md5.Sum(state) - - p := &remote.Payload{ - Data: 
state, - MD5: md5[:], - } - return p, nil -} - -func (c *RemoteClient) Put(data []byte) error { - ctx := context.Background() - secretName, err := c.createSecretName() - if err != nil { - return err - } - - payload, err := compressState(data) - if err != nil { - return err - } - - secret, err := c.getSecret(secretName) - if err != nil { - if !k8serrors.IsNotFound(err) { - return err - } - - secret = &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": metav1.ObjectMeta{ - Name: secretName, - Namespace: c.namespace, - Labels: c.getLabels(), - Annotations: map[string]string{"encoding": "gzip"}, - }, - }, - } - - secret, err = c.kubernetesSecretClient.Create(ctx, secret, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - setState(secret, payload) - _, err = c.kubernetesSecretClient.Update(ctx, secret, metav1.UpdateOptions{}) - return err -} - -// Delete the state secret -func (c *RemoteClient) Delete() error { - secretName, err := c.createSecretName() - if err != nil { - return err - } - - err = c.deleteSecret(secretName) - if err != nil { - if !k8serrors.IsNotFound(err) { - return err - } - } - - leaseName, err := c.createLeaseName() - if err != nil { - return err - } - - err = c.deleteLease(leaseName) - if err != nil { - if !k8serrors.IsNotFound(err) { - return err - } - } - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - ctx := context.Background() - leaseName, err := c.createLeaseName() - if err != nil { - return "", err - } - - lease, err := c.getLease(leaseName) - if err != nil { - if !k8serrors.IsNotFound(err) { - return "", err - } - - labels := c.getLabels() - lease = &coordinationv1.Lease{ - ObjectMeta: metav1.ObjectMeta{ - Name: leaseName, - Labels: labels, - Annotations: map[string]string{ - tfstateLockInfoAnnotation: string(info.Marshal()), - }, - }, - Spec: coordinationv1.LeaseSpec{ - HolderIdentity: pointer.StringPtr(info.ID), - }, - } - - _, err = 
c.kubernetesLeaseClient.Create(ctx, lease, metav1.CreateOptions{}) - if err != nil { - return "", err - } else { - return info.ID, nil - } - } - - if lease.Spec.HolderIdentity != nil { - if *lease.Spec.HolderIdentity == info.ID { - return info.ID, nil - } - - currentLockInfo, err := c.getLockInfo(lease) - if err != nil { - return "", err - } - - lockErr := &statemgr.LockError{ - Info: currentLockInfo, - Err: errors.New("the state is already locked by another terraform client"), - } - return "", lockErr - } - - lease.Spec.HolderIdentity = pointer.StringPtr(info.ID) - setLockInfo(lease, info.Marshal()) - _, err = c.kubernetesLeaseClient.Update(ctx, lease, metav1.UpdateOptions{}) - if err != nil { - return "", err - } - - return info.ID, err -} - -func (c *RemoteClient) Unlock(id string) error { - leaseName, err := c.createLeaseName() - if err != nil { - return err - } - - lease, err := c.getLease(leaseName) - if err != nil { - return err - } - - if lease.Spec.HolderIdentity == nil { - return fmt.Errorf("state is already unlocked") - } - - lockInfo, err := c.getLockInfo(lease) - if err != nil { - return err - } - - lockErr := &statemgr.LockError{Info: lockInfo} - if *lease.Spec.HolderIdentity != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - lease.Spec.HolderIdentity = nil - removeLockInfo(lease) - - _, err = c.kubernetesLeaseClient.Update(context.Background(), lease, metav1.UpdateOptions{}) - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil -} - -func (c *RemoteClient) getLockInfo(lease *coordinationv1.Lease) (*statemgr.LockInfo, error) { - lockData, ok := getLockInfo(lease) - if len(lockData) == 0 || !ok { - return nil, nil - } - - lockInfo := &statemgr.LockInfo{} - err := json.Unmarshal(lockData, lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} - -func (c *RemoteClient) getLabels() map[string]string { - l := map[string]string{ - tfstateKey: "true", - 
tfstateSecretSuffixKey: c.nameSuffix, - tfstateWorkspaceKey: c.workspace, - managedByKey: "terraform", - } - - if len(c.labels) != 0 { - for k, v := range c.labels { - l[k] = v - } - } - - return l -} - -func (c *RemoteClient) getSecret(name string) (*unstructured.Unstructured, error) { - return c.kubernetesSecretClient.Get(context.Background(), name, metav1.GetOptions{}) -} - -func (c *RemoteClient) getLease(name string) (*coordinationv1.Lease, error) { - return c.kubernetesLeaseClient.Get(context.Background(), name, metav1.GetOptions{}) -} - -func (c *RemoteClient) deleteSecret(name string) error { - secret, err := c.getSecret(name) - if err != nil { - return err - } - - labels := secret.GetLabels() - v, ok := labels[tfstateKey] - if !ok || v != "true" { - return fmt.Errorf("Secret does does not have %q label", tfstateKey) - } - - delProp := metav1.DeletePropagationBackground - delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} - return c.kubernetesSecretClient.Delete(context.Background(), name, delOps) -} - -func (c *RemoteClient) deleteLease(name string) error { - secret, err := c.getLease(name) - if err != nil { - return err - } - - labels := secret.GetLabels() - v, ok := labels[tfstateKey] - if !ok || v != "true" { - return fmt.Errorf("Lease does does not have %q label", tfstateKey) - } - - delProp := metav1.DeletePropagationBackground - delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} - return c.kubernetesLeaseClient.Delete(context.Background(), name, delOps) -} - -func (c *RemoteClient) createSecretName() (string, error) { - secretName := strings.Join([]string{tfstateKey, c.workspace, c.nameSuffix}, "-") - - errs := validation.IsDNS1123Subdomain(secretName) - if len(errs) > 0 { - k8sInfo := ` -This is a requirement for Kubernetes secret names. 
-The workspace name and key must adhere to Kubernetes naming conventions.` - msg := fmt.Sprintf("the secret name %v is invalid, ", secretName) - return "", errors.New(msg + strings.Join(errs, ",") + k8sInfo) - } - - return secretName, nil -} - -func (c *RemoteClient) createLeaseName() (string, error) { - n, err := c.createSecretName() - if err != nil { - return "", err - } - return "lock-" + n, nil -} - -func compressState(data []byte) ([]byte, error) { - b := new(bytes.Buffer) - gz := gzip.NewWriter(b) - if _, err := gz.Write(data); err != nil { - return nil, err - } - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func uncompressState(data string) ([]byte, error) { - decode, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return nil, err - } - - b := new(bytes.Buffer) - gz, err := gzip.NewReader(bytes.NewReader(decode)) - if err != nil { - return nil, err - } - b.ReadFrom(gz) - if err := gz.Close(); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func getSecretData(secret *unstructured.Unstructured) map[string]interface{} { - if m, ok := secret.Object["data"].(map[string]interface{}); ok { - return m - } - return map[string]interface{}{} -} - -func getLockInfo(lease *coordinationv1.Lease) ([]byte, bool) { - info, ok := lease.ObjectMeta.GetAnnotations()[tfstateLockInfoAnnotation] - if !ok { - return nil, false - } - return []byte(info), true -} - -func setLockInfo(lease *coordinationv1.Lease, l []byte) { - annotations := lease.ObjectMeta.GetAnnotations() - if annotations != nil { - annotations[tfstateLockInfoAnnotation] = string(l) - } else { - annotations = map[string]string{ - tfstateLockInfoAnnotation: string(l), - } - } - lease.ObjectMeta.SetAnnotations(annotations) -} - -func removeLockInfo(lease *coordinationv1.Lease) { - annotations := lease.ObjectMeta.GetAnnotations() - delete(annotations, tfstateLockInfoAnnotation) - lease.ObjectMeta.SetAnnotations(annotations) -} - -func 
setState(secret *unstructured.Unstructured, t []byte) { - secretData := getSecretData(secret) - secretData[tfstateKey] = t - secret.Object["data"] = secretData -} diff --git a/internal/backend/remote-state/kubernetes/client_test.go b/internal/backend/remote-state/kubernetes/client_test.go deleted file mode 100644 index 08e615423e54..000000000000 --- a/internal/backend/remote-state/kubernetes/client_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package kubernetes - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientLocks(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} - -func TestForceUnlock(t *testing.T) { - testACC(t) - defer cleanupK8sResources(t) - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - 
})) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "secret_suffix": secretSuffix, - })) - - // first test with default - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test" - info.Who = "clientA" - - lockID, err := s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal("failed to get default state to force unlock:", err) - } - - if err := s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock default state") - } - - // now try the same thing with a named state - // first test with default - s1, err = b1.StateMgr("test") - if err != nil { - t.Fatal(err) - } - - info = statemgr.NewLockInfo() - info.Operation = "test" - info.Who = "clientA" - - lockID, err = s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err = b2.StateMgr("test") - if err != nil { - t.Fatal("failed to get named state to force unlock:", err) - } - - if err = s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock named state") - } -} diff --git a/internal/backend/remote-state/oss/backend.go b/internal/backend/remote-state/oss/backend.go deleted file mode 100644 index 468a883298c8..000000000000 --- a/internal/backend/remote-state/oss/backend.go +++ /dev/null @@ -1,706 +0,0 @@ -package oss - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints" - - "github.com/aliyun/alibaba-cloud-sdk-go/sdk" - "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" - 
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" - "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" - "github.com/aliyun/alibaba-cloud-sdk-go/services/location" - "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" - "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" - "github.com/hashicorp/go-cleanhttp" - "github.com/jmespath/go-jmespath" - "github.com/mitchellh/go-homedir" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/hashicorp/terraform/version" -) - -// Deprecated in favor of flattening assume_role_* options -func deprecatedAssumeRoleSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Deprecated: "use assume_role_* options instead", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role_arn": { - Type: schema.TypeString, - Required: true, - Description: "The ARN of a RAM role to assume prior to making API calls.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), - }, - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: "The session name to use when assuming the role.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), - }, - "policy": { - Type: schema.TypeString, - Optional: true, - Description: "The permissions applied when assuming a role. 
You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", - }, - "session_expiration": { - Type: schema.TypeInt, - Optional: true, - Description: "The time after which the established session for assuming role expires.", - ValidateFunc: func(v interface{}, k string) ([]string, []error) { - min := 900 - max := 3600 - value, ok := v.(int) - if !ok { - return nil, []error{fmt.Errorf("expected type of %s to be int", k)} - } - - if value < min || value > max { - return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} - } - - return nil, nil - }, - }, - }, - }, - } -} - -// New creates a new backend for OSS remote state. -func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Optional: true, - Description: "Alibaba Cloud Access Key ID", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")), - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Description: "Alibaba Cloud Access Secret Key", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")), - }, - - "security_token": { - Type: schema.TypeString, - Optional: true, - Description: "Alibaba Cloud Security Token", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""), - }, - - "ecs_role_name": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ECS_ROLE_NAME", os.Getenv("ALICLOUD_ECS_ROLE_NAME")), - Description: "The RAM Role Name attached on a ECS instance for API operations. 
You can retrieve this from the 'Access Control' section of the Alibaba Cloud console.", - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Description: "The region of the OSS bucket.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")), - }, - "sts_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the STS API", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_STS_ENDPOINT", ""), - }, - "tablestore_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the TableStore API", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""), - }, - "endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the OSS API", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")), - }, - - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the OSS bucket", - }, - - "prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The directory where state files will be saved inside the bucket", - Default: "env:", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - prefix := v.(string) - if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { - return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")} - } - return nil, nil - }, - }, - - "key": { - Type: schema.TypeString, - Optional: true, - Description: "The path of the state file inside the bucket", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { - return nil, []error{fmt.Errorf("key can not start and end with '/'")} - } - return nil, nil - }, - Default: "terraform.tfstate", - }, - - "tablestore_table": { - Type: schema.TypeString, - Optional: true, - Description: "TableStore table for state locking and 
consistency", - Default: "", - }, - - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether to enable server side encryption of the state file", - Default: false, - }, - - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Object ACL to be applied to the state file", - Default: "", - ValidateFunc: func(v interface{}, k string) ([]string, []error) { - if value := v.(string); value != "" { - acls := oss.ACLType(value) - if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite { - return nil, []error{fmt.Errorf( - "%q must be a valid ACL value , expected %s, %s or %s, got %q", - k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)} - } - } - return nil, nil - }, - }, - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SHARED_CREDENTIALS_FILE", ""), - Description: "This is the path to the shared credentials file. If this is not set and a profile is specified, `~/.aliyun/config.json` will be used.", - }, - "profile": { - Type: schema.TypeString, - Optional: true, - Description: "This is the Alibaba Cloud profile name as set in the shared credentials file. 
It can also be sourced from the `ALICLOUD_PROFILE` environment variable.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_PROFILE", ""), - }, - "assume_role": deprecatedAssumeRoleSchema(), - "assume_role_role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "The ARN of a RAM role to assume prior to making API calls.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), - }, - "assume_role_session_name": { - Type: schema.TypeString, - Optional: true, - Description: "The session name to use when assuming the role.", - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), - }, - "assume_role_policy": { - Type: schema.TypeString, - Optional: true, - Description: "The permissions applied when assuming a role. You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", - }, - "assume_role_session_expiration": { - Type: schema.TypeInt, - Optional: true, - Description: "The time after which the established session for assuming role expires.", - ValidateFunc: func(v interface{}, k string) ([]string, []error) { - min := 900 - max := 3600 - value, ok := v.(int) - if !ok { - return nil, []error{fmt.Errorf("expected type of %s to be int", k)} - } - - if value < min || value > max { - return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} - } - - return nil, nil - }, - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - ossClient *oss.Client - otsClient *tablestore.TableStoreClient - - bucketName string - statePrefix string - stateKey string - serverSideEncryption bool - acl string - otsEndpoint string - otsTable string -} - -func (b *Backend) configure(ctx context.Context) error { - if b.ossClient != nil { - return nil - } - - // Grab the resource data - d := 
schema.FromContextBackendConfig(ctx) - - b.bucketName = d.Get("bucket").(string) - b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./") - b.stateKey = d.Get("key").(string) - b.serverSideEncryption = d.Get("encrypt").(bool) - b.acl = d.Get("acl").(string) - - var getBackendConfig = func(str string, key string) string { - if str == "" { - value, err := getConfigFromProfile(d, key) - if err == nil && value != nil { - str = value.(string) - } - } - return str - } - - accessKey := getBackendConfig(d.Get("access_key").(string), "access_key_id") - secretKey := getBackendConfig(d.Get("secret_key").(string), "access_key_secret") - securityToken := getBackendConfig(d.Get("security_token").(string), "sts_token") - region := getBackendConfig(d.Get("region").(string), "region_id") - - stsEndpoint := d.Get("sts_endpoint").(string) - endpoint := d.Get("endpoint").(string) - schma := "https" - - roleArn := getBackendConfig("", "ram_role_arn") - sessionName := getBackendConfig("", "ram_session_name") - var policy string - var sessionExpiration int - expiredSeconds, err := getConfigFromProfile(d, "expired_seconds") - if err == nil && expiredSeconds != nil { - sessionExpiration = (int)(expiredSeconds.(float64)) - } - - if v, ok := d.GetOk("assume_role_role_arn"); ok && v.(string) != "" { - roleArn = v.(string) - if v, ok := d.GetOk("assume_role_session_name"); ok { - sessionName = v.(string) - } - if v, ok := d.GetOk("assume_role_policy"); ok { - policy = v.(string) - } - if v, ok := d.GetOk("assume_role_session_expiration"); ok { - sessionExpiration = v.(int) - } - } else if v, ok := d.GetOk("assume_role"); ok { - // deprecated assume_role block - for _, v := range v.(*schema.Set).List() { - assumeRole := v.(map[string]interface{}) - if assumeRole["role_arn"].(string) != "" { - roleArn = assumeRole["role_arn"].(string) - } - if assumeRole["session_name"].(string) != "" { - sessionName = assumeRole["session_name"].(string) - } - policy = 
assumeRole["policy"].(string) - sessionExpiration = assumeRole["session_expiration"].(int) - } - } - - if sessionName == "" { - sessionName = "terraform" - } - if sessionExpiration == 0 { - if v := os.Getenv("ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"); v != "" { - if expiredSeconds, err := strconv.Atoi(v); err == nil { - sessionExpiration = expiredSeconds - } - } - if sessionExpiration == 0 { - sessionExpiration = 3600 - } - } - - if accessKey == "" { - ecsRoleName := getBackendConfig(d.Get("ecs_role_name").(string), "ram_role_name") - subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAuthCredentialByEcsRoleName(ecsRoleName) - if err != nil { - return err - } - accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken - } - - if roleArn != "" { - subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAssumeRoleAK(accessKey, secretKey, securityToken, region, roleArn, sessionName, policy, stsEndpoint, sessionExpiration) - if err != nil { - return err - } - accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken - } - - if endpoint == "" { - endpointsResponse, err := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region) - if err != nil { - log.Printf("[WARN] getting oss endpoint failed and using oss-%s.aliyuncs.com instead. 
Error: %#v.", region, err) - } else { - for _, endpointItem := range endpointsResponse.Endpoints.Endpoint { - if endpointItem.Type == "openAPI" { - endpoint = endpointItem.Endpoint - break - } - } - } - if endpoint == "" { - endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) - } - } - if !strings.HasPrefix(endpoint, "http") { - endpoint = fmt.Sprintf("%s://%s", schma, endpoint) - } - log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint) - var options []oss.ClientOption - if securityToken != "" { - options = append(options, oss.SecurityToken(securityToken)) - } - options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion))) - - proxyUrl := getHttpProxyUrl() - if proxyUrl != nil { - options = append(options, oss.Proxy(proxyUrl.String())) - } - - client, err := oss.New(endpoint, accessKey, secretKey, options...) - b.ossClient = client - otsEndpoint := d.Get("tablestore_endpoint").(string) - if otsEndpoint != "" { - if !strings.HasPrefix(otsEndpoint, "http") { - otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint) - } - b.otsEndpoint = otsEndpoint - parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".") - b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig()) - } - b.otsTable = d.Get("tablestore_table").(string) - - return err -} - -func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointsResponse, error) { - args := location.CreateDescribeEndpointsRequest() - args.ServiceCode = "oss" - args.Id = region - args.Domain = "location-readonly.aliyuncs.com" - - locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token)) - if err != nil { - return nil, fmt.Errorf("unable to initialize the location client: %#v", err) - - } - 
locationClient.AppendUserAgent(TerraformUA, TerraformVersion) - endpointsResponse, err := locationClient.DescribeEndpoints(args) - if err != nil { - return nil, fmt.Errorf("describe oss endpoint using region: %#v got an error: %#v", region, err) - } - return endpointsResponse, nil -} - -func getAssumeRoleAK(accessKey, secretKey, stsToken, region, roleArn, sessionName, policy, stsEndpoint string, sessionExpiration int) (string, string, string, error) { - request := sts.CreateAssumeRoleRequest() - request.RoleArn = roleArn - request.RoleSessionName = sessionName - request.DurationSeconds = requests.NewInteger(sessionExpiration) - request.Policy = policy - request.Scheme = "https" - - var client *sts.Client - var err error - if stsToken == "" { - client, err = sts.NewClientWithAccessKey(region, accessKey, secretKey) - } else { - client, err = sts.NewClientWithStsToken(region, accessKey, secretKey, stsToken) - } - if err != nil { - return "", "", "", err - } - if stsEndpoint != "" { - endpoints.AddEndpointMapping(region, "STS", stsEndpoint) - } - response, err := client.AssumeRole(request) - if err != nil { - return "", "", "", err - } - return response.Credentials.AccessKeyId, response.Credentials.AccessKeySecret, response.Credentials.SecurityToken, nil -} - -func getSdkConfig() *sdk.Config { - return sdk.NewConfig(). - WithMaxRetryTime(5). - WithTimeout(time.Duration(30) * time.Second). - WithGoRoutinePoolSize(10). - WithDebug(false). - WithHttpTransport(getTransport()). 
- WithScheme("HTTPS") -} - -func getTransport() *http.Transport { - handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) - if err != nil { - handshakeTimeout = 120 - } - transport := cleanhttp.DefaultTransport() - transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second - transport.Proxy = http.ProxyFromEnvironment - return transport -} - -type Invoker struct { - catchers []*Catcher -} - -type Catcher struct { - Reason string - RetryCount int - RetryWaitSeconds int -} - -const TerraformUA = "HashiCorp-Terraform" - -var TerraformVersion = strings.TrimSuffix(version.String(), "-dev") -var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3} -var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3} - -func NewInvoker() Invoker { - i := Invoker{} - i.AddCatcher(ClientErrorCatcher) - i.AddCatcher(ServiceBusyCatcher) - return i -} - -func (a *Invoker) AddCatcher(catcher Catcher) { - a.catchers = append(a.catchers, &catcher) -} - -func (a *Invoker) Run(f func() error) error { - err := f() - - if err == nil { - return nil - } - - for _, catcher := range a.catchers { - if strings.Contains(err.Error(), catcher.Reason) { - catcher.RetryCount-- - - if catcher.RetryCount <= 0 { - return fmt.Errorf("retry timeout and got an error: %#v", err) - } else { - time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second) - return a.Run(f) - } - } - } - return err -} - -var providerConfig map[string]interface{} - -func getConfigFromProfile(d *schema.ResourceData, ProfileKey string) (interface{}, error) { - - if providerConfig == nil { - if v, ok := d.GetOk("profile"); !ok || v.(string) == "" { - return nil, nil - } - current := d.Get("profile").(string) - // Set CredsFilename, expanding home directory - profilePath, err := homedir.Expand(d.Get("shared_credentials_file").(string)) - if err != nil { - return nil, err - } - if profilePath == "" { - profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("HOME")) - if 
runtime.GOOS == "windows" { - profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("USERPROFILE")) - } - } - providerConfig = make(map[string]interface{}) - _, err = os.Stat(profilePath) - if !os.IsNotExist(err) { - data, err := ioutil.ReadFile(profilePath) - if err != nil { - return nil, err - } - config := map[string]interface{}{} - err = json.Unmarshal(data, &config) - if err != nil { - return nil, err - } - for _, v := range config["profiles"].([]interface{}) { - if current == v.(map[string]interface{})["name"] { - providerConfig = v.(map[string]interface{}) - } - } - } - } - - mode := "" - if v, ok := providerConfig["mode"]; ok { - mode = v.(string) - } else { - return v, nil - } - switch ProfileKey { - case "access_key_id", "access_key_secret": - if mode == "EcsRamRole" { - return "", nil - } - case "ram_role_name": - if mode != "EcsRamRole" { - return "", nil - } - case "sts_token": - if mode != "StsToken" { - return "", nil - } - case "ram_role_arn", "ram_session_name": - if mode != "RamRoleArn" { - return "", nil - } - case "expired_seconds": - if mode != "RamRoleArn" { - return float64(0), nil - } - } - - return providerConfig[ProfileKey], nil -} - -var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" - -// getAuthCredentialByEcsRoleName aims to access meta to get sts credential -// Actually, the job should be done by sdk, but currently not all resources and products support alibaba-cloud-sdk-go, -// and their go sdk does support ecs role name. 
-// This method is a temporary solution and it should be removed after all go sdk support ecs role name -// The related PR: https://github.com/terraform-providers/terraform-provider-alicloud/pull/731 -func getAuthCredentialByEcsRoleName(ecsRoleName string) (accessKey, secretKey, token string, err error) { - - if ecsRoleName == "" { - return - } - requestUrl := securityCredURL + ecsRoleName - httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader("")) - if err != nil { - err = fmt.Errorf("build sts requests err: %s", err.Error()) - return - } - httpClient := &http.Client{} - httpResponse, err := httpClient.Do(httpRequest) - if err != nil { - err = fmt.Errorf("get Ecs sts token err : %s", err.Error()) - return - } - - response := responses.NewCommonResponse() - err = responses.Unmarshal(response, httpResponse, "") - if err != nil { - err = fmt.Errorf("unmarshal Ecs sts token response err : %s", err.Error()) - return - } - - if response.GetHttpStatus() != http.StatusOK { - err = fmt.Errorf("get Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString()) - return - } - var data interface{} - err = json.Unmarshal(response.GetHttpContentBytes(), &data) - if err != nil { - err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) - return - } - code, err := jmespath.Search("Code", data) - if err != nil { - err = fmt.Errorf("refresh Ecs sts token err, fail to get Code: %s", err.Error()) - return - } - if code.(string) != "Success" { - err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") - return - } - accessKeyId, err := jmespath.Search("AccessKeyId", data) - if err != nil { - err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %s", err.Error()) - return - } - accessKeySecret, err := jmespath.Search("AccessKeySecret", data) - if err != nil { - err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %s", err.Error()) - return 
- } - securityToken, err := jmespath.Search("SecurityToken", data) - if err != nil { - err = fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %s", err.Error()) - return - } - - if accessKeyId == nil || accessKeySecret == nil || securityToken == nil { - err = fmt.Errorf("there is no any available accesskey, secret and security token for Ecs role %s", ecsRoleName) - return - } - - return accessKeyId.(string), accessKeySecret.(string), securityToken.(string), nil -} - -func getHttpProxyUrl() *url.URL { - for _, v := range []string{"HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"} { - value := strings.Trim(os.Getenv(v), " ") - if value != "" { - if !regexp.MustCompile(`^http(s)?://`).MatchString(value) { - value = fmt.Sprintf("https://%s", value) - } - proxyUrl, err := url.Parse(value) - if err == nil { - return proxyUrl - } - break - } - } - return nil -} diff --git a/internal/backend/remote-state/oss/backend_state.go b/internal/backend/remote-state/oss/backend_state.go deleted file mode 100644 index c1b6616e5efe..000000000000 --- a/internal/backend/remote-state/oss/backend_state.go +++ /dev/null @@ -1,197 +0,0 @@ -package oss - -import ( - "errors" - "fmt" - "log" - "path" - "sort" - "strings" - - "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -const ( - lockFileSuffix = ".tflock" -) - -// get a remote client configured for this state -func (b *Backend) remoteClient(name string) (*RemoteClient, error) { - if name == "" { - return nil, errors.New("missing state name") - } - - client := &RemoteClient{ - ossClient: b.ossClient, - bucketName: b.bucketName, - stateFile: b.stateFile(name), - lockFile: b.lockFile(name), - serverSideEncryption: b.serverSideEncryption, - acl: 
b.acl, - otsTable: b.otsTable, - otsClient: b.otsClient, - } - if b.otsEndpoint != "" && b.otsTable != "" { - _, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{ - TableName: b.otsTable, - }) - if err != nil { - return client, fmt.Errorf("error describing table store %s: %#v", b.otsTable, err) - } - } - - return client, nil -} - -func (b *Backend) Workspaces() ([]string, error) { - bucket, err := b.ossClient.Bucket(b.bucketName) - if err != nil { - return []string{""}, fmt.Errorf("error getting bucket: %#v", err) - } - - var options []oss.Option - options = append(options, oss.Prefix(b.statePrefix+"/"), oss.MaxKeys(1000)) - resp, err := bucket.ListObjects(options...) - if err != nil { - return nil, err - } - - result := []string{backend.DefaultStateName} - prefix := b.statePrefix - lastObj := "" - for { - for _, obj := range resp.Objects { - // we have 3 parts, the state prefix, the workspace name, and the state file: // - if path.Join(b.statePrefix, b.stateKey) == obj.Key { - // filter the default workspace - continue - } - lastObj = obj.Key - parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/") - if len(parts) > 0 && parts[0] != "" { - result = append(result, parts[0]) - } - } - if resp.IsTruncated { - if len(options) == 3 { - options[2] = oss.Marker(lastObj) - } else { - options = append(options, oss.Marker(lastObj)) - } - resp, err = bucket.ListObjects(options...) 
- if err != nil { - return nil, err - } - } else { - break - } - } - sort.Strings(result[1:]) - return result, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - client, err := b.remoteClient(name) - if err != nil { - return err - } - return client.Delete() -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - client, err := b.remoteClient(name) - if err != nil { - return nil, err - } - stateMgr := &remote.State{Client: client} - - // Check to see if this state already exists. - existing, err := b.Workspaces() - if err != nil { - return nil, err - } - - log.Printf("[DEBUG] Current workspace name: %s. All workspaces:%#v", name, existing) - - exists := false - for _, s := range existing { - if s == name { - exists = true - break - } - } - // We need to create the object so it's listed by States. - if !exists { - // take a lock on this state while we write it - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock OSS state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(e error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err) - } - return e - } - - // Grab the value - if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } 
- - } - return stateMgr, nil -} - -func (b *Backend) stateFile(name string) string { - if name == backend.DefaultStateName { - return path.Join(b.statePrefix, b.stateKey) - } - return path.Join(b.statePrefix, name, b.stateKey) -} - -func (b *Backend) lockFile(name string) string { - return b.stateFile(name) + lockFileSuffix -} - -const stateUnlockError = ` -Error unlocking Alibaba Cloud OSS state file: - -Lock ID: %s -Error message: %#v - -You may have to force-unlock this state in order to use it again. -The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created. -` diff --git a/internal/backend/remote-state/oss/backend_test.go b/internal/backend/remote-state/oss/backend_test.go deleted file mode 100644 index e9bc8871639e..000000000000 --- a/internal/backend/remote-state/oss/backend_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package oss - -import ( - "fmt" - "math/rand" - "os" - "testing" - "time" - - "strings" - - "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" -) - -// verify that we are doing ACC tests or the OSS tests specifically -func testACC(t *testing.T) { - skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == "" - if skip { - t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST") - t.Skip() - } - if skip { - t.Fatal("oss backend tests require setting ALICLOUD_ACCESS_KEY or ALICLOUD_ACCESS_KEY_ID") - } - if os.Getenv("ALICLOUD_REGION") == "" { - os.Setenv("ALICLOUD_REGION", "cn-beijing") - } -} - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackendConfig(t *testing.T) { - testACC(t) - config := map[string]interface{}{ - "region": "cn-beijing", - "bucket": "terraform-backend-oss-test", - "prefix": "mystate", - "key": "first.tfstate", - "tablestore_endpoint": 
"https://terraformstate.cn-beijing.ots.aliyuncs.com", - "tablestore_table": "TableStore", - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - - if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { - t.Fatalf("Incorrect region was provided") - } - if b.bucketName != "terraform-backend-oss-test" { - t.Fatalf("Incorrect bucketName was provided") - } - if b.statePrefix != "mystate" { - t.Fatalf("Incorrect state file path was provided") - } - if b.stateKey != "first.tfstate" { - t.Fatalf("Incorrect keyName was provided") - } - - if b.ossClient.Config.AccessKeyID == "" { - t.Fatalf("No Access Key Id was provided") - } - if b.ossClient.Config.AccessKeySecret == "" { - t.Fatalf("No Secret Access Key was provided") - } -} - -func TestBackendConfigWorkSpace(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-backend-oss-test-%d", rand.Intn(1000)) - config := map[string]interface{}{ - "region": "cn-beijing", - "bucket": bucketName, - "prefix": "mystate", - "key": "first.tfstate", - "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", - "tablestore_table": "TableStore", - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - createOSSBucket(t, b.ossClient, bucketName) - defer deleteOSSBucket(t, b.ossClient, bucketName) - if _, err := b.Workspaces(); err != nil { - t.Fatal(err.Error()) - } - if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { - t.Fatalf("Incorrect region was provided") - } - if b.bucketName != bucketName { - t.Fatalf("Incorrect bucketName was provided") - } - if b.statePrefix != "mystate" { - t.Fatalf("Incorrect state file path was provided") - } - if b.stateKey != "first.tfstate" { - t.Fatalf("Incorrect keyName was provided") - } - - if b.ossClient.Config.AccessKeyID == "" { - t.Fatalf("No Access Key Id was provided") - } - if b.ossClient.Config.AccessKeySecret == "" { - t.Fatalf("No Secret 
Access Key was provided") - } -} - -func TestBackendConfigProfile(t *testing.T) { - testACC(t) - config := map[string]interface{}{ - "region": "cn-beijing", - "bucket": "terraform-backend-oss-test", - "prefix": "mystate", - "key": "first.tfstate", - "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", - "tablestore_table": "TableStore", - "profile": "default", - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - - if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { - t.Fatalf("Incorrect region was provided") - } - if b.bucketName != "terraform-backend-oss-test" { - t.Fatalf("Incorrect bucketName was provided") - } - if b.statePrefix != "mystate" { - t.Fatalf("Incorrect state file path was provided") - } - if b.stateKey != "first.tfstate" { - t.Fatalf("Incorrect keyName was provided") - } - - if b.ossClient.Config.AccessKeyID == "" { - t.Fatalf("No Access Key Id was provided") - } - if b.ossClient.Config.AccessKeySecret == "" { - t.Fatalf("No Secret Access Key was provided") - } -} - -func TestBackendConfig_invalidKey(t *testing.T) { - testACC(t) - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "cn-beijing", - "bucket": "terraform-backend-oss-test", - "prefix": "/leading-slash", - "name": "/test.tfstate", - "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", - "tablestore_table": "TableStore", - }) - - _, results := New().PrepareConfig(cfg) - if !results.HasErrors() { - t.Fatal("expected config validation error") - } -} - -func TestBackend(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix()) - statePrefix := "multi/level/path/" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": statePrefix, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), 
backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": statePrefix, - })).(*Backend) - - createOSSBucket(t, b1.ossClient, bucketName) - defer deleteOSSBucket(t, b1.ossClient, bucketName) - - backend.TestBackendStates(t, b1) - backend.TestBackendStateLocks(t, b1, b2) - backend.TestBackendStateForceUnlock(t, b1, b2) -} - -func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { - // Be clear about what we're doing in case the user needs to clean this up later. - if err := ossClient.CreateBucket(bucketName); err != nil { - t.Fatal("failed to create test OSS bucket:", err) - } -} - -func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { - warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alibaba Cloud account and may incur storage charges. (error was %s)" - - // first we have to get rid of the env objects, or we can't delete the bucket - bucket, err := ossClient.Bucket(bucketName) - if err != nil { - t.Fatal("Error getting bucket:", err) - return - } - objects, err := bucket.ListObjects() - if err != nil { - t.Logf(warning, err) - return - } - for _, obj := range objects.Objects { - if err := bucket.DeleteObject(obj.Key); err != nil { - // this will need cleanup no matter what, so just warn and exit - t.Logf(warning, err) - return - } - } - - if err := ossClient.DeleteBucket(bucketName); err != nil { - t.Logf(warning, err) - } -} - -// create the tablestore table, and wait until we can query it. 
-func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { - tableMeta := new(tablestore.TableMeta) - tableMeta.TableName = tableName - tableMeta.AddPrimaryKeyColumn(pkName, tablestore.PrimaryKeyType_STRING) - - tableOption := new(tablestore.TableOption) - tableOption.TimeToAlive = -1 - tableOption.MaxVersion = 1 - - reservedThroughput := new(tablestore.ReservedThroughput) - - _, err := otsClient.CreateTable(&tablestore.CreateTableRequest{ - TableMeta: tableMeta, - TableOption: tableOption, - ReservedThroughput: reservedThroughput, - }) - if err != nil { - t.Fatal(err) - } -} - -func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { - params := &tablestore.DeleteTableRequest{ - TableName: tableName, - } - _, err := otsClient.DeleteTable(params) - if err != nil { - t.Logf("WARNING: Failed to delete the test TableStore table %q. It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err) - } -} diff --git a/internal/backend/remote-state/oss/client.go b/internal/backend/remote-state/oss/client.go deleted file mode 100644 index 0c2938d0e6c1..000000000000 --- a/internal/backend/remote-state/oss/client.go +++ /dev/null @@ -1,449 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "log" - "time" - - "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" - "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - "github.com/pkg/errors" - - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -const ( - // Store the last saved serial in tablestore with this suffix for consistency checks. - stateIDSuffix = "-md5" - - pkName = "LockID" -) - -var ( - // The amount of time we will retry a state waiting for it to match the - // expected checksum. 
- consistencyRetryTimeout = 10 * time.Second - - // delay when polling the state - consistencyRetryPollInterval = 2 * time.Second -) - -// test hook called when checksums don't match -var testChecksumHook func() - -type RemoteClient struct { - ossClient *oss.Client - otsClient *tablestore.TableStoreClient - bucketName string - stateFile string - lockFile string - serverSideEncryption bool - acl string - otsTable string -} - -func (c *RemoteClient) Get() (payload *remote.Payload, err error) { - deadline := time.Now().Add(consistencyRetryTimeout) - - // If we have a checksum, and the returned payload doesn't match, we retry - // up until deadline. - for { - payload, err = c.getObj() - if err != nil { - return nil, err - } - - // If the remote state was manually removed the payload will be nil, - // but if there's still a digest entry for that state we will still try - // to compare the MD5 below. - var digest []byte - if payload != nil { - digest = payload.MD5 - } - - // verify that this state is what we expect - if expected, err := c.getMD5(); err != nil { - log.Printf("[WARN] failed to fetch state md5: %s", err) - } else if len(expected) > 0 && !bytes.Equal(expected, digest) { - log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) - - if testChecksumHook != nil { - testChecksumHook() - } - - if time.Now().Before(deadline) { - time.Sleep(consistencyRetryPollInterval) - log.Println("[INFO] retrying OSS RemoteClient.Get...") - continue - } - - return nil, fmt.Errorf(errBadChecksumFmt, digest) - } - - break - } - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - bucket, err := c.ossClient.Bucket(c.bucketName) - if err != nil { - return fmt.Errorf("error getting bucket: %#v", err) - } - - body := bytes.NewReader(data) - - var options []oss.Option - if c.acl != "" { - options = append(options, oss.ACL(oss.ACLType(c.acl))) - } - options = append(options, oss.ContentType("application/json")) - if 
c.serverSideEncryption { - options = append(options, oss.ServerSideEncryption("AES256")) - } - options = append(options, oss.ContentLength(int64(len(data)))) - - if body != nil { - if err := bucket.PutObject(c.stateFile, body, options...); err != nil { - return fmt.Errorf("failed to upload state %s: %#v", c.stateFile, err) - } - } - - sum := md5.Sum(data) - if err := c.putMD5(sum[:]); err != nil { - // if this errors out, we unfortunately have to error out altogether, - // since the next Get will inevitably fail. - return fmt.Errorf("failed to store state MD5: %s", err) - } - return nil -} - -func (c *RemoteClient) Delete() error { - bucket, err := c.ossClient.Bucket(c.bucketName) - if err != nil { - return fmt.Errorf("error getting bucket %s: %#v", c.bucketName, err) - } - - log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile) - - if err := bucket.DeleteObject(c.stateFile); err != nil { - return fmt.Errorf("error deleting state %s: %#v", c.stateFile, err) - } - - if err := c.deleteMD5(); err != nil { - log.Printf("[WARN] Error deleting state MD5: %s", err) - } - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - if c.otsTable == "" { - return "", nil - } - - info.Path = c.lockPath() - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - info.ID = lockID - } - - putParams := &tablestore.PutRowChange{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath(), - }, - }, - }, - Columns: []tablestore.AttributeColumn{ - { - ColumnName: "Info", - Value: string(info.Marshal()), - }, - }, - Condition: &tablestore.RowCondition{ - RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST, - }, - } - - log.Printf("[DEBUG] Recording state lock in tablestore: %#v; LOCKID:%s", putParams, c.lockPath()) - - _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ 
- PutRowChange: putParams, - }) - if err != nil { - err = fmt.Errorf("invoking PutRow got an error: %#v", err) - lockInfo, infoErr := c.getLockInfo() - if infoErr != nil { - err = multierror.Append(err, fmt.Errorf("\ngetting lock info got an error: %#v", infoErr)) - } - lockErr := &statemgr.LockError{ - Err: err, - Info: lockInfo, - } - log.Printf("[ERROR] state lock error: %s", lockErr.Error()) - return "", lockErr - } - - return info.ID, nil -} - -func (c *RemoteClient) getMD5() ([]byte, error) { - if c.otsTable == "" { - return nil, nil - } - - getParams := &tablestore.SingleRowQueryCriteria{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath() + stateIDSuffix, - }, - }, - }, - ColumnsToGet: []string{pkName, "Digest"}, - MaxVersion: 1, - } - - log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams) - - object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ - SingleRowQueryCriteria: getParams, - }) - - if err != nil { - return nil, err - } - - var val string - if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 { - val = v[0].Value.(string) - } - - sum, err := hex.DecodeString(val) - if err != nil || len(sum) != md5.Size { - return nil, errors.New("invalid md5") - } - - return sum, nil -} - -// store the hash of the state to that clients can check for stale state files. 
-func (c *RemoteClient) putMD5(sum []byte) error { - if c.otsTable == "" { - return nil - } - - if len(sum) != md5.Size { - return errors.New("invalid payload md5") - } - - putParams := &tablestore.PutRowChange{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath() + stateIDSuffix, - }, - }, - }, - Columns: []tablestore.AttributeColumn{ - { - ColumnName: "Digest", - Value: hex.EncodeToString(sum), - }, - }, - Condition: &tablestore.RowCondition{ - RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE, - }, - } - - log.Printf("[DEBUG] Recoring state serial in tablestore: %#v", putParams) - - _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ - PutRowChange: putParams, - }) - - if err != nil { - log.Printf("[WARN] failed to record state serial in tablestore: %s", err) - } - - return nil -} - -// remove the hash value for a deleted state -func (c *RemoteClient) deleteMD5() error { - if c.otsTable == "" { - return nil - } - - params := &tablestore.DeleteRowRequest{ - DeleteRowChange: &tablestore.DeleteRowChange{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath() + stateIDSuffix, - }, - }, - }, - Condition: &tablestore.RowCondition{ - RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST, - }, - }, - } - - log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params) - - if _, err := c.otsClient.DeleteRow(params); err != nil { - return err - } - - return nil -} - -func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { - getParams := &tablestore.SingleRowQueryCriteria{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath(), - }, - }, - }, - ColumnsToGet: []string{pkName, "Info"}, - MaxVersion: 1, - } - - 
log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams) - - object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ - SingleRowQueryCriteria: getParams, - }) - if err != nil { - return nil, err - } - - var infoData string - if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 { - infoData = v[0].Value.(string) - } - lockInfo := &statemgr.LockInfo{} - err = json.Unmarshal([]byte(infoData), lockInfo) - if err != nil { - return nil, err - } - return lockInfo, nil -} -func (c *RemoteClient) Unlock(id string) error { - if c.otsTable == "" { - return nil - } - - lockErr := &statemgr.LockError{} - - lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - params := &tablestore.DeleteRowRequest{ - DeleteRowChange: &tablestore.DeleteRowChange{ - TableName: c.otsTable, - PrimaryKey: &tablestore.PrimaryKey{ - PrimaryKeys: []*tablestore.PrimaryKeyColumn{ - { - ColumnName: pkName, - Value: c.lockPath(), - }, - }, - }, - Condition: &tablestore.RowCondition{ - RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE, - }, - }, - } - - _, err = c.otsClient.DeleteRow(params) - - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil -} - -func (c *RemoteClient) lockPath() string { - return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile) -} - -func (c *RemoteClient) getObj() (*remote.Payload, error) { - bucket, err := c.ossClient.Bucket(c.bucketName) - if err != nil { - return nil, fmt.Errorf("error getting bucket %s: %#v", c.bucketName, err) - } - - if exist, err := bucket.IsObjectExist(c.stateFile); err != nil { - return nil, fmt.Errorf("estimating object %s is exist got an error: %#v", c.stateFile, err) - } else if !exist { - return nil, nil - } - - var options []oss.Option - 
output, err := bucket.GetObject(c.stateFile, options...) - if err != nil { - return nil, fmt.Errorf("error getting object: %#v", err) - } - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, output); err != nil { - return nil, fmt.Errorf("failed to read remote state: %s", err) - } - sum := md5.Sum(buf.Bytes()) - payload := &remote.Payload{ - Data: buf.Bytes(), - MD5: sum[:], - } - - // If there was no data, then return nil - if len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -const errBadChecksumFmt = `state data in OSS does not have the expected content. - -This may be caused by unusually long delays in OSS processing a previous state -update. Please wait for a minute or two and try again. If this problem -persists, and neither OSS nor TableStore are experiencing an outage, you may need -to manually verify the remote state and update the Digest value stored in the -TableStore table to the following value: %x` diff --git a/internal/backend/remote-state/oss/client_test.go b/internal/backend/remote-state/oss/client_test.go deleted file mode 100644 index 1fc62792be66..000000000000 --- a/internal/backend/remote-state/oss/client_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package oss - -import ( - "fmt" - "strings" - "testing" - "time" - - "bytes" - "crypto/md5" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// NOTE: Before running this testcase, please create a OTS instance called 'tf-oss-remote' -var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com" - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) - path 
:= "testState" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - })).(*Backend) - - createOSSBucket(t, b.ossClient, bucketName) - defer deleteOSSBucket(t, b.ossClient, bucketName) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientLocks(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) - tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) - path := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - createOSSBucket(t, b1.ossClient, bucketName) - defer deleteOSSBucket(t, b1.ossClient, bucketName) - createTablestoreTable(t, b1.otsClient, tableName) - defer deleteTablestoreTable(t, b1.otsClient, tableName) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} - -// verify that the backend can handle more than one state in the same table -func TestRemoteClientLocks_multipleStates(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) - tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) - path := "testState" - - b1 := 
backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - createOSSBucket(t, b1.ossClient, bucketName) - defer deleteOSSBucket(t, b1.ossClient, bucketName) - createTablestoreTable(t, b1.otsClient, tableName) - defer deleteTablestoreTable(t, b1.otsClient, tableName) - - s1, err := b1.StateMgr("s1") - if err != nil { - t.Fatal(err) - } - if _, err := s1.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatal("failed to get lock for s1:", err) - } - - // s1 is now locked, s2 should not be locked as it's a different state file - s2, err := b2.StateMgr("s2") - if err != nil { - t.Fatal(err) - } - if _, err := s2.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatal("failed to get lock for s2:", err) - } -} - -// verify that we can unlock a state with an existing lock -func TestRemoteForceUnlock(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) - tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) - path := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "encrypt": true, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - createOSSBucket(t, b1.ossClient, 
bucketName) - defer deleteOSSBucket(t, b1.ossClient, bucketName) - createTablestoreTable(t, b1.otsClient, tableName) - defer deleteTablestoreTable(t, b1.otsClient, tableName) - - // first test with default - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test" - info.Who = "clientA" - - lockID, err := s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal("failed to get default state to force unlock:", err) - } - - if err := s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock default state") - } - - // now try the same thing with a named state - // first test with default - s1, err = b1.StateMgr("test") - if err != nil { - t.Fatal(err) - } - - info = statemgr.NewLockInfo() - info.Operation = "test" - info.Who = "clientA" - - lockID, err = s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err = b2.StateMgr("test") - if err != nil { - t.Fatal("failed to get named state to force unlock:", err) - } - - if err = s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock named state") - } -} - -func TestRemoteClient_clientMD5(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) - tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) - path := "testState" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - createOSSBucket(t, b.ossClient, bucketName) - defer deleteOSSBucket(t, b.ossClient, bucketName) - createTablestoreTable(t, 
b.otsClient, tableName) - defer deleteTablestoreTable(t, b.otsClient, tableName) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client := s.(*remote.State).Client.(*RemoteClient) - - sum := md5.Sum([]byte("test")) - - if err := client.putMD5(sum[:]); err != nil { - t.Fatal(err) - } - - getSum, err := client.getMD5() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(getSum, sum[:]) { - t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum) - } - - if err := client.deleteMD5(); err != nil { - t.Fatal(err) - } - - if getSum, err := client.getMD5(); err == nil { - t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum) - } -} - -// verify that a client won't return a state with an incorrect checksum. -func TestRemoteClient_stateChecksum(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) - tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) - path := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - "tablestore_table": tableName, - "tablestore_endpoint": RemoteTestUsedOTSEndpoint, - })).(*Backend) - - createOSSBucket(t, b1.ossClient, bucketName) - defer deleteOSSBucket(t, b1.ossClient, bucketName) - createTablestoreTable(t, b1.otsClient, tableName) - defer deleteTablestoreTable(t, b1.otsClient, tableName) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client1 := s1.(*remote.State).Client - - // create an old and new state version to persist - s := statemgr.TestFullInitialState() - sf := &statefile.File{State: s} - var oldState bytes.Buffer - if err := statefile.Write(sf, &oldState); err != nil { - t.Fatal(err) - } - sf.Serial++ - var newState bytes.Buffer - if err := statefile.Write(sf, &newState); err != nil { - t.Fatal(err) - } - - // Use b2 without a tablestore_table to 
bypass the lock table to write the state directly. - // client2 will write the "incorrect" state, simulating oss eventually consistency delays - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "prefix": path, - })).(*Backend) - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client2 := s2.(*remote.State).Client - - // write the new state through client2 so that there is no checksum yet - if err := client2.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - - // verify that we can pull a state without a checksum - if _, err := client1.Get(); err != nil { - t.Fatal(err) - } - - // write the new state back with its checksum - if err := client1.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - - // put an empty state in place to check for panics during get - if err := client2.Put([]byte{}); err != nil { - t.Fatal(err) - } - - // remove the timeouts so we can fail immediately - origTimeout := consistencyRetryTimeout - origInterval := consistencyRetryPollInterval - defer func() { - consistencyRetryTimeout = origTimeout - consistencyRetryPollInterval = origInterval - }() - consistencyRetryTimeout = 0 - consistencyRetryPollInterval = 0 - - // fetching an empty state through client1 should now error out due to a - // mismatched checksum. - if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { - t.Fatalf("expected state checksum error: got %s", err) - } - - // put the old state in place of the new, without updating the checksum - if err := client2.Put(oldState.Bytes()); err != nil { - t.Fatal(err) - } - - // fetching the wrong state through client1 should now error out due to a - // mismatched checksum. 
- if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { - t.Fatalf("expected state checksum error: got %s", err) - } - - // update the state with the correct one after we Get again - testChecksumHook = func() { - if err := client2.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - testChecksumHook = nil - } - - consistencyRetryTimeout = origTimeout - - // this final Get will fail to fail the checksum verification, the above - // callback will update the state with the correct version, and Get should - // retry automatically. - if _, err := client1.Get(); err != nil { - t.Fatal(err) - } -} diff --git a/internal/backend/remote-state/pg/backend.go b/internal/backend/remote-state/pg/backend.go deleted file mode 100644 index cdcfb3a6e462..000000000000 --- a/internal/backend/remote-state/pg/backend.go +++ /dev/null @@ -1,133 +0,0 @@ -package pg - -import ( - "context" - "database/sql" - "fmt" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/lib/pq" -) - -const ( - statesTableName = "states" - statesIndexName = "states_by_name" -) - -// New creates a new backend for Postgres remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "conn_str": { - Type: schema.TypeString, - Required: true, - Description: "Postgres connection string; a `postgres://` URL", - }, - - "schema_name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the automatically managed Postgres schema to store state", - Default: "terraform_remote_state", - }, - - "skip_schema_creation": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to `true`, Terraform won't try to create the Postgres schema", - Default: false, - }, - - "skip_table_creation": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to `true`, Terraform won't try to create the Postgres table", - }, - - "skip_index_creation": { - Type: schema.TypeBool, - Optional: true, - Description: "If set to `true`, Terraform won't try to create the Postgres index", - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - db *sql.DB - configData *schema.ResourceData - connStr string - schemaName string -} - -func (b *Backend) configure(ctx context.Context) error { - // Grab the resource data - b.configData = schema.FromContextBackendConfig(ctx) - data := b.configData - - b.connStr = data.Get("conn_str").(string) - b.schemaName = pq.QuoteIdentifier(data.Get("schema_name").(string)) - - db, err := sql.Open("postgres", b.connStr) - if err != nil { - return err - } - - // Prepare database schema, tables, & indexes. 
- var query string - - if !data.Get("skip_schema_creation").(bool) { - // list all schemas to see if it exists - var count int - query = `select count(1) from information_schema.schemata where schema_name = $1` - if err := db.QueryRow(query, data.Get("schema_name").(string)).Scan(&count); err != nil { - return err - } - - // skip schema creation if schema already exists - // `CREATE SCHEMA IF NOT EXISTS` is to be avoided if ever - // a user hasn't been granted the `CREATE SCHEMA` privilege - if count < 1 { - // tries to create the schema - query = `CREATE SCHEMA IF NOT EXISTS %s` - if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil { - return err - } - } - } - - if !data.Get("skip_table_creation").(bool) { - if _, err := db.Exec("CREATE SEQUENCE IF NOT EXISTS public.global_states_id_seq AS bigint"); err != nil { - return err - } - - query = `CREATE TABLE IF NOT EXISTS %s.%s ( - id bigint NOT NULL DEFAULT nextval('public.global_states_id_seq') PRIMARY KEY, - name text UNIQUE, - data text - )` - if _, err := db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName)); err != nil { - return err - } - } - - if !data.Get("skip_index_creation").(bool) { - query = `CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)` - if _, err := db.Exec(fmt.Sprintf(query, statesIndexName, b.schemaName, statesTableName)); err != nil { - return err - } - } - - // Assign db after its schema is prepared. 
- b.db = db - - return nil -} diff --git a/internal/backend/remote-state/pg/backend_state.go b/internal/backend/remote-state/pg/backend_state.go deleted file mode 100644 index a1a5544ddb74..000000000000 --- a/internal/backend/remote-state/pg/backend_state.go +++ /dev/null @@ -1,115 +0,0 @@ -package pg - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func (b *Backend) Workspaces() ([]string, error) { - query := `SELECT name FROM %s.%s WHERE name != 'default' ORDER BY name` - rows, err := b.db.Query(fmt.Sprintf(query, b.schemaName, statesTableName)) - if err != nil { - return nil, err - } - defer rows.Close() - - result := []string{ - backend.DefaultStateName, - } - - for rows.Next() { - var name string - if err := rows.Scan(&name); err != nil { - return nil, err - } - result = append(result, name) - } - if err := rows.Err(); err != nil { - return nil, err - } - - return result, nil -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - query := `DELETE FROM %s.%s WHERE name = $1` - _, err := b.db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName), name) - if err != nil { - return err - } - - return nil -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - // Build the state client - var stateMgr statemgr.Full = &remote.State{ - Client: &RemoteClient{ - Client: b.db, - Name: name, - SchemaName: b.schemaName, - }, - } - - // Check to see if this state already exists. - // If the state doesn't exist, we have to assume this - // is a normal create operation, and take the lock at that point. 
- existing, err := b.Workspaces() - if err != nil { - return nil, err - } - - exists := false - for _, s := range existing { - if s == name { - exists = true - break - } - } - - // Grab a lock, we use this to write an empty state if one doesn't - // exist already. We have to write an empty state as a sentinel value - // so Workspaces() knows it exists. - if !exists { - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := stateMgr.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock state in Postgres: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(`error unlocking Postgres state: %s`, err) - } - return parent - } - - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - } - - return stateMgr, nil -} diff --git a/internal/backend/remote-state/pg/backend_test.go b/internal/backend/remote-state/pg/backend_test.go deleted file mode 100644 index 064c001f5b3b..000000000000 --- a/internal/backend/remote-state/pg/backend_test.go +++ /dev/null @@ -1,376 +0,0 @@ -package pg - -// Create the test database: createdb terraform_backend_pg_test -// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/hashicorp/terraform/backend/remote-state/pg - -import ( - "database/sql" - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/lib/pq" - _ "github.com/lib/pq" -) - -// Function to skip a test unless in 
ACCeptance test mode. -// -// A running Postgres server identified by env variable -// DATABASE_URL is required for acceptance tests. -func testACC(t *testing.T) { - skip := os.Getenv("TF_ACC") == "" - if skip { - t.Log("pg backend tests require setting TF_ACC") - t.Skip() - } - if os.Getenv("DATABASE_URL") == "" { - os.Setenv("DATABASE_URL", "postgres://localhost/terraform_backend_pg_test?sslmode=disable") - } -} - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackendConfig(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - schemaName := pq.QuoteIdentifier(fmt.Sprintf("terraform_%s", t.Name())) - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - schemaName = pq.QuoteIdentifier(schemaName) - - dbCleaner, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - - _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - - _, err = b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - c := s.(*remote.State).Client.(*RemoteClient) - if c.Name != backend.DefaultStateName { - t.Fatal("RemoteClient name is not configured") - } - - backend.TestBackendStates(t, b) -} - -func TestBackendConfigSkipOptions(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - - testCases := []struct { - Name string - SkipSchemaCreation bool - SkipTableCreation bool - SkipIndexCreation bool - TestIndexIsPresent bool - Setup func(t *testing.T, db *sql.DB, schemaName string) - }{ - { - Name: "skip_schema_creation", - SkipSchemaCreation: true, - 
TestIndexIsPresent: true, - Setup: func(t *testing.T, db *sql.DB, schemaName string) { - // create the schema as a prerequisites - _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s`, schemaName)) - if err != nil { - t.Fatal(err) - } - }, - }, - { - Name: "skip_table_creation", - SkipTableCreation: true, - TestIndexIsPresent: true, - Setup: func(t *testing.T, db *sql.DB, schemaName string) { - // since the table needs to be already created the schema must be too - _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) - if err != nil { - t.Fatal(err) - } - _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( - id SERIAL PRIMARY KEY, - name TEXT, - data TEXT - )`, schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - }, - }, - { - Name: "skip_index_creation", - SkipIndexCreation: true, - TestIndexIsPresent: true, - Setup: func(t *testing.T, db *sql.DB, schemaName string) { - // Everything need to exists for the index to be created - _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) - if err != nil { - t.Fatal(err) - } - _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( - id SERIAL PRIMARY KEY, - name TEXT, - data TEXT - )`, schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - _, err = db.Exec(fmt.Sprintf(`CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)`, statesIndexName, schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - }, - }, - { - Name: "missing_index", - SkipIndexCreation: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - schemaName := tc.Name - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - "skip_schema_creation": tc.SkipSchemaCreation, - "skip_table_creation": tc.SkipTableCreation, - "skip_index_creation": tc.SkipIndexCreation, - }) - schemaName = pq.QuoteIdentifier(schemaName) - db, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - - if 
tc.Setup != nil { - tc.Setup(t, db, schemaName) - } - defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - - // Make sure everything has been created - - // This tests that both the schema and the table have been created - _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - if tc.TestIndexIsPresent { - // Make sure that the index exists - query := `select count(*) from pg_indexes where schemaname=$1 and tablename=$2 and indexname=$3;` - var count int - if err := b.db.QueryRow(query, tc.Name, statesTableName, statesIndexName).Scan(&count); err != nil { - t.Fatal(err) - } - if count != 1 { - t.Fatalf("The index has not been created (%d)", count) - } - } - - _, err = b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - c := s.(*remote.State).Client.(*RemoteClient) - if c.Name != backend.DefaultStateName { - t.Fatal("RemoteClient name is not configured") - } - - // Make sure that all workspace must have a unique name - _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (100, 'unique_name_test', '')`, schemaName, statesTableName)) - if err != nil { - t.Fatal(err) - } - _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (101, 'unique_name_test', '')`, schemaName, statesTableName)) - if err == nil { - t.Fatal("Creating two workspaces with the same name did not raise an error") - } - }) - } - -} - -func TestBackendStates(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - - testCases := []string{ - fmt.Sprintf("terraform_%s", t.Name()), - fmt.Sprintf("test with spaces: %s", t.Name()), - } - for _, schemaName := range testCases { - t.Run(schemaName, func(t *testing.T) { - dbCleaner, err := 
sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - defer dbCleaner.Query("DROP SCHEMA IF EXISTS %s CASCADE", pq.QuoteIdentifier(schemaName)) - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - - backend.TestBackendStates(t, b) - }) - } -} - -func TestBackendStateLocks(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - schemaName := fmt.Sprintf("terraform_%s", t.Name()) - dbCleaner, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - - bb := backend.TestBackendConfig(t, New(), config).(*Backend) - - if bb == nil { - t.Fatal("Backend could not be configured") - } - - backend.TestBackendStateLocks(t, b, bb) -} - -func TestBackendConcurrentLock(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - dbCleaner, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - - getStateMgr := func(schemaName string) (statemgr.Full, *statemgr.LockInfo) { - defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - stateMgr, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("Failed to get the state manager: %v", err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test" - info.Who = 
schemaName - - return stateMgr, info - } - - s1, i1 := getStateMgr(fmt.Sprintf("terraform_%s_1", t.Name())) - s2, i2 := getStateMgr(fmt.Sprintf("terraform_%s_2", t.Name())) - - // First we need to create the workspace as the lock for creating them is - // global - lockID1, err := s1.Lock(i1) - if err != nil { - t.Fatalf("failed to lock first state: %v", err) - } - - if err = s1.PersistState(nil); err != nil { - t.Fatalf("failed to persist state: %v", err) - } - - if err := s1.Unlock(lockID1); err != nil { - t.Fatalf("failed to unlock first state: %v", err) - } - - lockID2, err := s2.Lock(i2) - if err != nil { - t.Fatalf("failed to lock second state: %v", err) - } - - if err = s2.PersistState(nil); err != nil { - t.Fatalf("failed to persist state: %v", err) - } - - if err := s2.Unlock(lockID2); err != nil { - t.Fatalf("failed to unlock first state: %v", err) - } - - // Now we can test concurrent lock - lockID1, err = s1.Lock(i1) - if err != nil { - t.Fatalf("failed to lock first state: %v", err) - } - - lockID2, err = s2.Lock(i2) - if err != nil { - t.Fatalf("failed to lock second state: %v", err) - } - - if err := s1.Unlock(lockID1); err != nil { - t.Fatalf("failed to unlock first state: %v", err) - } - - if err := s2.Unlock(lockID2); err != nil { - t.Fatalf("failed to unlock first state: %v", err) - } -} - -func getDatabaseUrl() string { - return os.Getenv("DATABASE_URL") -} diff --git a/internal/backend/remote-state/pg/client.go b/internal/backend/remote-state/pg/client.go deleted file mode 100644 index 7ff9cd2468a1..000000000000 --- a/internal/backend/remote-state/pg/client.go +++ /dev/null @@ -1,142 +0,0 @@ -package pg - -import ( - "crypto/md5" - "database/sql" - "fmt" - - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - _ "github.com/lib/pq" -) - -// RemoteClient is a remote client that stores data in a Postgres database -type RemoteClient struct { - 
Client *sql.DB - Name string - SchemaName string - - info *statemgr.LockInfo -} - -func (c *RemoteClient) Get() (*remote.Payload, error) { - query := `SELECT data FROM %s.%s WHERE name = $1` - row := c.Client.QueryRow(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) - var data []byte - err := row.Scan(&data) - switch { - case err == sql.ErrNoRows: - // No existing state returns empty. - return nil, nil - case err != nil: - return nil, err - default: - md5 := md5.Sum(data) - return &remote.Payload{ - Data: data, - MD5: md5[:], - }, nil - } -} - -func (c *RemoteClient) Put(data []byte) error { - query := `INSERT INTO %s.%s (name, data) VALUES ($1, $2) - ON CONFLICT (name) DO UPDATE - SET data = $2 WHERE %s.name = $1` - _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName, statesTableName), c.Name, data) - if err != nil { - return err - } - return nil -} - -func (c *RemoteClient) Delete() error { - query := `DELETE FROM %s.%s WHERE name = $1` - _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) - if err != nil { - return err - } - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - var err error - var lockID string - - if info.ID == "" { - lockID, err = uuid.GenerateUUID() - if err != nil { - return "", err - } - info.ID = lockID - } - - // Local helper function so we can call it multiple places - // - lockUnlock := func(pgLockId string) error { - query := `SELECT pg_advisory_unlock(%s)` - row := c.Client.QueryRow(fmt.Sprintf(query, pgLockId)) - var didUnlock []byte - err := row.Scan(&didUnlock) - if err != nil { - return &statemgr.LockError{Info: info, Err: err} - } - return nil - } - - // Try to acquire locks for the existing row `id` and the creation lock `-1`. 
- query := `SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(-1) FROM %s.%s WHERE %s.name = $1` - row := c.Client.QueryRow(fmt.Sprintf(query, statesTableName, statesTableName, c.SchemaName, statesTableName, statesTableName), c.Name) - var pgLockId, didLock, didLockForCreate []byte - err = row.Scan(&pgLockId, &didLock, &didLockForCreate) - switch { - case err == sql.ErrNoRows: - // No rows means we're creating the workspace. Take the creation lock. - innerRow := c.Client.QueryRow(`SELECT pg_try_advisory_lock(-1)`) - var innerDidLock []byte - err := innerRow.Scan(&innerDidLock) - if err != nil { - return "", &statemgr.LockError{Info: info, Err: err} - } - if string(innerDidLock) == "false" { - return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Already locked for workspace creation: %s", c.Name)} - } - info.Path = "-1" - case err != nil: - return "", &statemgr.LockError{Info: info, Err: err} - case string(didLock) == "false": - // Existing workspace is already locked. Release the attempted creation lock. - lockUnlock("-1") - return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Workspace is already locked: %s", c.Name)} - case string(didLockForCreate) == "false": - // Someone has the creation lock already. Release the existing workspace because it might not be safe to touch. - lockUnlock(string(pgLockId)) - return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Cannot lock workspace; already locked for workspace creation: %s", c.Name)} - default: - // Existing workspace is now locked. Release the attempted creation lock. 
- lockUnlock("-1") - info.Path = string(pgLockId) - } - c.info = info - - return info.ID, nil -} - -func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { - return c.info, nil -} - -func (c *RemoteClient) Unlock(id string) error { - if c.info != nil && c.info.Path != "" { - query := `SELECT pg_advisory_unlock(%s)` - row := c.Client.QueryRow(fmt.Sprintf(query, c.info.Path)) - var didUnlock []byte - err := row.Scan(&didUnlock) - if err != nil { - return &statemgr.LockError{Info: c.info, Err: err} - } - c.info = nil - } - return nil -} diff --git a/internal/backend/remote-state/pg/client_test.go b/internal/backend/remote-state/pg/client_test.go deleted file mode 100644 index 7bf21ac8483c..000000000000 --- a/internal/backend/remote-state/pg/client_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package pg - -// Create the test database: createdb terraform_backend_pg_test -// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/hashicorp/terraform/backend/remote-state/pg - -import ( - "database/sql" - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - schemaName := fmt.Sprintf("terraform_%s", t.Name()) - dbCleaner, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - b := backend.TestBackendConfig(t, New(), config).(*Backend) - - if b == nil { - t.Fatal("Backend could not be configured") - } - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, 
s.(*remote.State).Client) -} - -func TestRemoteLocks(t *testing.T) { - testACC(t) - connStr := getDatabaseUrl() - schemaName := fmt.Sprintf("terraform_%s", t.Name()) - dbCleaner, err := sql.Open("postgres", connStr) - if err != nil { - t.Fatal(err) - } - defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) - - config := backend.TestWrapConfig(map[string]interface{}{ - "conn_str": connStr, - "schema_name": schemaName, - }) - - b1 := backend.TestBackendConfig(t, New(), config).(*Backend) - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - b2 := backend.TestBackendConfig(t, New(), config).(*Backend) - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} diff --git a/internal/backend/remote-state/s3/backend.go b/internal/backend/remote-state/s3/backend.go deleted file mode 100644 index c56e390a7f18..000000000000 --- a/internal/backend/remote-state/s3/backend.go +++ /dev/null @@ -1,413 +0,0 @@ -package s3 - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/s3" - awsbase "github.com/hashicorp/aws-sdk-go-base" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/legacy/helper/schema" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/version" -) - -// New creates a new backend for S3 remote state. 
-func New() backend.Backend { - s := &schema.Backend{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "The name of the S3 bucket", - }, - - "key": { - Type: schema.TypeString, - Required: true, - Description: "The path to the state file inside the bucket", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - // s3 will strip leading slashes from an object, so while this will - // technically be accepted by s3, it will break our workspace hierarchy. - if strings.HasPrefix(v.(string), "/") { - return nil, []error{errors.New("key must not start with '/'")} - } - // s3 will recognize objects with a trailing slash as a directory - // so they should not be valid keys - if strings.HasSuffix(v.(string), "/") { - return nil, []error{errors.New("key must not end with '/'")} - } - return nil, nil - }, - }, - - "region": { - Type: schema.TypeString, - Required: true, - Description: "AWS region of the S3 Bucket and DynamoDB Table (if used).", - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", - }, nil), - }, - - "dynamodb_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the DynamoDB API", - DefaultFunc: schema.EnvDefaultFunc("AWS_DYNAMODB_ENDPOINT", ""), - }, - - "endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the S3 API", - DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""), - }, - - "iam_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the IAM API", - DefaultFunc: schema.EnvDefaultFunc("AWS_IAM_ENDPOINT", ""), - }, - - "sts_endpoint": { - Type: schema.TypeString, - Optional: true, - Description: "A custom endpoint for the STS API", - DefaultFunc: schema.EnvDefaultFunc("AWS_STS_ENDPOINT", ""), - }, - - "encrypt": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether to enable server side 
encryption of the state file", - Default: false, - }, - - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Canned ACL to be applied to the state file", - Default: "", - }, - - "access_key": { - Type: schema.TypeString, - Optional: true, - Description: "AWS access key", - Default: "", - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Description: "AWS secret key", - Default: "", - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Description: "The ARN of a KMS Key to use for encrypting the state", - Default: "", - }, - - "dynamodb_table": { - Type: schema.TypeString, - Optional: true, - Description: "DynamoDB table for state locking and consistency", - Default: "", - }, - - "profile": { - Type: schema.TypeString, - Optional: true, - Description: "AWS profile name", - Default: "", - }, - - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - Description: "Path to a shared credentials file", - Default: "", - }, - - "token": { - Type: schema.TypeString, - Optional: true, - Description: "MFA token", - Default: "", - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip the credentials validation via STS API.", - Default: false, - }, - - "skip_region_validation": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip static validation of region name.", - Default: false, - }, - - "skip_metadata_api_check": { - Type: schema.TypeBool, - Optional: true, - Description: "Skip the AWS Metadata API check.", - Default: false, - }, - - "sse_customer_key": { - Type: schema.TypeString, - Optional: true, - Description: "The base64-encoded encryption key to use for server-side encryption with customer-provided keys (SSE-C).", - DefaultFunc: schema.EnvDefaultFunc("AWS_SSE_CUSTOMER_KEY", ""), - Sensitive: true, - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - key := v.(string) - if key != "" && len(key) != 44 { - return 
nil, []error{errors.New("sse_customer_key must be 44 characters in length (256 bits, base64 encoded)")} - } - return nil, nil - }, - }, - - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "The role to be assumed", - Default: "", - }, - - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: "The session name to use when assuming the role.", - Default: "", - }, - - "external_id": { - Type: schema.TypeString, - Optional: true, - Description: "The external ID to use when assuming the role", - Default: "", - }, - - "assume_role_duration_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: "Seconds to restrict the assume role session duration.", - }, - - "assume_role_policy": { - Type: schema.TypeString, - Optional: true, - Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", - Default: "", - }, - - "assume_role_policy_arns": { - Type: schema.TypeSet, - Optional: true, - Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "assume_role_tags": { - Type: schema.TypeMap, - Optional: true, - Description: "Assume role session tags.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "assume_role_transitive_tag_keys": { - Type: schema.TypeSet, - Optional: true, - Description: "Assume role session tag keys to pass to any subsequent sessions.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "workspace_key_prefix": { - Type: schema.TypeString, - Optional: true, - Description: "The prefix applied to the non-default state path inside the bucket.", - Default: "env:", - ValidateFunc: func(v interface{}, s string) ([]string, []error) { - prefix := v.(string) - if strings.HasPrefix(prefix, "/") || strings.HasSuffix(prefix, "/") { - return nil, []error{errors.New("workspace_key_prefix must not start or end with 
'/'")} - } - return nil, nil - }, - }, - - "force_path_style": { - Type: schema.TypeBool, - Optional: true, - Description: "Force s3 to use path style api.", - Default: false, - }, - - "max_retries": { - Type: schema.TypeInt, - Optional: true, - Description: "The maximum number of times an AWS API request is retried on retryable failure.", - Default: 5, - }, - }, - } - - result := &Backend{Backend: s} - result.Backend.ConfigureFunc = result.configure - return result -} - -type Backend struct { - *schema.Backend - - // The fields below are set from configure - s3Client *s3.S3 - dynClient *dynamodb.DynamoDB - - bucketName string - keyName string - serverSideEncryption bool - customerEncryptionKey []byte - acl string - kmsKeyID string - ddbTable string - workspaceKeyPrefix string -} - -func (b *Backend) configure(ctx context.Context) error { - if b.s3Client != nil { - return nil - } - - // Grab the resource data - data := schema.FromContextBackendConfig(ctx) - - if !data.Get("skip_region_validation").(bool) { - if err := awsbase.ValidateRegion(data.Get("region").(string)); err != nil { - return err - } - } - - b.bucketName = data.Get("bucket").(string) - b.keyName = data.Get("key").(string) - b.acl = data.Get("acl").(string) - b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string) - b.serverSideEncryption = data.Get("encrypt").(bool) - b.kmsKeyID = data.Get("kms_key_id").(string) - b.ddbTable = data.Get("dynamodb_table").(string) - - customerKeyString := data.Get("sse_customer_key").(string) - if customerKeyString != "" { - if b.kmsKeyID != "" { - return errors.New(encryptionKeyConflictError) - } - - var err error - b.customerEncryptionKey, err = base64.StdEncoding.DecodeString(customerKeyString) - if err != nil { - return fmt.Errorf("Failed to decode sse_customer_key: %s", err.Error()) - } - } - - cfg := &awsbase.Config{ - AccessKey: data.Get("access_key").(string), - AssumeRoleARN: data.Get("role_arn").(string), - AssumeRoleDurationSeconds: 
data.Get("assume_role_duration_seconds").(int), - AssumeRoleExternalID: data.Get("external_id").(string), - AssumeRolePolicy: data.Get("assume_role_policy").(string), - AssumeRoleSessionName: data.Get("session_name").(string), - CallerDocumentationURL: "https://www.terraform.io/docs/language/settings/backends/s3.html", - CallerName: "S3 Backend", - CredsFilename: data.Get("shared_credentials_file").(string), - DebugLogging: logging.IsDebugOrHigher(), - IamEndpoint: data.Get("iam_endpoint").(string), - MaxRetries: data.Get("max_retries").(int), - Profile: data.Get("profile").(string), - Region: data.Get("region").(string), - SecretKey: data.Get("secret_key").(string), - SkipCredsValidation: data.Get("skip_credentials_validation").(bool), - SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool), - StsEndpoint: data.Get("sts_endpoint").(string), - Token: data.Get("token").(string), - UserAgentProducts: []*awsbase.UserAgentProduct{ - {Name: "APN", Version: "1.0"}, - {Name: "HashiCorp", Version: "1.0"}, - {Name: "Terraform", Version: version.String()}, - }, - } - - if policyARNSet := data.Get("assume_role_policy_arns").(*schema.Set); policyARNSet.Len() > 0 { - for _, policyARNRaw := range policyARNSet.List() { - policyARN, ok := policyARNRaw.(string) - - if !ok { - continue - } - - cfg.AssumeRolePolicyARNs = append(cfg.AssumeRolePolicyARNs, policyARN) - } - } - - if tagMap := data.Get("assume_role_tags").(map[string]interface{}); len(tagMap) > 0 { - cfg.AssumeRoleTags = make(map[string]string) - - for k, vRaw := range tagMap { - v, ok := vRaw.(string) - - if !ok { - continue - } - - cfg.AssumeRoleTags[k] = v - } - } - - if transitiveTagKeySet := data.Get("assume_role_transitive_tag_keys").(*schema.Set); transitiveTagKeySet.Len() > 0 { - for _, transitiveTagKeyRaw := range transitiveTagKeySet.List() { - transitiveTagKey, ok := transitiveTagKeyRaw.(string) - - if !ok { - continue - } - - cfg.AssumeRoleTransitiveTagKeys = append(cfg.AssumeRoleTransitiveTagKeys, 
transitiveTagKey) - } - } - - sess, err := awsbase.GetSession(cfg) - if err != nil { - return fmt.Errorf("error configuring S3 Backend: %w", err) - } - - b.dynClient = dynamodb.New(sess.Copy(&aws.Config{ - Endpoint: aws.String(data.Get("dynamodb_endpoint").(string)), - })) - b.s3Client = s3.New(sess.Copy(&aws.Config{ - Endpoint: aws.String(data.Get("endpoint").(string)), - S3ForcePathStyle: aws.Bool(data.Get("force_path_style").(bool)), - })) - - return nil -} - -const encryptionKeyConflictError = `Cannot have both kms_key_id and sse_customer_key set. - -The kms_key_id is used for encryption with KMS-Managed Keys (SSE-KMS) -while sse_customer_key is used for encryption with customer-managed keys (SSE-C). -Please choose one or the other.` diff --git a/internal/backend/remote-state/s3/backend_state.go b/internal/backend/remote-state/s3/backend_state.go deleted file mode 100644 index d5505f2733c3..000000000000 --- a/internal/backend/remote-state/s3/backend_state.go +++ /dev/null @@ -1,221 +0,0 @@ -package s3 - -import ( - "errors" - "fmt" - "path" - "sort" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func (b *Backend) Workspaces() ([]string, error) { - const maxKeys = 1000 - - prefix := "" - - if b.workspaceKeyPrefix != "" { - prefix = b.workspaceKeyPrefix + "/" - } - - params := &s3.ListObjectsInput{ - Bucket: &b.bucketName, - Prefix: aws.String(prefix), - MaxKeys: aws.Int64(maxKeys), - } - - wss := []string{backend.DefaultStateName} - err := b.s3Client.ListObjectsPages(params, func(page *s3.ListObjectsOutput, lastPage bool) bool { - for _, obj := range page.Contents { - ws := b.keyEnv(*obj.Key) - if ws != "" { - wss = append(wss, ws) - } - } - return 
!lastPage - }) - - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket { - return nil, fmt.Errorf(errS3NoSuchBucket, err) - } - - sort.Strings(wss[1:]) - return wss, nil -} - -func (b *Backend) keyEnv(key string) string { - prefix := b.workspaceKeyPrefix - - if prefix == "" { - parts := strings.SplitN(key, "/", 2) - if len(parts) > 1 && parts[1] == b.keyName { - return parts[0] - } else { - return "" - } - } - - // add a slash to treat this as a directory - prefix += "/" - - parts := strings.SplitAfterN(key, prefix, 2) - if len(parts) < 2 { - return "" - } - - // shouldn't happen since we listed by prefix - if parts[0] != prefix { - return "" - } - - parts = strings.SplitN(parts[1], "/", 2) - - if len(parts) < 2 { - return "" - } - - // not our key, so don't include it in our listing - if parts[1] != b.keyName { - return "" - } - - return parts[0] -} - -func (b *Backend) DeleteWorkspace(name string, _ bool) error { - if name == backend.DefaultStateName || name == "" { - return fmt.Errorf("can't delete default state") - } - - client, err := b.remoteClient(name) - if err != nil { - return err - } - - return client.Delete() -} - -// get a remote client configured for this state -func (b *Backend) remoteClient(name string) (*RemoteClient, error) { - if name == "" { - return nil, errors.New("missing state name") - } - - client := &RemoteClient{ - s3Client: b.s3Client, - dynClient: b.dynClient, - bucketName: b.bucketName, - path: b.path(name), - serverSideEncryption: b.serverSideEncryption, - customerEncryptionKey: b.customerEncryptionKey, - acl: b.acl, - kmsKeyID: b.kmsKeyID, - ddbTable: b.ddbTable, - } - - return client, nil -} - -func (b *Backend) StateMgr(name string) (statemgr.Full, error) { - client, err := b.remoteClient(name) - if err != nil { - return nil, err - } - - stateMgr := &remote.State{Client: client} - // Check to see if this state already exists. 
- // If we're trying to force-unlock a state, we can't take the lock before - // fetching the state. If the state doesn't exist, we have to assume this - // is a normal create operation, and take the lock at that point. - // - // If we need to force-unlock, but for some reason the state no longer - // exists, the user will have to use aws tools to manually fix the - // situation. - existing, err := b.Workspaces() - if err != nil { - return nil, err - } - - exists := false - for _, s := range existing { - if s == name { - exists = true - break - } - } - - // We need to create the object so it's listed by States. - if !exists { - // take a lock on this state while we write it - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = "init" - lockId, err := client.Lock(lockInfo) - if err != nil { - return nil, fmt.Errorf("failed to lock s3 state: %s", err) - } - - // Local helper function so we can call it multiple places - lockUnlock := func(parent error) error { - if err := stateMgr.Unlock(lockId); err != nil { - return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) - } - return parent - } - - // Grab the value - // This is to ensure that no one beat us to writing a state between - // the `exists` check and taking the lock. 
- if err := stateMgr.RefreshState(); err != nil { - err = lockUnlock(err) - return nil, err - } - - // If we have no state, we have to create an empty state - if v := stateMgr.State(); v == nil { - if err := stateMgr.WriteState(states.NewState()); err != nil { - err = lockUnlock(err) - return nil, err - } - if err := stateMgr.PersistState(nil); err != nil { - err = lockUnlock(err) - return nil, err - } - } - - // Unlock, the state should now be initialized - if err := lockUnlock(nil); err != nil { - return nil, err - } - - } - - return stateMgr, nil -} - -func (b *Backend) client() *RemoteClient { - return &RemoteClient{} -} - -func (b *Backend) path(name string) string { - if name == backend.DefaultStateName { - return b.keyName - } - - return path.Join(b.workspaceKeyPrefix, name, b.keyName) -} - -const errStateUnlock = ` -Error unlocking S3 state. Lock ID: %s - -Error: %s - -You may have to force-unlock this state in order to use it again. -` diff --git a/internal/backend/remote-state/s3/backend_test.go b/internal/backend/remote-state/s3/backend_test.go deleted file mode 100644 index 268c44490411..000000000000 --- a/internal/backend/remote-state/s3/backend_test.go +++ /dev/null @@ -1,795 +0,0 @@ -package s3 - -import ( - "fmt" - "net/url" - "os" - "reflect" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/s3" - awsbase "github.com/hashicorp/aws-sdk-go-base" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" -) - -var ( - mockStsGetCallerIdentityRequestBody = url.Values{ - "Action": []string{"GetCallerIdentity"}, - "Version": []string{"2011-06-15"}, - }.Encode() -) - -// verify that we are doing ACC tests or the S3 tests specifically -func testACC(t *testing.T) { - skip := os.Getenv("TF_ACC") == "" && 
os.Getenv("TF_S3_TEST") == "" - if skip { - t.Log("s3 backend tests require setting TF_ACC or TF_S3_TEST") - t.Skip() - } - if os.Getenv("AWS_DEFAULT_REGION") == "" { - os.Setenv("AWS_DEFAULT_REGION", "us-west-2") - } -} - -func TestBackend_impl(t *testing.T) { - var _ backend.Backend = new(Backend) -} - -func TestBackendConfig(t *testing.T) { - testACC(t) - config := map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "key": "state", - "encrypt": true, - "dynamodb_table": "dynamoTable", - } - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend) - - if *b.s3Client.Config.Region != "us-west-1" { - t.Fatalf("Incorrect region was populated") - } - if b.bucketName != "tf-test" { - t.Fatalf("Incorrect bucketName was populated") - } - if b.keyName != "state" { - t.Fatalf("Incorrect keyName was populated") - } - - credentials, err := b.s3Client.Config.Credentials.Get() - if err != nil { - t.Fatalf("Error when requesting credentials") - } - if credentials.AccessKeyID == "" { - t.Fatalf("No Access Key Id was populated") - } - if credentials.SecretAccessKey == "" { - t.Fatalf("No Secret Access Key was populated") - } -} - -func TestBackendConfig_AssumeRole(t *testing.T) { - testACC(t) - - testCases := []struct { - Config map[string]interface{} - Description string - MockStsEndpoints []*awsbase.MockEndpoint - }{ - { - Config: map[string]interface{}{ - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "role_arn", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: 
&awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "assume_role_duration_seconds": 3600, - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "assume_role_duration_seconds", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"3600"}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "bucket": "tf-test", - "external_id": awsbase.MockStsAssumeRoleExternalId, - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "external_id", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "ExternalId": []string{awsbase.MockStsAssumeRoleExternalId}, - "RoleArn": 
[]string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "assume_role_policy": awsbase.MockStsAssumeRolePolicy, - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "assume_role_policy", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "Policy": []string{awsbase.MockStsAssumeRolePolicy}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "assume_role_policy_arns": []interface{}{awsbase.MockStsAssumeRolePolicyArn}, - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "assume_role_policy_arns", - 
MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "PolicyArns.member.1.arn": []string{awsbase.MockStsAssumeRolePolicyArn}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "assume_role_tags": map[string]interface{}{ - awsbase.MockStsAssumeRoleTagKey: awsbase.MockStsAssumeRoleTagValue, - }, - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "assume_role_tags", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Tags.member.1.Key": []string{awsbase.MockStsAssumeRoleTagKey}, - "Tags.member.1.Value": []string{awsbase.MockStsAssumeRoleTagValue}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, 
Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - { - Config: map[string]interface{}{ - "assume_role_tags": map[string]interface{}{ - awsbase.MockStsAssumeRoleTagKey: awsbase.MockStsAssumeRoleTagValue, - }, - "assume_role_transitive_tag_keys": []interface{}{awsbase.MockStsAssumeRoleTagKey}, - "bucket": "tf-test", - "key": "state", - "region": "us-west-1", - "role_arn": awsbase.MockStsAssumeRoleArn, - "session_name": awsbase.MockStsAssumeRoleSessionName, - }, - Description: "assume_role_transitive_tag_keys", - MockStsEndpoints: []*awsbase.MockEndpoint{ - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ - "Action": []string{"AssumeRole"}, - "DurationSeconds": []string{"900"}, - "RoleArn": []string{awsbase.MockStsAssumeRoleArn}, - "RoleSessionName": []string{awsbase.MockStsAssumeRoleSessionName}, - "Tags.member.1.Key": []string{awsbase.MockStsAssumeRoleTagKey}, - "Tags.member.1.Value": []string{awsbase.MockStsAssumeRoleTagValue}, - "TransitiveTagKeys.member.1": []string{awsbase.MockStsAssumeRoleTagKey}, - "Version": []string{"2011-06-15"}, - }.Encode()}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, - }, - { - Request: &awsbase.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, - Response: &awsbase.MockResponse{StatusCode: 200, Body: awsbase.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, - }, - }, - }, - } - - for _, testCase := range testCases { - testCase := testCase - - t.Run(testCase.Description, func(t *testing.T) { - closeSts, mockStsSession, err := awsbase.GetMockedAwsApiSession("STS", testCase.MockStsEndpoints) - defer closeSts() - - if err != nil { - t.Fatalf("unexpected error creating mock STS server: %s", err) - } - - if mockStsSession != nil && mockStsSession.Config != nil { - testCase.Config["sts_endpoint"] = 
aws.StringValue(mockStsSession.Config.Endpoint) - } - - diags := New().Configure(hcl2shim.HCL2ValueFromConfigValue(testCase.Config)) - - if diags.HasErrors() { - for _, diag := range diags { - t.Errorf("unexpected error: %s", diag.Description().Summary) - } - } - }) - } -} - -func TestBackendConfig_invalidKey(t *testing.T) { - testACC(t) - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "key": "/leading-slash", - "encrypt": true, - "dynamodb_table": "dynamoTable", - }) - - _, diags := New().PrepareConfig(cfg) - if !diags.HasErrors() { - t.Fatal("expected config validation error") - } - - cfg = hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "key": "trailing-slash/", - "encrypt": true, - "dynamodb_table": "dynamoTable", - }) - - _, diags = New().PrepareConfig(cfg) - if !diags.HasErrors() { - t.Fatal("expected config validation error") - } -} - -func TestBackendConfig_invalidSSECustomerKeyLength(t *testing.T) { - testACC(t) - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "encrypt": true, - "key": "state", - "dynamodb_table": "dynamoTable", - "sse_customer_key": "key", - }) - - _, diags := New().PrepareConfig(cfg) - if !diags.HasErrors() { - t.Fatal("expected error for invalid sse_customer_key length") - } -} - -func TestBackendConfig_invalidSSECustomerKeyEncoding(t *testing.T) { - testACC(t) - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "encrypt": true, - "key": "state", - "dynamodb_table": "dynamoTable", - "sse_customer_key": "====CT70aTYB2JGff7AjQtwbiLkwH4npICay1PWtmdka", - }) - - diags := New().Configure(cfg) - if !diags.HasErrors() { - t.Fatal("expected error for failing to decode sse_customer_key") - } -} - -func TestBackendConfig_conflictingEncryptionSchema(t *testing.T) { - testACC(t) - cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ - "region": "us-west-1", - "bucket": "tf-test", - "key": "state", - "encrypt": true, - "dynamodb_table": "dynamoTable", - "sse_customer_key": "1hwbcNPGWL+AwDiyGmRidTWAEVmCWMKbEHA+Es8w75o=", - "kms_key_id": "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", - }) - - diags := New().Configure(cfg) - if !diags.HasErrors() { - t.Fatal("expected error for simultaneous usage of kms_key_id and sse_customer_key") - } -} - -func TestBackend(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - - backend.TestBackendStates(t, b) -} - -func TestBackendLocked(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "test/state" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - createS3Bucket(t, b1.s3Client, bucketName) - defer deleteS3Bucket(t, b1.s3Client, bucketName) - createDynamoDBTable(t, b1.dynClient, bucketName) - defer deleteDynamoDBTable(t, b1.dynClient, bucketName) - - backend.TestBackendStateLocks(t, b1, b2) - backend.TestBackendStateForceUnlock(t, b1, b2) -} - -func TestBackendSSECustomerKey(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - - b := 
backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "encrypt": true, - "key": "test-SSE-C", - "sse_customer_key": "4Dm1n4rphuFgawxuzY/bEfvLf6rYK0gIjfaDSLlfXNk=", - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - - backend.TestBackendStates(t, b) -} - -// add some extra junk in S3 to try and confuse the env listing. -func TestBackendExtraPaths(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "test/state/tfstate" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - - // put multiple states in old env paths. - s1 := states.NewState() - s2 := states.NewState() - - // RemoteClient to Put things in various paths - client := &RemoteClient{ - s3Client: b.s3Client, - dynClient: b.dynClient, - bucketName: b.bucketName, - path: b.path("s1"), - serverSideEncryption: b.serverSideEncryption, - acl: b.acl, - kmsKeyID: b.kmsKeyID, - ddbTable: b.ddbTable, - } - - // Write the first state - stateMgr := &remote.State{Client: client} - stateMgr.WriteState(s1) - if err := stateMgr.PersistState(nil); err != nil { - t.Fatal(err) - } - - // Write the second state - // Note a new state manager - otherwise, because these - // states are equal, the state will not Put to the remote - client.path = b.path("s2") - stateMgr2 := &remote.State{Client: client} - stateMgr2.WriteState(s2) - if err := stateMgr2.PersistState(nil); err != nil { - t.Fatal(err) - } - - s2Lineage := stateMgr2.StateSnapshotMeta().Lineage - - if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { - t.Fatal(err) - } - - // put a state in an env directory name - client.path = b.workspaceKeyPrefix + "/error" 
- stateMgr.WriteState(states.NewState()) - if err := stateMgr.PersistState(nil); err != nil { - t.Fatal(err) - } - if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { - t.Fatal(err) - } - - // add state with the wrong key for an existing env - client.path = b.workspaceKeyPrefix + "/s2/notTestState" - stateMgr.WriteState(states.NewState()) - if err := stateMgr.PersistState(nil); err != nil { - t.Fatal(err) - } - if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { - t.Fatal(err) - } - - // remove the state with extra subkey - if err := client.Delete(); err != nil { - t.Fatal(err) - } - - // delete the real workspace - if err := b.DeleteWorkspace("s2", true); err != nil { - t.Fatal(err) - } - - if err := checkStateList(b, []string{"default", "s1"}); err != nil { - t.Fatal(err) - } - - // fetch that state again, which should produce a new lineage - s2Mgr, err := b.StateMgr("s2") - if err != nil { - t.Fatal(err) - } - if err := s2Mgr.RefreshState(); err != nil { - t.Fatal(err) - } - - if s2Mgr.(*remote.State).StateSnapshotMeta().Lineage == s2Lineage { - t.Fatal("state s2 was not deleted") - } - s2 = s2Mgr.State() - s2Lineage = stateMgr.StateSnapshotMeta().Lineage - - // add a state with a key that matches an existing environment dir name - client.path = b.workspaceKeyPrefix + "/s2/" - stateMgr.WriteState(states.NewState()) - if err := stateMgr.PersistState(nil); err != nil { - t.Fatal(err) - } - - // make sure s2 is OK - s2Mgr, err = b.StateMgr("s2") - if err != nil { - t.Fatal(err) - } - if err := s2Mgr.RefreshState(); err != nil { - t.Fatal(err) - } - - if stateMgr.StateSnapshotMeta().Lineage != s2Lineage { - t.Fatal("we got the wrong state for s2") - } - - if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { - t.Fatal(err) - } -} - -// ensure we can separate the workspace prefix when it also matches the prefix -// of the workspace name itself. 
-func TestBackendPrefixInWorkspace(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": "test-env.tfstate", - "workspace_key_prefix": "env", - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - - // get a state that contains the prefix as a substring - sMgr, err := b.StateMgr("env-1") - if err != nil { - t.Fatal(err) - } - if err := sMgr.RefreshState(); err != nil { - t.Fatal(err) - } - - if err := checkStateList(b, []string{"default", "env-1"}); err != nil { - t.Fatal(err) - } -} - -func TestKeyEnv(t *testing.T) { - testACC(t) - keyName := "some/paths/tfstate" - - bucket0Name := fmt.Sprintf("terraform-remote-s3-test-%x-0", time.Now().Unix()) - b0 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucket0Name, - "key": keyName, - "encrypt": true, - "workspace_key_prefix": "", - })).(*Backend) - - createS3Bucket(t, b0.s3Client, bucket0Name) - defer deleteS3Bucket(t, b0.s3Client, bucket0Name) - - bucket1Name := fmt.Sprintf("terraform-remote-s3-test-%x-1", time.Now().Unix()) - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucket1Name, - "key": keyName, - "encrypt": true, - "workspace_key_prefix": "project/env:", - })).(*Backend) - - createS3Bucket(t, b1.s3Client, bucket1Name) - defer deleteS3Bucket(t, b1.s3Client, bucket1Name) - - bucket2Name := fmt.Sprintf("terraform-remote-s3-test-%x-2", time.Now().Unix()) - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucket2Name, - "key": keyName, - "encrypt": true, - })).(*Backend) - - createS3Bucket(t, b2.s3Client, bucket2Name) - defer deleteS3Bucket(t, b2.s3Client, bucket2Name) - - if err := testGetWorkspaceForKey(b0, 
"some/paths/tfstate", ""); err != nil { - t.Fatal(err) - } - - if err := testGetWorkspaceForKey(b0, "ws1/some/paths/tfstate", "ws1"); err != nil { - t.Fatal(err) - } - - if err := testGetWorkspaceForKey(b1, "project/env:/ws1/some/paths/tfstate", "ws1"); err != nil { - t.Fatal(err) - } - - if err := testGetWorkspaceForKey(b1, "project/env:/ws2/some/paths/tfstate", "ws2"); err != nil { - t.Fatal(err) - } - - if err := testGetWorkspaceForKey(b2, "env:/ws3/some/paths/tfstate", "ws3"); err != nil { - t.Fatal(err) - } - - backend.TestBackendStates(t, b0) - backend.TestBackendStates(t, b1) - backend.TestBackendStates(t, b2) -} - -func testGetWorkspaceForKey(b *Backend, key string, expected string) error { - if actual := b.keyEnv(key); actual != expected { - return fmt.Errorf("incorrect workspace for key[%q]. Expected[%q]: Actual[%q]", key, expected, actual) - } - return nil -} - -func checkStateList(b backend.Backend, expected []string) error { - states, err := b.Workspaces() - if err != nil { - return err - } - - if !reflect.DeepEqual(states, expected) { - return fmt.Errorf("incorrect states listed: %q", states) - } - return nil -} - -func createS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) { - createBucketReq := &s3.CreateBucketInput{ - Bucket: &bucketName, - } - - // Be clear about what we're doing in case the user needs to clean - // this up later. - t.Logf("creating S3 bucket %s in %s", bucketName, *s3Client.Config.Region) - _, err := s3Client.CreateBucket(createBucketReq) - if err != nil { - t.Fatal("failed to create test S3 bucket:", err) - } -} - -func deleteS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) { - warning := "WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. 
(error was %s)" - - // first we have to get rid of the env objects, or we can't delete the bucket - resp, err := s3Client.ListObjects(&s3.ListObjectsInput{Bucket: &bucketName}) - if err != nil { - t.Logf(warning, err) - return - } - for _, obj := range resp.Contents { - if _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{Bucket: &bucketName, Key: obj.Key}); err != nil { - // this will need cleanup no matter what, so just warn and exit - t.Logf(warning, err) - return - } - } - - if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: &bucketName}); err != nil { - t.Logf(warning, err) - } -} - -// create the dynamoDB table, and wait until we can query it. -func createDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) { - createInput := &dynamodb.CreateTableInput{ - AttributeDefinitions: []*dynamodb.AttributeDefinition{ - { - AttributeName: aws.String("LockID"), - AttributeType: aws.String("S"), - }, - }, - KeySchema: []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String("LockID"), - KeyType: aws.String("HASH"), - }, - }, - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(5), - WriteCapacityUnits: aws.Int64(5), - }, - TableName: aws.String(tableName), - } - - _, err := dynClient.CreateTable(createInput) - if err != nil { - t.Fatal(err) - } - - // now wait until it's ACTIVE - start := time.Now() - time.Sleep(time.Second) - - describeInput := &dynamodb.DescribeTableInput{ - TableName: aws.String(tableName), - } - - for { - resp, err := dynClient.DescribeTable(describeInput) - if err != nil { - t.Fatal(err) - } - - if *resp.Table.TableStatus == "ACTIVE" { - return - } - - if time.Since(start) > time.Minute { - t.Fatalf("timed out creating DynamoDB table %s", tableName) - } - - time.Sleep(3 * time.Second) - } - -} - -func deleteDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) { - params := &dynamodb.DeleteTableInput{ - TableName: aws.String(tableName), - } - _, 
err := dynClient.DeleteTable(params) - if err != nil { - t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err) - } -} diff --git a/internal/backend/remote-state/s3/client.go b/internal/backend/remote-state/s3/client.go deleted file mode 100644 index 75e89a616a37..000000000000 --- a/internal/backend/remote-state/s3/client.go +++ /dev/null @@ -1,422 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/s3" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// Store the last saved serial in dynamo with this suffix for consistency checks. -const ( - s3EncryptionAlgorithm = "AES256" - stateIDSuffix = "-md5" - s3ErrCodeInternalError = "InternalError" -) - -type RemoteClient struct { - s3Client *s3.S3 - dynClient *dynamodb.DynamoDB - bucketName string - path string - serverSideEncryption bool - customerEncryptionKey []byte - acl string - kmsKeyID string - ddbTable string -} - -var ( - // The amount of time we will retry a state waiting for it to match the - // expected checksum. - consistencyRetryTimeout = 10 * time.Second - - // delay when polling the state - consistencyRetryPollInterval = 2 * time.Second -) - -// test hook called when checksums don't match -var testChecksumHook func() - -func (c *RemoteClient) Get() (payload *remote.Payload, err error) { - deadline := time.Now().Add(consistencyRetryTimeout) - - // If we have a checksum, and the returned payload doesn't match, we retry - // up until deadline. 
- for { - payload, err = c.get() - if err != nil { - return nil, err - } - - // If the remote state was manually removed the payload will be nil, - // but if there's still a digest entry for that state we will still try - // to compare the MD5 below. - var digest []byte - if payload != nil { - digest = payload.MD5 - } - - // verify that this state is what we expect - if expected, err := c.getMD5(); err != nil { - log.Printf("[WARN] failed to fetch state md5: %s", err) - } else if len(expected) > 0 && !bytes.Equal(expected, digest) { - log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) - - if testChecksumHook != nil { - testChecksumHook() - } - - if time.Now().Before(deadline) { - time.Sleep(consistencyRetryPollInterval) - log.Println("[INFO] retrying S3 RemoteClient.Get...") - continue - } - - return nil, fmt.Errorf(errBadChecksumFmt, digest) - } - - break - } - - return payload, err -} - -func (c *RemoteClient) get() (*remote.Payload, error) { - var output *s3.GetObjectOutput - var err error - - input := &s3.GetObjectInput{ - Bucket: &c.bucketName, - Key: &c.path, - } - - if c.serverSideEncryption && c.customerEncryptionKey != nil { - input.SetSSECustomerKey(string(c.customerEncryptionKey)) - input.SetSSECustomerAlgorithm(s3EncryptionAlgorithm) - input.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5()) - } - - output, err = c.s3Client.GetObject(input) - - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Code() { - case s3.ErrCodeNoSuchBucket: - return nil, fmt.Errorf(errS3NoSuchBucket, err) - case s3.ErrCodeNoSuchKey: - return nil, nil - } - } - return nil, err - } - - defer output.Body.Close() - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, output.Body); err != nil { - return nil, fmt.Errorf("Failed to read remote state: %s", err) - } - - sum := md5.Sum(buf.Bytes()) - payload := &remote.Payload{ - Data: buf.Bytes(), - MD5: sum[:], - } - - // If there was no data, then return nil - if 
len(payload.Data) == 0 { - return nil, nil - } - - return payload, nil -} - -func (c *RemoteClient) Put(data []byte) error { - contentType := "application/json" - contentLength := int64(len(data)) - - i := &s3.PutObjectInput{ - ContentType: &contentType, - ContentLength: &contentLength, - Body: bytes.NewReader(data), - Bucket: &c.bucketName, - Key: &c.path, - } - - if c.serverSideEncryption { - if c.kmsKeyID != "" { - i.SSEKMSKeyId = &c.kmsKeyID - i.ServerSideEncryption = aws.String("aws:kms") - } else if c.customerEncryptionKey != nil { - i.SetSSECustomerKey(string(c.customerEncryptionKey)) - i.SetSSECustomerAlgorithm(s3EncryptionAlgorithm) - i.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5()) - } else { - i.ServerSideEncryption = aws.String(s3EncryptionAlgorithm) - } - } - - if c.acl != "" { - i.ACL = aws.String(c.acl) - } - - log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) - - _, err := c.s3Client.PutObject(i) - if err != nil { - return fmt.Errorf("failed to upload state: %s", err) - } - - sum := md5.Sum(data) - if err := c.putMD5(sum[:]); err != nil { - // if this errors out, we unfortunately have to error out altogether, - // since the next Get will inevitably fail. 
- return fmt.Errorf("failed to store state MD5: %s", err) - - } - - return nil -} - -func (c *RemoteClient) Delete() error { - _, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: &c.bucketName, - Key: &c.path, - }) - - if err != nil { - return err - } - - if err := c.deleteMD5(); err != nil { - log.Printf("error deleting state md5: %s", err) - } - - return nil -} - -func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { - if c.ddbTable == "" { - return "", nil - } - - info.Path = c.lockPath() - - if info.ID == "" { - lockID, err := uuid.GenerateUUID() - if err != nil { - return "", err - } - - info.ID = lockID - } - - putParams := &dynamodb.PutItemInput{ - Item: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - "Info": {S: aws.String(string(info.Marshal()))}, - }, - TableName: aws.String(c.ddbTable), - ConditionExpression: aws.String("attribute_not_exists(LockID)"), - } - _, err := c.dynClient.PutItem(putParams) - - if err != nil { - lockInfo, infoErr := c.getLockInfo() - if infoErr != nil { - err = multierror.Append(err, infoErr) - } - - lockErr := &statemgr.LockError{ - Err: err, - Info: lockInfo, - } - return "", lockErr - } - - return info.ID, nil -} - -func (c *RemoteClient) getMD5() ([]byte, error) { - if c.ddbTable == "" { - return nil, nil - } - - getParams := &dynamodb.GetItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - }, - ProjectionExpression: aws.String("LockID, Digest"), - TableName: aws.String(c.ddbTable), - ConsistentRead: aws.Bool(true), - } - - resp, err := c.dynClient.GetItem(getParams) - if err != nil { - return nil, err - } - - var val string - if v, ok := resp.Item["Digest"]; ok && v.S != nil { - val = *v.S - } - - sum, err := hex.DecodeString(val) - if err != nil || len(sum) != md5.Size { - return nil, errors.New("invalid md5") - } - - return sum, nil -} - -// store the hash of the state so that clients can check 
for stale state files. -func (c *RemoteClient) putMD5(sum []byte) error { - if c.ddbTable == "" { - return nil - } - - if len(sum) != md5.Size { - return errors.New("invalid payload md5") - } - - putParams := &dynamodb.PutItemInput{ - Item: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - "Digest": {S: aws.String(hex.EncodeToString(sum))}, - }, - TableName: aws.String(c.ddbTable), - } - _, err := c.dynClient.PutItem(putParams) - if err != nil { - log.Printf("[WARN] failed to record state serial in dynamodb: %s", err) - } - - return nil -} - -// remove the hash value for a deleted state -func (c *RemoteClient) deleteMD5() error { - if c.ddbTable == "" { - return nil - } - - params := &dynamodb.DeleteItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, - }, - TableName: aws.String(c.ddbTable), - } - if _, err := c.dynClient.DeleteItem(params); err != nil { - return err - } - return nil -} - -func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { - getParams := &dynamodb.GetItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - }, - ProjectionExpression: aws.String("LockID, Info"), - TableName: aws.String(c.ddbTable), - ConsistentRead: aws.Bool(true), - } - - resp, err := c.dynClient.GetItem(getParams) - if err != nil { - return nil, err - } - - var infoData string - if v, ok := resp.Item["Info"]; ok && v.S != nil { - infoData = *v.S - } - - lockInfo := &statemgr.LockInfo{} - err = json.Unmarshal([]byte(infoData), lockInfo) - if err != nil { - return nil, err - } - - return lockInfo, nil -} - -func (c *RemoteClient) Unlock(id string) error { - if c.ddbTable == "" { - return nil - } - - lockErr := &statemgr.LockError{} - - // TODO: store the path and lock ID in separate fields, and have proper - // projection expression only delete the lock if both match, rather than - // checking the ID from the info 
field first. - lockInfo, err := c.getLockInfo() - if err != nil { - lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) - return lockErr - } - lockErr.Info = lockInfo - - if lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) - return lockErr - } - - params := &dynamodb.DeleteItemInput{ - Key: map[string]*dynamodb.AttributeValue{ - "LockID": {S: aws.String(c.lockPath())}, - }, - TableName: aws.String(c.ddbTable), - } - _, err = c.dynClient.DeleteItem(params) - - if err != nil { - lockErr.Err = err - return lockErr - } - return nil -} - -func (c *RemoteClient) lockPath() string { - return fmt.Sprintf("%s/%s", c.bucketName, c.path) -} - -func (c *RemoteClient) getSSECustomerKeyMD5() string { - b := md5.Sum(c.customerEncryptionKey) - return base64.StdEncoding.EncodeToString(b[:]) -} - -const errBadChecksumFmt = `state data in S3 does not have the expected content. - -This may be caused by unusually long delays in S3 processing a previous state -update. Please wait for a minute or two and try again. If this problem -persists, and neither S3 nor DynamoDB are experiencing an outage, you may need -to manually verify the remote state and update the Digest value stored in the -DynamoDB table to the following value: %x -` - -const errS3NoSuchBucket = `S3 bucket does not exist. - -The referenced S3 bucket must have been previously created. If the S3 bucket -was created within the last minute, please wait for a minute or two and try -again. 
- -Error: %s -` diff --git a/internal/backend/remote-state/s3/client_test.go b/internal/backend/remote-state/s3/client_test.go deleted file mode 100644 index abbd4257c10f..000000000000 --- a/internal/backend/remote-state/s3/client_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestRemoteClient_impl(t *testing.T) { - var _ remote.Client = new(RemoteClient) - var _ remote.ClientLocker = new(RemoteClient) -} - -func TestRemoteClient(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - - state, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestClient(t, state.(*remote.State).Client) -} - -func TestRemoteClientLocks(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - createS3Bucket(t, b1.s3Client, bucketName) - defer deleteS3Bucket(t, b1.s3Client, bucketName) - createDynamoDBTable(t, 
b1.dynClient, bucketName) - defer deleteDynamoDBTable(t, b1.dynClient, bucketName) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) -} - -// verify that we can unlock a state with an existing lock -func TestForceUnlock(t *testing.T) { - testACC(t) - bucketName := fmt.Sprintf("terraform-remote-s3-test-force-%x", time.Now().Unix()) - keyName := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "encrypt": true, - "dynamodb_table": bucketName, - })).(*Backend) - - createS3Bucket(t, b1.s3Client, bucketName) - defer deleteS3Bucket(t, b1.s3Client, bucketName) - createDynamoDBTable(t, b1.dynClient, bucketName) - defer deleteDynamoDBTable(t, b1.dynClient, bucketName) - - // first test with default - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - info := statemgr.NewLockInfo() - info.Operation = "test" - info.Who = "clientA" - - lockID, err := s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal("failed to get default state to force unlock:", err) - } - - if err := s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock default state") - } - - // now try the same thing with a named state - // first test with default - s1, err = b1.StateMgr("test") - if err != nil { - t.Fatal(err) - } - - info = statemgr.NewLockInfo() - info.Operation = "test" 
- info.Who = "clientA" - - lockID, err = s1.Lock(info) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // s1 is now locked, get the same state through s2 and unlock it - s2, err = b2.StateMgr("test") - if err != nil { - t.Fatal("failed to get named state to force unlock:", err) - } - - if err = s2.Unlock(lockID); err != nil { - t.Fatal("failed to force-unlock named state") - } -} - -func TestRemoteClient_clientMD5(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" - - b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "dynamodb_table": bucketName, - })).(*Backend) - - createS3Bucket(t, b.s3Client, bucketName) - defer deleteS3Bucket(t, b.s3Client, bucketName) - createDynamoDBTable(t, b.dynClient, bucketName) - defer deleteDynamoDBTable(t, b.dynClient, bucketName) - - s, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client := s.(*remote.State).Client.(*RemoteClient) - - sum := md5.Sum([]byte("test")) - - if err := client.putMD5(sum[:]); err != nil { - t.Fatal(err) - } - - getSum, err := client.getMD5() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(getSum, sum[:]) { - t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum) - } - - if err := client.deleteMD5(); err != nil { - t.Fatal(err) - } - - if getSum, err := client.getMD5(); err == nil { - t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum) - } -} - -// verify that a client won't return a state with an incorrect checksum. 
-func TestRemoteClient_stateChecksum(t *testing.T) { - testACC(t) - - bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" - - b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - "dynamodb_table": bucketName, - })).(*Backend) - - createS3Bucket(t, b1.s3Client, bucketName) - defer deleteS3Bucket(t, b1.s3Client, bucketName) - createDynamoDBTable(t, b1.dynClient, bucketName) - defer deleteDynamoDBTable(t, b1.dynClient, bucketName) - - s1, err := b1.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client1 := s1.(*remote.State).Client - - // create an old and new state version to persist - s := statemgr.TestFullInitialState() - sf := &statefile.File{State: s} - var oldState bytes.Buffer - if err := statefile.Write(sf, &oldState); err != nil { - t.Fatal(err) - } - sf.Serial++ - var newState bytes.Buffer - if err := statefile.Write(sf, &newState); err != nil { - t.Fatal(err) - } - - // Use b2 without a dynamodb_table to bypass the lock table to write the state directly. 
- // client2 will write the "incorrect" state, simulating s3 eventually consistency delays - b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "bucket": bucketName, - "key": keyName, - })).(*Backend) - s2, err := b2.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - client2 := s2.(*remote.State).Client - - // write the new state through client2 so that there is no checksum yet - if err := client2.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - - // verify that we can pull a state without a checksum - if _, err := client1.Get(); err != nil { - t.Fatal(err) - } - - // write the new state back with its checksum - if err := client1.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - - // put an empty state in place to check for panics during get - if err := client2.Put([]byte{}); err != nil { - t.Fatal(err) - } - - // remove the timeouts so we can fail immediately - origTimeout := consistencyRetryTimeout - origInterval := consistencyRetryPollInterval - defer func() { - consistencyRetryTimeout = origTimeout - consistencyRetryPollInterval = origInterval - }() - consistencyRetryTimeout = 0 - consistencyRetryPollInterval = 0 - - // fetching an empty state through client1 should now error out due to a - // mismatched checksum. - if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { - t.Fatalf("expected state checksum error: got %s", err) - } - - // put the old state in place of the new, without updating the checksum - if err := client2.Put(oldState.Bytes()); err != nil { - t.Fatal(err) - } - - // fetching the wrong state through client1 should now error out due to a - // mismatched checksum. 
- if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { - t.Fatalf("expected state checksum error: got %s", err) - } - - // update the state with the correct one after we Get again - testChecksumHook = func() { - if err := client2.Put(newState.Bytes()); err != nil { - t.Fatal(err) - } - testChecksumHook = nil - } - - consistencyRetryTimeout = origTimeout - - // this final Get will fail to fail the checksum verification, the above - // callback will update the state with the correct version, and Get should - // retry automatically. - if _, err := client1.Get(); err != nil { - t.Fatal(err) - } -} diff --git a/internal/backend/remote/backend.go b/internal/backend/remote/backend.go deleted file mode 100644 index a26415fdaf6e..000000000000 --- a/internal/backend/remote/backend.go +++ /dev/null @@ -1,1079 +0,0 @@ -package remote - -import ( - "context" - "fmt" - "log" - "net/http" - "net/url" - "os" - "sort" - "strings" - "sync" - "time" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -const ( - defaultHostname = "app.terraform.io" - defaultParallelism = 10 - stateServiceID = "state.v2" - tfeServiceID = "tfe.v2.1" - genericHostname = "localterraform.com" -) - -// Remote is an implementation of 
EnhancedBackend that performs all -// operations in a remote backend. -type Remote struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. - CLI cli.Ui - CLIColor *colorstring.Colorize - - // ContextOpts are the base context options to set when initializing a - // new Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // client is the remote backend API client. - client *tfe.Client - - // lastRetry is set to the last time a request was retried. - lastRetry time.Time - - // hostname of the remote backend server. - hostname string - - // organization is the organization that contains the target workspaces. - organization string - - // workspace is used to map the default workspace to a remote workspace. - workspace string - - // prefix is used to filter down a set of workspaces that use a single - // configuration. - prefix string - - // services is used for service discovery - services *disco.Disco - - // local, if non-nil, will be used for all enhanced behavior. This - // allows local behavior with the remote backend functioning as remote - // state storage backend. - local backend.Enhanced - - // forceLocal, if true, will force the use of the local backend. - forceLocal bool - - // opLock locks operations - opLock sync.Mutex - - // ignoreVersionConflict, if true, will disable the requirement that the - // local Terraform version matches the remote workspace's configured - // version. This will also cause VerifyWorkspaceTerraformVersion to return - // a warning diagnostic instead of an error. - ignoreVersionConflict bool -} - -var _ backend.Backend = (*Remote)(nil) -var _ backend.Enhanced = (*Remote)(nil) -var _ backend.Local = (*Remote)(nil) - -// New creates a new initialized remote backend. 
-func New(services *disco.Disco) *Remote { - return &Remote{ - services: services, - } -} - -// ConfigSchema implements backend.Enhanced. -func (b *Remote) ConfigSchema() *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "hostname": { - Type: cty.String, - Optional: true, - Description: schemaDescriptions["hostname"], - }, - "organization": { - Type: cty.String, - Required: true, - Description: schemaDescriptions["organization"], - }, - "token": { - Type: cty.String, - Optional: true, - Description: schemaDescriptions["token"], - }, - }, - - BlockTypes: map[string]*configschema.NestedBlock{ - "workspaces": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": { - Type: cty.String, - Optional: true, - Description: schemaDescriptions["name"], - }, - "prefix": { - Type: cty.String, - Optional: true, - Description: schemaDescriptions["prefix"], - }, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - } -} - -// PrepareConfig implements backend.Backend. -func (b *Remote) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - if obj.IsNull() { - return obj, diags - } - - if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid organization value", - `The "organization" attribute value must not be empty.`, - cty.Path{cty.GetAttrStep{Name: "organization"}}, - )) - } - - var name, prefix string - if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { - if val := workspaces.GetAttr("name"); !val.IsNull() { - name = val.AsString() - } - if val := workspaces.GetAttr("prefix"); !val.IsNull() { - prefix = val.AsString() - } - } - - // Make sure that we have either a workspace name or a prefix. 
- if name == "" && prefix == "" { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid workspaces configuration", - `Either workspace "name" or "prefix" is required.`, - cty.Path{cty.GetAttrStep{Name: "workspaces"}}, - )) - } - - // Make sure that only one of workspace name or a prefix is configured. - if name != "" && prefix != "" { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid workspaces configuration", - `Only one of workspace "name" or "prefix" is allowed.`, - cty.Path{cty.GetAttrStep{Name: "workspaces"}}, - )) - } - - return obj, diags -} - -// configureGenericHostname aliases the remote backend hostname configuration -// as a generic "localterraform.com" hostname. This was originally added as a -// Terraform Enterprise feature and is useful for re-using whatever the -// Cloud/Enterprise backend host is in nested module sources in order -// to prevent code churn when re-using config between multiple -// Terraform Enterprise environments. -func (b *Remote) configureGenericHostname() { - // This won't be an error for the given constant value - genericHost, _ := svchost.ForComparison(genericHostname) - - // This won't be an error because, by this time, the hostname has been parsed and - // service discovery requests made against it. - targetHost, _ := svchost.ForComparison(b.hostname) - - b.services.Alias(genericHost, targetHost) -} - -// Configure implements backend.Enhanced. -func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if obj.IsNull() { - return diags - } - - // Get the hostname. - if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { - b.hostname = val.AsString() - } else { - b.hostname = defaultHostname - } - - // Get the organization. - if val := obj.GetAttr("organization"); !val.IsNull() { - b.organization = val.AsString() - } - - // Get the workspaces configuration block and retrieve the - // default workspace name and prefix. 
- if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { - if val := workspaces.GetAttr("name"); !val.IsNull() { - b.workspace = val.AsString() - } - if val := workspaces.GetAttr("prefix"); !val.IsNull() { - b.prefix = val.AsString() - } - } - - // Determine if we are forced to use the local backend. - b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" - - serviceID := tfeServiceID - if b.forceLocal { - serviceID = stateServiceID - } - - // Discover the service URL for this host to confirm that it provides - // a remote backend API and to get the version constraints. - service, constraints, err := b.discover(serviceID) - - // First check any contraints we might have received. - if constraints != nil { - diags = diags.Append(b.checkConstraints(constraints)) - if diags.HasErrors() { - return diags - } - } - - // When we don't have any constraints errors, also check for discovery - // errors before we continue. - if err != nil { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - strings.ToUpper(err.Error()[:1])+err.Error()[1:], - "", // no description is needed here, the error is clear - cty.Path{cty.GetAttrStep{Name: "hostname"}}, - )) - return diags - } - - // Get the token from the config. - var token string - if val := obj.GetAttr("token"); !val.IsNull() { - token = val.AsString() - } - - // Retrieve the token for this host as configured in the credentials - // section of the CLI Config File if no token was configured for this - // host in the config. - if token == "" { - token, err = b.token() - if err != nil { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - strings.ToUpper(err.Error()[:1])+err.Error()[1:], - "", // no description is needed here, the error is clear - cty.Path{cty.GetAttrStep{Name: "hostname"}}, - )) - return diags - } - } - - // Return an error if we still don't have a token at this point. 
- if token == "" { - loginCommand := "terraform login" - if b.hostname != defaultHostname { - loginCommand = loginCommand + " " + b.hostname - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Required token could not be found", - fmt.Sprintf( - "Run the following command to generate a token for %s:\n %s", - b.hostname, - loginCommand, - ), - )) - return diags - } - - b.configureGenericHostname() - - cfg := &tfe.Config{ - Address: service.String(), - BasePath: service.Path, - Token: token, - Headers: make(http.Header), - RetryLogHook: b.retryLogHook, - } - - // Set the version header to the current version. - cfg.Headers.Set(tfversion.Header, tfversion.Version) - - // Create the remote backend API client. - b.client, err = tfe.NewClient(cfg) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create the Terraform Enterprise client", - fmt.Sprintf( - `The "remote" backend encountered an unexpected error while creating the `+ - `Terraform Enterprise client: %s.`, err, - ), - )) - return diags - } - - // Check if the organization exists by reading its entitlements. - entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization) - if err != nil { - if err == tfe.ErrResourceNotFound { - err = fmt.Errorf("organization %q at host %s not found.\n\n"+ - "Please ensure that the organization and hostname are correct "+ - "and that your API token for %s is valid.", - b.organization, b.hostname, b.hostname) - } - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname), - fmt.Sprintf("The \"remote\" backend encountered an unexpected error while reading the "+ - "organization settings: %s", err), - cty.Path{cty.GetAttrStep{Name: "organization"}}, - )) - return diags - } - - // Configure a local backend for when we need to run operations locally. 
- b.local = backendLocal.NewWithBackend(b) - b.forceLocal = b.forceLocal || !entitlements.Operations - - // Enable retries for server errors as the backend is now fully configured. - b.client.RetryServerErrors(true) - - return diags -} - -// discover the remote backend API service URL and version constraints. -func (b *Remote) discover(serviceID string) (*url.URL, *disco.Constraints, error) { - hostname, err := svchost.ForComparison(b.hostname) - if err != nil { - return nil, nil, err - } - - host, err := b.services.Discover(hostname) - if err != nil { - return nil, nil, err - } - - service, err := host.ServiceURL(serviceID) - // Return the error, unless its a disco.ErrVersionNotSupported error. - if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil { - return nil, nil, err - } - - // We purposefully ignore the error and return the previous error, as - // checking for version constraints is considered optional. - constraints, _ := host.VersionConstraints(serviceID, "terraform") - - return service, constraints, err -} - -// checkConstraints checks service version constrains against our own -// version and returns rich and informational diagnostics in case any -// incompatibilities are detected. -func (b *Remote) checkConstraints(c *disco.Constraints) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if c == nil || c.Minimum == "" || c.Maximum == "" { - return diags - } - - // Generate a parsable constraints string. - excluding := "" - if len(c.Excluding) > 0 { - excluding = fmt.Sprintf(", != %s", strings.Join(c.Excluding, ", != ")) - } - constStr := fmt.Sprintf(">= %s%s, <= %s", c.Minimum, excluding, c.Maximum) - - // Create the constraints to check against. - constraints, err := version.NewConstraint(constStr) - if err != nil { - return diags.Append(checkConstraintsWarning(err)) - } - - // Create the version to check. 
- v, err := version.NewVersion(tfversion.Version) - if err != nil { - return diags.Append(checkConstraintsWarning(err)) - } - - // Return if we satisfy all constraints. - if constraints.Check(v) { - return diags - } - - // Find out what action (upgrade/downgrade) we should advice. - minimum, err := version.NewVersion(c.Minimum) - if err != nil { - return diags.Append(checkConstraintsWarning(err)) - } - - maximum, err := version.NewVersion(c.Maximum) - if err != nil { - return diags.Append(checkConstraintsWarning(err)) - } - - var excludes []*version.Version - for _, exclude := range c.Excluding { - v, err := version.NewVersion(exclude) - if err != nil { - return diags.Append(checkConstraintsWarning(err)) - } - excludes = append(excludes, v) - } - - // Sort all the excludes. - sort.Sort(version.Collection(excludes)) - - var action, toVersion string - switch { - case minimum.GreaterThan(v): - action = "upgrade" - toVersion = ">= " + minimum.String() - case maximum.LessThan(v): - action = "downgrade" - toVersion = "<= " + maximum.String() - case len(excludes) > 0: - // Get the latest excluded version. 
- action = "upgrade" - toVersion = "> " + excludes[len(excludes)-1].String() - } - - switch { - case len(excludes) == 1: - excluding = fmt.Sprintf(", excluding version %s", excludes[0].String()) - case len(excludes) > 1: - var vs []string - for _, v := range excludes { - vs = append(vs, v.String()) - } - excluding = fmt.Sprintf(", excluding versions %s", strings.Join(vs, ", ")) - default: - excluding = "" - } - - summary := fmt.Sprintf("Incompatible Terraform version v%s", v.String()) - details := fmt.Sprintf( - "The configured Terraform Enterprise backend is compatible with Terraform "+ - "versions >= %s, <= %s%s.", c.Minimum, c.Maximum, excluding, - ) - - if action != "" && toVersion != "" { - summary = fmt.Sprintf("Please %s Terraform to %s", action, toVersion) - details += fmt.Sprintf(" Please %s to a supported version and try again.", action) - } - - // Return the customized and informational error message. - return diags.Append(tfdiags.Sourceless(tfdiags.Error, summary, details)) -} - -// token returns the token for this host as configured in the credentials -// section of the CLI Config File. If no token was configured, an empty -// string will be returned instead. -func (b *Remote) token() (string, error) { - hostname, err := svchost.ForComparison(b.hostname) - if err != nil { - return "", err - } - creds, err := b.services.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) - return "", nil - } - if creds != nil { - return creds.Token(), nil - } - return "", nil -} - -// retryLogHook is invoked each time a request is retried allowing the -// backend to log any connection issues to prevent data loss. -func (b *Remote) retryLogHook(attemptNum int, resp *http.Response) { - if b.CLI != nil { - // Ignore the first retry to make sure any delayed output will - // be written to the console before we start logging retries. 
- // - // The retry logic in the TFE client will retry both rate limited - // requests and server errors, but in the remote backend we only - // care about server errors so we ignore rate limit (429) errors. - if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { - // Reset the last retry time. - b.lastRetry = time.Now() - return - } - - if attemptNum == 1 { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) - } else { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace( - fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) - } - } -} - -// Workspaces implements backend.Enhanced. -func (b *Remote) Workspaces() ([]string, error) { - if b.prefix == "" { - return nil, backend.ErrWorkspacesNotSupported - } - return b.workspaces() -} - -// workspaces returns a filtered list of remote workspace names. -func (b *Remote) workspaces() ([]string, error) { - options := &tfe.WorkspaceListOptions{} - switch { - case b.workspace != "": - options.Search = b.workspace - case b.prefix != "": - options.Search = b.prefix - } - - // Create a slice to contain all the names. - var names []string - - for { - wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) - if err != nil { - return nil, err - } - - for _, w := range wl.Items { - if b.workspace != "" && w.Name == b.workspace { - names = append(names, backend.DefaultStateName) - continue - } - if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { - names = append(names, strings.TrimPrefix(w.Name, b.prefix)) - } - } - - // Exit the loop when we've seen all pages. - if wl.CurrentPage >= wl.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = wl.NextPage - } - - // Sort the result so we have consistent output. 
- sort.StringSlice(names).Sort() - - return names, nil -} - -// WorkspaceNamePattern provides an appropriate workspace renaming pattern for backend migration -// purposes (handled outside of this package), based on previous usage of this backend with the -// 'prefix' workspace functionality. As of this writing, see meta_backend.migrate.go -func (b *Remote) WorkspaceNamePattern() string { - if b.prefix != "" { - return b.prefix + "*" - } - - return "" -} - -// DeleteWorkspace implements backend.Enhanced. -func (b *Remote) DeleteWorkspace(name string, _ bool) error { - if b.workspace == "" && name == backend.DefaultStateName { - return backend.ErrDefaultWorkspaceNotSupported - } - if b.prefix == "" && name != backend.DefaultStateName { - return backend.ErrWorkspacesNotSupported - } - - // Configure the remote workspace name. - switch { - case name == backend.DefaultStateName: - name = b.workspace - case b.prefix != "" && !strings.HasPrefix(name, b.prefix): - name = b.prefix + name - } - - client := &remoteClient{ - client: b.client, - organization: b.organization, - workspace: &tfe.Workspace{ - Name: name, - }, - } - - return client.Delete() -} - -// StateMgr implements backend.Enhanced. -func (b *Remote) StateMgr(name string) (statemgr.Full, error) { - if b.workspace == "" && name == backend.DefaultStateName { - return nil, backend.ErrDefaultWorkspaceNotSupported - } - if b.prefix == "" && name != backend.DefaultStateName { - return nil, backend.ErrWorkspacesNotSupported - } - - // Configure the remote workspace name. 
- switch { - case name == backend.DefaultStateName: - name = b.workspace - case b.prefix != "" && !strings.HasPrefix(name, b.prefix): - name = b.prefix + name - } - - workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) - if err != nil && err != tfe.ErrResourceNotFound { - return nil, fmt.Errorf("Failed to retrieve workspace %s: %v", name, err) - } - - if err == tfe.ErrResourceNotFound { - options := tfe.WorkspaceCreateOptions{ - Name: tfe.String(name), - } - - // We only set the Terraform Version for the new workspace if this is - // a release candidate or a final release. - if tfversion.Prerelease == "" || strings.HasPrefix(tfversion.Prerelease, "rc") { - options.TerraformVersion = tfe.String(tfversion.String()) - } - - workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options) - if err != nil { - return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) - } - } - - // This is a fallback error check. Most code paths should use other - // mechanisms to check the version, then set the ignoreVersionConflict - // field to true. This check is only in place to ensure that we don't - // accidentally upgrade state with a new code path, and the version check - // logic is coarser and simpler. - if !b.ignoreVersionConflict { - wsv := workspace.TerraformVersion - // Explicitly ignore the pseudo-version "latest" here, as it will cause - // plan and apply to always fail. - if wsv != tfversion.String() && wsv != "latest" { - return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", workspace.TerraformVersion, tfversion.String()) - } - } - - client := &remoteClient{ - client: b.client, - organization: b.organization, - workspace: workspace, - - // This is optionally set during Terraform Enterprise runs. 
- runID: os.Getenv("TFE_RUN_ID"), - } - - return &remote.State{Client: client}, nil -} - -func isLocalExecutionMode(execMode string) bool { - return execMode == "local" -} - -func (b *Remote) fetchWorkspace(ctx context.Context, organization string, name string) (*tfe.Workspace, error) { - remoteWorkspaceName := b.getRemoteWorkspaceName(name) - // Retrieve the workspace for this operation. - w, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) - if err != nil { - switch err { - case context.Canceled: - return nil, err - case tfe.ErrResourceNotFound: - return nil, fmt.Errorf( - "workspace %s not found\n\n"+ - "The configured \"remote\" backend returns '404 Not Found' errors for resources\n"+ - "that do not exist, as well as for resources that a user doesn't have access\n"+ - "to. If the resource does exist, please check the rights for the used token", - name, - ) - default: - err := fmt.Errorf( - "the configured \"remote\" backend encountered an unexpected error:\n\n%s", - err, - ) - return nil, err - } - } - - return w, nil -} - -// Operation implements backend.Enhanced. -func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { - w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) - - if err != nil { - return nil, err - } - - // Terraform remote version conflicts are not a concern for operations. We - // are in one of three states: - // - // - Running remotely, in which case the local version is irrelevant; - // - Workspace configured for local operations, in which case the remote - // version is meaningless; - // - Forcing local operations with a remote backend, which should only - // happen in the Terraform Cloud worker, in which case the Terraform - // versions by definition match. - b.IgnoreVersionConflict() - - // Check if we need to use the local backend to run the operation. 
- if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) { - // Record that we're forced to run operations locally to allow the - // command package UI to operate correctly - b.forceLocal = true - log.Printf("[DEBUG] Remote backend is delegating %s to the local backend", op.Type) - return b.local.Operation(ctx, op) - } - - // Set the remote workspace name. - op.Workspace = w.Name - - // Determine the function to call for our operation - var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) - switch op.Type { - case backend.OperationTypePlan: - f = b.opPlan - case backend.OperationTypeApply: - f = b.opApply - case backend.OperationTypeRefresh: - return nil, fmt.Errorf( - "\n\nThe \"refresh\" operation is not supported when using the \"remote\" backend. " + - "Use \"terraform apply -refresh-only\" instead.") - default: - return nil, fmt.Errorf( - "\n\nThe \"remote\" backend does not support the %q operation.", op.Type) - } - - // Lock - b.opLock.Lock() - - // Build our running operation - // the runninCtx is only used to block until the operation returns. - runningCtx, done := context.WithCancel(context.Background()) - runningOp := &backend.RunningOperation{ - Context: runningCtx, - PlanEmpty: true, - } - - // stopCtx wraps the context passed in, and is used to signal a graceful Stop. - stopCtx, stop := context.WithCancel(ctx) - runningOp.Stop = stop - - // cancelCtx is used to cancel the operation immediately, usually - // indicating that the process is exiting. - cancelCtx, cancel := context.WithCancel(context.Background()) - runningOp.Cancel = cancel - - // Do it. 
- go func() { - defer logging.PanicHandler() - defer done() - defer stop() - defer cancel() - - defer b.opLock.Unlock() - - r, opErr := f(stopCtx, cancelCtx, op, w) - if opErr != nil && opErr != context.Canceled { - var diags tfdiags.Diagnostics - diags = diags.Append(opErr) - op.ReportResult(runningOp, diags) - return - } - - if r == nil && opErr == context.Canceled { - runningOp.Result = backend.OperationFailure - return - } - - if r != nil { - // Retrieve the run to get its current status. - r, err := b.client.Runs.Read(cancelCtx, r.ID) - if err != nil { - var diags tfdiags.Diagnostics - diags = diags.Append(generalError("Failed to retrieve run", err)) - op.ReportResult(runningOp, diags) - return - } - - // Record if there are any changes. - runningOp.PlanEmpty = !r.HasChanges - - if opErr == context.Canceled { - if err := b.cancel(cancelCtx, op, r); err != nil { - var diags tfdiags.Diagnostics - diags = diags.Append(generalError("Failed to retrieve run", err)) - op.ReportResult(runningOp, diags) - return - } - } - - if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - runningOp.Result = backend.OperationFailure - } - } - }() - - // Return the running operation. - return runningOp, nil -} - -func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if r.Actions.IsCancelable { - // Only ask if the remote operation should be canceled - // if the auto approve flag is not set. - if !op.AutoApprove { - v, err := op.UIIn.Input(cancelCtx, &terraform.InputOpts{ - Id: "cancel", - Query: "\nDo you want to cancel the remote operation?", - Description: "Only 'yes' will be accepted to cancel.", - }) - if err != nil { - return generalError("Failed asking to cancel", err) - } - if v != "yes" { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) - } - return nil - } - } else { - if b.CLI != nil { - // Insert a blank line to separate the ouputs. 
- b.CLI.Output("") - } - } - - // Try to cancel the remote operation. - err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) - if err != nil { - return generalError("Failed to cancel run", err) - } - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) - } - } - - return nil -} - -// IgnoreVersionConflict allows commands to disable the fall-back check that -// the local Terraform version matches the remote workspace's configured -// Terraform version. This should be called by commands where this check is -// unnecessary, such as those performing remote operations, or read-only -// operations. It will also be called if the user uses a command-line flag to -// override this check. -func (b *Remote) IgnoreVersionConflict() { - b.ignoreVersionConflict = true -} - -// VerifyWorkspaceTerraformVersion compares the local Terraform version against -// the workspace's configured Terraform version. If they are equal, this means -// that there are no compatibility concerns, so it returns no diagnostics. -// -// If the versions differ, -func (b *Remote) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) - if err != nil { - // If the workspace doesn't exist, there can be no compatibility - // problem, so we can return. This is most likely to happen when - // migrating state from a local backend to a new workspace. - if err == tfe.ErrResourceNotFound { - return nil - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error looking up workspace", - fmt.Sprintf("Workspace read failed: %s", err), - )) - return diags - } - - // If the workspace has the pseudo-version "latest", all bets are off. We - // cannot reasonably determine what the intended Terraform version is, so - // we'll skip version verification. 
- if workspace.TerraformVersion == "latest" { - return nil - } - - // If the workspace has remote operations disabled, the remote Terraform - // version is effectively meaningless, so we'll skip version verification. - if isLocalExecutionMode(workspace.ExecutionMode) { - return nil - } - - remoteVersion, err := version.NewSemver(workspace.TerraformVersion) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error looking up workspace", - fmt.Sprintf("Invalid Terraform version: %s", err), - )) - return diags - } - - v014 := version.Must(version.NewSemver("0.14.0")) - if tfversion.SemVer.LessThan(v014) || remoteVersion.LessThan(v014) { - // Versions of Terraform prior to 0.14.0 will refuse to load state files - // written by a newer version of Terraform, even if it is only a patch - // level difference. As a result we require an exact match. - if tfversion.SemVer.Equal(remoteVersion) { - return diags - } - } - if tfversion.SemVer.GreaterThanOrEqual(v014) && remoteVersion.GreaterThanOrEqual(v014) { - // Versions of Terraform after 0.14.0 should be compatible with each - // other. At the time this code was written, the only constraints we - // are aware of are: - // - // - 0.14.0 is guaranteed to be compatible with versions up to but not - // including 1.3.0 - v130 := version.Must(version.NewSemver("1.3.0")) - if tfversion.SemVer.LessThan(v130) && remoteVersion.LessThan(v130) { - return diags - } - // - Any new Terraform state version will require at least minor patch - // increment, so x.y.* will always be compatible with each other - tfvs := tfversion.SemVer.Segments64() - rwvs := remoteVersion.Segments64() - if len(tfvs) == 3 && len(rwvs) == 3 && tfvs[0] == rwvs[0] && tfvs[1] == rwvs[1] { - return diags - } - } - - // Even if ignoring version conflicts, it may still be useful to call this - // method and warn the user about a mismatch between the local and remote - // Terraform versions. 
- severity := tfdiags.Error - if b.ignoreVersionConflict { - severity = tfdiags.Warning - } - - suggestion := " If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace." - if b.ignoreVersionConflict { - suggestion = "" - } - diags = diags.Append(tfdiags.Sourceless( - severity, - "Terraform version mismatch", - fmt.Sprintf( - "The local Terraform version (%s) does not match the configured version for remote workspace %s/%s (%s).%s", - tfversion.String(), - b.organization, - workspace.Name, - workspace.TerraformVersion, - suggestion, - ), - )) - - return diags -} - -func (b *Remote) IsLocalOperations() bool { - return b.forceLocal -} - -func generalError(msg string, err error) error { - var diags tfdiags.Diagnostics - - if urlErr, ok := err.(*url.Error); ok { - err = urlErr.Err - } - - switch err { - case context.Canceled: - return err - case tfe.ErrResourceNotFound: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("%s: %v", msg, err), - `The configured "remote" backend returns '404 Not Found' errors for resources `+ - `that do not exist, as well as for resources that a user doesn't have access `+ - `to. If the resource does exist, please check the rights for the used token.`, - )) - return diags.Err() - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("%s: %v", msg, err), - `The configured "remote" backend encountered an unexpected error. Sometimes `+ - `this is caused by network connection problems, in which case you could retry `+ - `the command. 
If the issue persists please open a support ticket to get help `+ - `resolving the problem.`, - )) - return diags.Err() - } -} - -func checkConstraintsWarning(err error) tfdiags.Diagnostic { - return tfdiags.Sourceless( - tfdiags.Warning, - fmt.Sprintf("Failed to check version constraints: %v", err), - "Checking version constraints is considered optional, but this is an"+ - "unexpected error which should be reported.", - ) -} - -// The newline in this error is to make it look good in the CLI! -const initialRetryError = ` -[reset][yellow]There was an error connecting to the remote backend. Please do not exit -Terraform to prevent data loss! Trying to restore the connection... -[reset] -` - -const repeatedRetryError = ` -[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset] -` - -const operationCanceled = ` -[reset][red]The remote operation was successfully cancelled.[reset] -` - -const operationNotCanceled = ` -[reset][red]The remote operation was not cancelled.[reset] -` - -var schemaDescriptions = map[string]string{ - "hostname": "The remote backend hostname to connect to (defaults to app.terraform.io).", - "organization": "The name of the organization containing the targeted workspace(s).", - "token": "The token used to authenticate with the remote backend. If credentials for the\n" + - "host are configured in the CLI Config File, then those will be used instead.", - "name": "A workspace name used to map the default workspace to a named remote workspace.\n" + - "When configured only the default workspace can be used. This option conflicts\n" + - "with \"prefix\"", - "prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" + - "will automatically be prefixed with this prefix. If omitted only the default\n" + - "workspace can be used. 
This option conflicts with \"name\"", -} diff --git a/internal/backend/remote/backend_apply.go b/internal/backend/remote/backend_apply.go deleted file mode 100644 index ef89466a235b..000000000000 --- a/internal/backend/remote/backend_apply.go +++ /dev/null @@ -1,301 +0,0 @@ -package remote - -import ( - "bufio" - "context" - "fmt" - "io" - "log" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - log.Printf("[INFO] backend/remote: starting Apply operation") - - var diags tfdiags.Diagnostics - - // We should remove the `CanUpdate` part of this test, but for now - // (to remain compatible with tfe.v2.1) we'll leave it in here. - if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Insufficient rights to apply changes", - "The provided credentials have insufficient rights to apply changes. 
In order "+ - "to apply changes at least write permissions on the workspace are required.", - )) - return nil, diags.Err() - } - - if w.VCSRepo != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Apply not allowed for workspaces with a VCS connection", - "A workspace that is connected to a VCS requires the VCS-driven workflow "+ - "to ensure that the VCS remains the single source of truth.", - )) - return nil, diags.Err() - } - - if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Custom parallelism values are currently not supported", - `The "remote" backend does not support setting a custom parallelism `+ - `value at this time.`, - )) - } - - if op.PlanFile != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Applying a saved plan is currently not supported", - `The "remote" backend currently requires configuration to be present and `+ - `does not accept an existing saved plan as an argument at this time.`, - )) - } - - if b.hasExplicitVariableValues(op) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Run variables are currently not supported", - fmt.Sprintf( - "The \"remote\" backend does not support setting run variables at this time. "+ - "Currently the only to way to pass variables to the remote backend is by "+ - "creating a '*.auto.tfvars' variables file. This file will automatically "+ - "be loaded by the \"remote\" backend when the workspace is configured to use "+ - "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ - "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", - b.hostname, b.organization, op.Workspace, - ), - )) - } - - if !op.HasConfig() && op.PlanMode != plans.DestroyMode { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files found", - `Apply requires configuration to be present. 
Applying without a configuration `+ - `would mark everything for destruction, which is normally not what is desired. `+ - `If you would like to destroy everything, please run 'terraform destroy' which `+ - `does not require any configuration files.`, - )) - } - - // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, - // so if there's an error when parsing the RemoteAPIVersion, it's handled as - // equivalent to an API version < 2.3. - currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) - - if !op.PlanRefresh { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Planning without refresh is not supported", - fmt.Sprintf( - `The host %s does not support the -refresh=false option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if op.PlanMode == plans.RefreshOnlyMode { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Refresh-only mode is not supported", - fmt.Sprintf( - `The host %s does not support -refresh-only mode for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if len(op.ForceReplace) != 0 { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Planning resource replacements is not supported", - fmt.Sprintf( - `The host %s does not support the -replace option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if len(op.Targets) != 0 { - desiredAPIVersion, _ := version.NewVersion("2.3") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource targeting is not supported", - 
fmt.Sprintf( - `The host %s does not support the -target option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - // Return if there are any errors. - if diags.HasErrors() { - return nil, diags.Err() - } - - // Run the plan phase. - r, err := b.plan(stopCtx, cancelCtx, op, w) - if err != nil { - return r, err - } - - // This check is also performed in the plan method to determine if - // the policies should be checked, but we need to check the values - // here again to determine if we are done and should return. - if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - return r, nil - } - - // Retrieve the run to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // Return if the run cannot be confirmed. - if !w.AutoApply && !r.Actions.IsConfirmable { - return r, nil - } - - // Since we already checked the permissions before creating the run - // this should never happen. But it doesn't hurt to keep this in as - // a safeguard for any unexpected situations. - if !w.AutoApply && !r.Permissions.CanApply { - // Make sure we discard the run if possible. - if r.Actions.IsDiscardable { - err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) - if err != nil { - switch op.PlanMode { - case plans.DestroyMode: - return r, generalError("Failed to discard destroy", err) - default: - return r, generalError("Failed to discard apply", err) - } - } - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Insufficient rights to approve the pending changes", - fmt.Sprintf("There are pending changes, but the provided credentials have "+ - "insufficient rights to approve them. The run will be discarded to prevent "+ - "it from blocking the queue waiting for external approval. 
To queue a run "+ - "that can be approved by someone else, please use the 'Queue Plan' button in "+ - "the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace), - )) - return r, diags.Err() - } - - mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove - - if !w.AutoApply { - if mustConfirm { - opts := &terraform.InputOpts{Id: "approve"} - - if op.PlanMode == plans.DestroyMode { - opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" - opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - } else { - opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" - opts.Description = "Terraform will perform the actions described above.\n" + - "Only 'yes' will be accepted to approve." - } - - err = b.confirm(stopCtx, op, opts, r, "yes") - if err != nil && err != errRunApproved { - return r, err - } - } - - if err != errRunApproved { - if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { - return r, generalError("Failed to approve the apply command", err) - } - } - } - - // If we don't need to ask for confirmation, insert a blank - // line to separate the ouputs. 
- if w.AutoApply || !mustConfirm { - if b.CLI != nil { - b.CLI.Output("") - } - } - - r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) - if err != nil { - return r, err - } - - logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID) - if err != nil { - return r, generalError("Failed to retrieve logs", err) - } - reader := bufio.NewReaderSize(logs, 64*1024) - - if b.CLI != nil { - skip := 0 - for next := true; next; { - var l, line []byte - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return r, generalError("Failed to read logs", err) - } - next = false - } - line = append(line, l...) - } - - // Skip the first 3 lines to prevent duplicate output. - if skip < 3 { - skip++ - continue - } - - if next || len(line) > 0 { - b.CLI.Output(b.Colorize().Color(string(line))) - } - } - } - - return r, nil -} - -const applyDefaultHeader = ` -[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C -will cancel the remote apply if it's still pending. If the apply started it -will stop streaming the logs, but will not stop the apply running remotely.[reset] - -Preparing the remote apply... 
-` diff --git a/internal/backend/remote/backend_apply_test.go b/internal/backend/remote/backend_apply_test.go deleted file mode 100644 index c0e8aef2047f..000000000000 --- a/internal/backend/remote/backend_apply_test.go +++ /dev/null @@ -1,1665 +0,0 @@ -package remote - -import ( - "context" - "os" - "os/signal" - "strings" - "syscall" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" -) - -func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - return testOperationApplyWithTimeout(t, configDir, 0) -} - -func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewView(streams) - stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) - operationView := views.NewOperation(arguments.ViewHuman, false, view) - - // Many of our tests use an 
overridden "null" provider that's just in-memory - // inside the test process, not a separate plugin on disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) - - return &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - PlanRefresh: true, - StateLocker: clistate.NewLocker(timeout, stateLockerView), - Type: backend.OperationTypeApply, - View: operationView, - DependencyLocks: depLocks, - }, configCleanup, done -} - -func TestRemote_applyBasic(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } - - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - 
t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} - -func TestRemote_applyCanceled(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Stop the run to simulate a Ctrl-C. - run.Stop() - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) - } -} - -func TestRemote_applyWithoutPermissions(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - // Create a named workspace without permissions. 
- w, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.prefix + "prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - w.Permissions.CanQueueApply = false - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.UIOut = b.CLI - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Insufficient rights to apply changes") { - t.Fatalf("expected a permissions error, got: %v", errOutput) - } -} - -func TestRemote_applyWithVCS(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - // Create a named workspace with a VCS. 
- _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.prefix + "prod"), - VCSRepo: &tfe.VCSRepoOptions{}, - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { - t.Fatalf("expected a VCS error, got: %v", errOutput) - } -} - -func TestRemote_applyWithParallelism(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - b.ContextOpts.Parallelism = 3 - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "parallelism values are currently not supported") { - t.Fatalf("expected a parallelism error, got: %v", errOutput) - } -} - -func TestRemote_applyWithPlan(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.PlanFile = &planfile.Reader{} - op.Workspace = backend.DefaultStateName - - run, err := 
b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "saved plan is currently not supported") { - t.Fatalf("expected a saved plan error, got: %v", errOutput) - } -} - -func TestRemote_applyWithoutRefresh(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.PlanRefresh = false - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has refresh set - // to false. 
- runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(false, run.Refresh); diff != "" { - t.Errorf("wrong Refresh setting in the created run\n%s", diff) - } - } -} - -func TestRemote_applyWithoutRefreshIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - op.PlanRefresh = false - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Planning without refresh is not supported") { - t.Fatalf("expected a not supported error, got: %v", errOutput) - } -} - -func TestRemote_applyWithRefreshOnly(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has refresh-only set - // to true. 
- runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { - t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) - } - } -} - -func TestRemote_applyWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Refresh-only mode is not supported") { - t.Fatalf("expected a not supported error, got: %v", errOutput) - } -} - -func TestRemote_applyWithTarget(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected apply operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // target address we requested 
above. - runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { - t.Errorf("wrong TargetAddrs in the created run\n%s", diff) - } - } -} - -func TestRemote_applyWithTargetIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - // Set the tfe client's RemoteAPIVersion to an empty string, to mimic - // API versions prior to 2.3. - b.client.SetFakeRemoteAPIVersion("") - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Resource targeting is not supported") { - t.Fatalf("expected a targeting error, got: %v", errOutput) - } -} - -func TestRemote_applyWithReplace(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected 
plan operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // refresh address we requested above. - runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { - t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) - } - } -} - -func TestRemote_applyWithReplaceIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Planning resource replacements is not supported") { - t.Fatalf("expected a not supported error, got: %v", errOutput) - } -} - -func TestRemote_applyWithVariables(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") - defer configCleanup() - - op.Variables = testVariables(terraform.ValueFromNamedFile, "foo", "bar") - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) 
- } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "variables are currently not supported") { - t.Fatalf("expected a variables error, got: %v", errOutput) - } -} - -func TestRemote_applyNoConfig(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "configuration files found") { - t.Fatalf("expected configuration files error, got: %v", errOutput) - } - - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) - } -} - -func TestRemote_applyNoChanges(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := 
b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") { - t.Fatalf("expected no changes in plan summery: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } -} - -func TestRemote_applyNoApprove(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - input := testInput(t, map[string]string{ - "approve": "no", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Apply discarded") { - t.Fatalf("expected an apply discarded error, got: %v", errOutput) - } -} - -func TestRemote_applyAutoApprove(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "no", - }) - - op.AutoApprove = true - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if 
len(input.answers) != 1 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyApprovedExternally(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "wait-for-external-update", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - ctx := context.Background() - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Wait 50 milliseconds to make sure the run started. 
- time.Sleep(50 * time.Millisecond) - - wl, err := b.client.Workspaces.List( - ctx, - b.organization, - nil, - ) - if err != nil { - t.Fatalf("unexpected error listing workspaces: %v", err) - } - if len(wl.Items) != 1 { - t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) - } - - rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) - if err != nil { - t.Fatalf("unexpected error listing runs: %v", err) - } - if len(rl.Items) != 1 { - t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) - } - - err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) - if err != nil { - t.Fatalf("unexpected error approving run: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "approved using the UI or API") { - t.Fatalf("expected external approval in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyDiscardedExternally(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "wait-for-external-update", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - ctx := context.Background() - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error 
starting operation: %v", err) - } - - // Wait 50 milliseconds to make sure the run started. - time.Sleep(50 * time.Millisecond) - - wl, err := b.client.Workspaces.List( - ctx, - b.organization, - nil, - ) - if err != nil { - t.Fatalf("unexpected error listing workspaces: %v", err) - } - if len(wl.Items) != 1 { - t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) - } - - rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) - if err != nil { - t.Fatalf("unexpected error listing runs: %v", err) - } - if len(rl.Items) != 1 { - t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) - } - - err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) - if err != nil { - t.Fatalf("unexpected error discarding run: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "discarded using the UI or API") { - t.Fatalf("expected external discard output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestRemote_applyWithAutoApply(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - // Create a named workspace that auto applies. 
- _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - AutoApply: tfe.Bool(true), - Name: tfe.String(b.prefix + "prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyForceLocal(t *testing.T) { - // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use - // the local backend with itself as embedded backend. 
- if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { - t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) - } - defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("unexpected remote backend header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - if !run.State.HasManagedResourceInstanceObjects() { - t.Fatalf("expected resources in state") - } -} - -func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - ctx := context.Background() - - // Create a named workspace that doesn't allow operations. 
- _, err := b.client.Workspaces.Create( - ctx, - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.prefix + "no-operations"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "no-operations" - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("unexpected remote backend header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - if !run.State.HasManagedResourceInstanceObjects() { - t.Fatalf("expected resources in state") - } -} - -func TestRemote_applyLockTimeout(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - ctx := context.Background() - - // Retrieve the workspace used to run this operation in. - w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) - if err != nil { - t.Fatalf("error retrieving workspace: %v", err) - } - - // Create a new configuration version. 
- c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) - if err != nil { - t.Fatalf("error creating configuration version: %v", err) - } - - // Create a pending run to block this run. - _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ - ConfigurationVersion: c, - Workspace: w, - }) - if err != nil { - t.Fatalf("error creating pending run: %v", err) - } - - op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond) - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "cancel": "yes", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - _, err = b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - sigint := make(chan os.Signal, 1) - signal.Notify(sigint, syscall.SIGINT) - select { - case <-sigint: - // Stop redirecting SIGINT signals. - signal.Stop(sigint) - case <-time.After(200 * time.Millisecond): - t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") - } - - if len(input.answers) != 2 { - t.Fatalf("expected unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("expected lock timout error in output: %s", output) - } - if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("unexpected plan summery in output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestRemote_applyDestroy(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.PlanMode = plans.DestroyMode - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyDestroyNoConfig(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } 
-} - -func TestRemote_applyPolicyPass(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyPolicyHardFail(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed") - defer configCleanup() - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == 
backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answers, got: %v", input.answers) - } - - errOutput := viewOutput.Stderr() - if !strings.Contains(errOutput, "hard failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestRemote_applyPolicySoftFail(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "override": "override", - "approve": "yes", - }) - - op.AutoApprove = false - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote 
backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyPolicySoftFailAutoApproveSuccess(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - - input := testInput(t, map[string]string{}) - - op.AutoApprove = true - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result != backend.OperationSuccess { - t.Fatal("expected apply operation to success due to auto-approve") - } - - if run.PlanEmpty { - t.Fatalf("expected plan to not be empty, plan opertion completed without error") - } - - if len(input.answers) != 0 { - t.Fatalf("expected no answers, got: %v", input.answers) - } - - errOutput := viewOutput.Stderr() - if strings.Contains(errOutput, "soft failed") { - t.Fatalf("expected no policy check errors, instead got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check to be false, insead got: %s", output) - } - if !strings.Contains(output, "Apply complete!") { - t.Fatalf("expected apply to be complete, instead got: %s", output) - } - - if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected resources, instead got: %s", output) - } -} - 
-func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // Create a named workspace that auto applies. - _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - AutoApply: tfe.Bool(true), - Name: tfe.String(b.prefix + "prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "override": "override", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestRemote_applyWithRemoteError(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error") - defer configCleanup() - defer done(t) - - 
op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("expected apply error in output: %s", output) - } -} - -func TestRemote_applyVersionCheck(t *testing.T) { - testCases := map[string]struct { - localVersion string - remoteVersion string - forceLocal bool - executionMode string - wantErr string - }{ - "versions can be different for remote apply": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - executionMode: "remote", - }, - "versions can be different for local apply": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - executionMode: "local", - }, - "force local with remote operations and different versions is acceptable": { - localVersion: "0.14.0", - remoteVersion: "0.14.0-acme-provider-bundle", - forceLocal: true, - executionMode: "remote", - }, - "no error if versions are identical": { - localVersion: "0.14.0", - remoteVersion: "0.14.0", - forceLocal: true, - executionMode: "remote", - }, - "no error if force local but workspace has remote operations disabled": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - forceLocal: true, - executionMode: "local", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // SETUP: Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // SETUP: Set local version for the test case - 
tfversion.Prerelease = "" - tfversion.Version = tc.localVersion - tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) - - // SETUP: Set force local for the test case - b.forceLocal = tc.forceLocal - - ctx := context.Background() - - // SETUP: set the operations and Terraform Version fields on the - // remote workspace - _, err := b.client.Workspaces.Update( - ctx, - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - ExecutionMode: tfe.String(tc.executionMode), - TerraformVersion: tfe.String(tc.remoteVersion), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - // RUN: prepare the apply operation and run it - op, configCleanup, _ := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // RUN: wait for completion - <-run.Done() - output := done(t) - - if tc.wantErr != "" { - // ASSERT: if the test case wants an error, check for failure - // and the error message - if run.Result != backend.OperationFailure { - t.Fatalf("expected run to fail, but result was %#v", run.Result) - } - errOutput := output.Stderr() - if !strings.Contains(errOutput, tc.wantErr) { - t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) - } - } else { - // ASSERT: otherwise, check for success and appropriate output - // based on whether the run should be local or remote - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - output := b.CLI.(*cli.MockUi).OutputWriter.String() - hasRemote := strings.Contains(output, "Running apply in 
the remote backend") - hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") - hasResources := run.State.HasManagedResourceInstanceObjects() - if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { - if !hasRemote { - t.Errorf("missing remote backend header in output: %s", output) - } - if !hasSummary { - t.Errorf("expected apply summary in output: %s", output) - } - } else { - if hasRemote { - t.Errorf("unexpected remote backend header in output: %s", output) - } - if !hasResources { - t.Errorf("expected resources in state") - } - } - } - }) - } -} diff --git a/internal/backend/remote/backend_common.go b/internal/backend/remote/backend_common.go deleted file mode 100644 index 710cdfb84fba..000000000000 --- a/internal/backend/remote/backend_common.go +++ /dev/null @@ -1,577 +0,0 @@ -package remote - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "math" - "strconv" - "strings" - "time" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terraform" -) - -var ( - errApplyDiscarded = errors.New("Apply discarded.") - errDestroyDiscarded = errors.New("Destroy discarded.") - errRunApproved = errors.New("approved using the UI or API") - errRunDiscarded = errors.New("discarded using the UI or API") - errRunOverridden = errors.New("overridden using the UI or API") -) - -var ( - backoffMin = 1000.0 - backoffMax = 3000.0 - - runPollInterval = 3 * time.Second -) - -// backoff will perform exponential backoff based on the iteration and -// limited by the provided min and max (in milliseconds) durations. 
-func backoff(min, max float64, iter int) time.Duration { - backoff := math.Pow(2, float64(iter)/5) * min - if backoff > max { - backoff = max - } - return time.Duration(backoff) * time.Millisecond -} - -func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { - started := time.Now() - updated := started - for i := 0; ; i++ { - select { - case <-stopCtx.Done(): - return r, stopCtx.Err() - case <-cancelCtx.Done(): - return r, cancelCtx.Err() - case <-time.After(backoff(backoffMin, backoffMax, i)): - // Timer up, show status - } - - // Retrieve the run to get its current status. - r, err := b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // Return if the run is no longer pending. - if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { - if i == 0 && opType == "plan" && b.CLI != nil { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) - } - if i > 0 && b.CLI != nil { - // Insert a blank line to separate the ouputs. - b.CLI.Output("") - } - return r, nil - } - - // Check if 30 seconds have passed since the last update. - current := time.Now() - if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { - updated = current - position := 0 - elapsed := "" - - // Calculate and set the elapsed time. - if i > 0 { - elapsed = fmt.Sprintf( - " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) - } - - // Retrieve the workspace used to run this operation in. - w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) - if err != nil { - return nil, generalError("Failed to retrieve workspace", err) - } - - // If the workspace is locked the run will not be queued and we can - // update the status without making any expensive calls. 
- if w.Locked && w.CurrentRun != nil { - cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) - if err != nil { - return r, generalError("Failed to retrieve current run", err) - } - if cr.Status == tfe.RunPending { - b.CLI.Output(b.Colorize().Color( - "Waiting for the manually locked workspace to be unlocked..." + elapsed)) - continue - } - } - - // Skip checking the workspace queue when we are the current run. - if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { - found := false - options := &tfe.RunListOptions{} - runlist: - for { - rl, err := b.client.Runs.List(stopCtx, w.ID, options) - if err != nil { - return r, generalError("Failed to retrieve run list", err) - } - - // Loop through all runs to calculate the workspace queue position. - for _, item := range rl.Items { - if !found { - if r.ID == item.ID { - found = true - } - continue - } - - // If the run is in a final state, ignore it and continue. - switch item.Status { - case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: - continue - case tfe.RunPlanned: - if op.Type == backend.OperationTypePlan { - continue - } - } - - // Increase the workspace queue position. - position++ - - // Stop searching when we reached the current run. - if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { - break runlist - } - } - - // Exit the loop when we've seen all pages. - if rl.CurrentPage >= rl.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = rl.NextPage - } - - if position > 0 { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for %d run(s) to finish before being queued...%s", - position, - elapsed, - ))) - continue - } - } - - options := tfe.ReadRunQueueOptions{} - search: - for { - rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) - if err != nil { - return r, generalError("Failed to retrieve queue", err) - } - - // Search through all queued items to find our run. 
- for _, item := range rq.Items { - if r.ID == item.ID { - position = item.PositionInQueue - break search - } - } - - // Exit the loop when we've seen all pages. - if rq.CurrentPage >= rq.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = rq.NextPage - } - - if position > 0 { - c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization) - if err != nil { - return r, generalError("Failed to retrieve capacity", err) - } - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for %d queued run(s) to finish before starting...%s", - position-c.Running, - elapsed, - ))) - continue - } - - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for the %s to start...%s", opType, elapsed))) - } - } -} - -// hasExplicitVariableValues is a best-effort check to determine whether the -// user has provided -var or -var-file arguments to a remote operation. -// -// The results may be inaccurate if the configuration is invalid or if -// individual variable values are invalid. That's okay because we only use this -// result to hint the user to set variables a different way. It's always the -// remote system's responsibility to do final validation of the input. -func (b *Remote) hasExplicitVariableValues(op *backend.Operation) bool { - // Load the configuration using the caller-provided configuration loader. - config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) - if configDiags.HasErrors() { - // If we can't load the configuration then we'll assume no explicit - // variable values just to let the remote operation start and let - // the remote system return the same set of configuration errors. - return false - } - - // We're intentionally ignoring the diagnostics here because validation - // of the variable values is the responsibilty of the remote system. 
Our - // goal here is just to make a best effort count of how many variable - // values are coming from -var or -var-file CLI arguments so that we can - // hint the user that those are not supported for remote operations. - variables, _ := backend.ParseVariableValues(op.Variables, config.Module.Variables) - - // Check for explicitly-defined (-var and -var-file) variables, which the - // remote backend does not support. All other source types are okay, - // because they are implicit from the execution context anyway and so - // their final values will come from the _remote_ execution context. - for _, v := range variables { - switch v.SourceType { - case terraform.ValueFromCLIArg, terraform.ValueFromNamedFile: - return true - } - } - - return false -} - -func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if r.CostEstimate == nil { - return nil - } - - msgPrefix := "Cost estimation" - started := time.Now() - updated := started - for i := 0; ; i++ { - select { - case <-stopCtx.Done(): - return stopCtx.Err() - case <-cancelCtx.Done(): - return cancelCtx.Err() - case <-time.After(backoff(backoffMin, backoffMax, i)): - } - - // Retrieve the cost estimate to get its current status. - ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) - if err != nil { - return generalError("Failed to retrieve cost estimate", err) - } - - // If the run is canceled or errored, but the cost-estimate still has - // no result, there is nothing further to render. - if ce.Status != tfe.CostEstimateFinished { - if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - return nil - } - } - - // checking if i == 0 so as to avoid printing this starting horizontal-rule - // every retry, and that it only prints it on the first (i=0) attempt. 
- if b.CLI != nil && i == 0 { - b.CLI.Output("\n------------------------------------------------------------------------\n") - } - - switch ce.Status { - case tfe.CostEstimateFinished: - delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) - if err != nil { - return generalError("Unexpected error", err) - } - - sign := "+" - if delta < 0 { - sign = "-" - } - - deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) - b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) - b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) - - if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { - b.CLI.Output("\n------------------------------------------------------------------------") - } - } - - return nil - case tfe.CostEstimatePending, tfe.CostEstimateQueued: - // Check if 30 seconds have passed since the last update. - current := time.Now() - if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { - updated = current - elapsed := "" - - // Calculate and set the elapsed time. - if i > 0 { - elapsed = fmt.Sprintf( - " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) - } - b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) - b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) - } - continue - case tfe.CostEstimateSkippedDueToTargeting: - b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) - b.CLI.Output("Not available for this plan, because it was created with the -target option.") - b.CLI.Output("\n------------------------------------------------------------------------") - return nil - case tfe.CostEstimateErrored: - b.CLI.Output(msgPrefix + " errored.\n") - b.CLI.Output("\n------------------------------------------------------------------------") - return nil - case tfe.CostEstimateCanceled: - return fmt.Errorf(msgPrefix + " canceled.") - default: - return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) - } - } -} - -func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------\n") - } - for i, pc := range r.PolicyChecks { - // Read the policy check logs. This is a blocking call that will only - // return once the policy check is complete. - logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) - if err != nil { - return generalError("Failed to retrieve policy check logs", err) - } - reader := bufio.NewReaderSize(logs, 64*1024) - - // Retrieve the policy check to get its current status. - pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) - if err != nil { - return generalError("Failed to retrieve policy check", err) - } - - // If the run is canceled or errored, but the policy check still has - // no result, there is nothing further to render. 
- if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - switch pc.Status { - case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: - continue - } - } - - var msgPrefix string - switch pc.Scope { - case tfe.PolicyScopeOrganization: - msgPrefix = "Organization policy check" - case tfe.PolicyScopeWorkspace: - msgPrefix = "Workspace policy check" - default: - msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) - } - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) - } - - if b.CLI != nil { - for next := true; next; { - var l, line []byte - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return generalError("Failed to read logs", err) - } - next = false - } - line = append(line, l...) - } - - if next || len(line) > 0 { - b.CLI.Output(b.Colorize().Color(string(line))) - } - } - } - - switch pc.Status { - case tfe.PolicyPasses: - if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------") - } - continue - case tfe.PolicyErrored: - return fmt.Errorf(msgPrefix + " errored.") - case tfe.PolicyHardFailed: - return fmt.Errorf(msgPrefix + " hard failed.") - case tfe.PolicySoftFailed: - runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) - - if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || - !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { - return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) - } - - if op.AutoApprove { - if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { - return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) - } - } else { - opts := &terraform.InputOpts{ - Id: "override", - Query: "\nDo you want to override the soft failed policy check?", - Description: "Only 'override' 
will be accepted to override.", - } - err = b.confirm(stopCtx, op, opts, r, "override") - if err != nil && err != errRunOverridden { - return fmt.Errorf( - fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl), - ) - } - - if err != errRunOverridden { - if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { - return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) - } - } else { - b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) - } - } - - if b.CLI != nil { - b.CLI.Output("------------------------------------------------------------------------") - } - default: - return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) - } - } - - return nil -} - -func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { - doneCtx, cancel := context.WithCancel(stopCtx) - result := make(chan error, 2) - - go func() { - defer logging.PanicHandler() - - // Make sure we cancel doneCtx before we return - // so the input command is also canceled. - defer cancel() - - for { - select { - case <-doneCtx.Done(): - return - case <-stopCtx.Done(): - return - case <-time.After(runPollInterval): - // Retrieve the run again to get its current status. 
- r, err := b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - result <- generalError("Failed to retrieve run", err) - return - } - - switch keyword { - case "override": - if r.Status != tfe.RunPolicyOverride { - if r.Status == tfe.RunDiscarded { - err = errRunDiscarded - } else { - err = errRunOverridden - } - } - case "yes": - if !r.Actions.IsConfirmable { - if r.Status == tfe.RunDiscarded { - err = errRunDiscarded - } else { - err = errRunApproved - } - } - } - - if err != nil { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color( - fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) - } - - if err == errRunDiscarded { - err = errApplyDiscarded - if op.PlanMode == plans.DestroyMode { - err = errDestroyDiscarded - } - } - - result <- err - return - } - } - } - }() - - result <- func() error { - v, err := op.UIIn.Input(doneCtx, opts) - if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { - return fmt.Errorf("Error asking %s: %v", opts.Id, err) - } - - // We return the error of our parent channel as we don't - // care about the error of the doneCtx which is only used - // within this function. So if the doneCtx was canceled - // because stopCtx was canceled, this will properly return - // a context.Canceled error and otherwise it returns nil. - if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { - return stopCtx.Err() - } - - // Make sure we cancel the context here so the loop that - // checks for external changes to the run is ended before - // we start to make changes ourselves. - cancel() - - if v != keyword { - // Retrieve the run again to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return generalError("Failed to retrieve run", err) - } - - // Make sure we discard the run if possible. 
- if r.Actions.IsDiscardable { - err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) - if err != nil { - if op.PlanMode == plans.DestroyMode { - return generalError("Failed to discard destroy", err) - } - return generalError("Failed to discard apply", err) - } - } - - // Even if the run was discarded successfully, we still - // return an error as the apply command was canceled. - if op.PlanMode == plans.DestroyMode { - return errDestroyDiscarded - } - return errApplyDiscarded - } - - return nil - }() - - return <-result -} diff --git a/internal/backend/remote/backend_context.go b/internal/backend/remote/backend_context.go deleted file mode 100644 index 372d1cf3d0bb..000000000000 --- a/internal/backend/remote/backend_context.go +++ /dev/null @@ -1,295 +0,0 @@ -package remote - -import ( - "context" - "fmt" - "log" - "strings" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Context implements backend.Local. -func (b *Remote) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := &backend.LocalRun{ - PlanOpts: &terraform.PlanOpts{ - Mode: op.PlanMode, - Targets: op.Targets, - }, - } - - op.StateLocker = op.StateLocker.WithContext(context.Background()) - - // Get the remote workspace name. - remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) - - // Get the latest state. 
- log.Printf("[TRACE] backend/remote: requesting state manager for workspace %q", remoteWorkspaceName) - stateMgr, err := b.StateMgr(op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error loading state: %w", err)) - return nil, nil, diags - } - - log.Printf("[TRACE] backend/remote: requesting state lock for workspace %q", remoteWorkspaceName) - if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { - return nil, nil, diags - } - - defer func() { - // If we're returning with errors, and thus not producing a valid - // context, we'll want to avoid leaving the remote workspace locked. - if diags.HasErrors() { - diags = diags.Append(op.StateLocker.Unlock()) - } - }() - - log.Printf("[TRACE] backend/remote: reading remote state for workspace %q", remoteWorkspaceName) - if err := stateMgr.RefreshState(); err != nil { - diags = diags.Append(fmt.Errorf("error loading state: %w", err)) - return nil, nil, diags - } - - // Initialize our context options - var opts terraform.ContextOpts - if v := b.ContextOpts; v != nil { - opts = *v - } - - // Copy set options from the operation - opts.UIInput = op.UIIn - - // Load the latest state. If we enter contextFromPlanFile below then the - // state snapshot in the plan file must match this, or else it'll return - // error diagnostics. - log.Printf("[TRACE] backend/remote: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) - ret.InputState = stateMgr.State() - - log.Printf("[TRACE] backend/remote: loading configuration for the current working directory") - config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, nil, diags - } - ret.Config = config - - if op.AllowUnsetVariables { - // If we're not going to use the variables in an operation we'll be - // more lax about them, stubbing out any unset ones as unknown. 
- // This gives us enough information to produce a consistent context, - // but not enough information to run a real operation (plan, apply, etc) - ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) - } else { - // The underlying API expects us to use the opaque workspace id to request - // variables, so we'll need to look that up using our organization name - // and workspace name. - remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) - return nil, nil, diags - } - - w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) - return nil, nil, diags - } - - if isLocalExecutionMode(w.ExecutionMode) { - log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) - } else { - log.Printf("[TRACE] backend/remote: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) - tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) - if err != nil && err != tfe.ErrResourceNotFound { - diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) - return nil, nil, diags - } - if tfeVariables != nil { - if op.Variables == nil { - op.Variables = make(map[string]backend.UnparsedVariableValue) - } - for _, v := range tfeVariables.Items { - if v.Category == tfe.CategoryTerraform { - if _, ok := op.Variables[v.Key]; !ok { - op.Variables[v.Key] = &remoteStoredVariableValue{ - definition: v, - } - } - } - } - } - } - - if op.Variables != nil { - variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) - diags = diags.Append(varDiags) - if diags.HasErrors() { - return nil, nil, diags - } 
- ret.PlanOpts.SetVariables = variables - } - } - - tfCtx, ctxDiags := terraform.NewContext(&opts) - diags = diags.Append(ctxDiags) - ret.Core = tfCtx - - log.Printf("[TRACE] backend/remote: finished building terraform.Context") - - return ret, stateMgr, diags -} - -func (b *Remote) getRemoteWorkspaceName(localWorkspaceName string) string { - switch { - case localWorkspaceName == backend.DefaultStateName: - // The default workspace name is a special case, for when the backend - // is configured to with to an exact remote workspace rather than with - // a remote workspace _prefix_. - return b.workspace - case b.prefix != "" && !strings.HasPrefix(localWorkspaceName, b.prefix): - return b.prefix + localWorkspaceName - default: - return localWorkspaceName - } -} - -func (b *Remote) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { - remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) - - log.Printf("[TRACE] backend/remote: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) - remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) - if err != nil { - return nil, err - } - - return remoteWorkspace, nil -} - -func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { - remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) - if err != nil { - return "", err - } - - return remoteWorkspace.ID, nil -} - -func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) terraform.InputValues { - ret := make(terraform.InputValues, len(decls)) - - for name, cfg := range decls { - raw, exists := vv[name] - if !exists { - ret[name] = &terraform.InputValue{ - Value: cty.UnknownVal(cfg.Type), - SourceType: terraform.ValueFromConfig, - } - continue - } - - val, diags := raw.ParseVariableValue(cfg.ParsingMode) - if diags.HasErrors() { - ret[name] = &terraform.InputValue{ - 
Value: cty.UnknownVal(cfg.Type), - SourceType: terraform.ValueFromConfig, - } - continue - } - ret[name] = val - } - - return ret -} - -// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation -// that translates from the go-tfe representation of stored variables into -// the Terraform Core backend representation of variables. -type remoteStoredVariableValue struct { - definition *tfe.Variable -} - -var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) - -func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var val cty.Value - - switch { - case v.definition.Sensitive: - // If it's marked as sensitive then it's not available for use in - // local operations. We'll use an unknown value as a placeholder for - // it so that operations that don't need it might still work, but - // we'll also produce a warning about it to add context for any - // errors that might result here. - val = cty.DynamicVal - if !v.definition.HCL { - // If it's not marked as HCL then we at least know that the - // value must be a string, so we'll set that in case it allows - // us to do some more precise type checking. - val = cty.UnknownVal(cty.String) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), - fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), - )) - - case v.definition.HCL: - // If the variable value is marked as being in HCL syntax, we need to - // parse it the same way as it would be interpreted in a .tfvars - // file because that is how it would get passed to Terraform CLI for - // a remote operation and we want to mimic that result as closely as - // possible. 
- var exprDiags hcl.Diagnostics - expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1}) - if expr != nil { - var moreDiags hcl.Diagnostics - val, moreDiags = expr.Value(nil) - exprDiags = append(exprDiags, moreDiags...) - } else { - // We'll have already put some errors in exprDiags above, so we'll - // just stub out the value here. - val = cty.DynamicVal - } - - // We don't have sufficient context to return decent error messages - // for syntax errors in the remote values, so we'll just return a - // generic message instead for now. - // (More complete error messages will still result from true remote - // operations, because they'll run on the remote system where we've - // materialized the values into a tfvars file we can report from.) - if exprDiags.HasErrors() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Invalid expression for var.%s", v.definition.Key), - fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key), - )) - } - - default: - // A variable value _not_ marked as HCL is always be a string, given - // literally. - val = cty.StringVal(v.definition.Value) - } - - return &terraform.InputValue{ - Value: val, - - // We mark these as "from input" with the rationale that entering - // variable values into the Terraform Cloud or Enterprise UI is, - // roughly speaking, a similar idea to entering variable values at - // the interactive CLI prompts. It's not a perfect correspondance, - // but it's closer than the other options. 
- SourceType: terraform.ValueFromInput, - }, diags -} diff --git a/internal/backend/remote/backend_context_test.go b/internal/backend/remote/backend_context_test.go deleted file mode 100644 index f3a133421aed..000000000000 --- a/internal/backend/remote/backend_context_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package remote - -import ( - "context" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "reflect" - "testing" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/zclconf/go-cty/cty" -) - -func TestRemoteStoredVariableValue(t *testing.T) { - tests := map[string]struct { - Def *tfe.Variable - Want cty.Value - WantError string - }{ - "string literal": { - &tfe.Variable{ - Key: "test", - Value: "foo", - HCL: false, - Sensitive: false, - }, - cty.StringVal("foo"), - ``, - }, - "string HCL": { - &tfe.Variable{ - Key: "test", - Value: `"foo"`, - HCL: true, - Sensitive: false, - }, - cty.StringVal("foo"), - ``, - }, - "list HCL": { - &tfe.Variable{ - Key: "test", - Value: `[]`, - HCL: true, - Sensitive: false, - }, - cty.EmptyTupleVal, - ``, - }, - "null HCL": { - &tfe.Variable{ - Key: "test", - Value: `null`, - HCL: true, - Sensitive: false, - }, - cty.NullVal(cty.DynamicPseudoType), - ``, - }, - "literal sensitive": { - &tfe.Variable{ - Key: "test", - HCL: false, - Sensitive: true, - }, - cty.UnknownVal(cty.String), - ``, - }, - "HCL sensitive": { - &tfe.Variable{ - Key: "test", - HCL: true, - Sensitive: true, - }, - cty.DynamicVal, - ``, - }, - "HCL computation": { - 
// This (stored expressions containing computation) is not a case - // we intentionally supported, but it became possible for remote - // operations in Terraform 0.12 (due to Terraform Cloud/Enterprise - // just writing the HCL verbatim into generated `.tfvars` files). - // We support it here for consistency, and we continue to support - // it in both places for backward-compatibility. In practice, - // there's little reason to do computation in a stored variable - // value because references are not supported. - &tfe.Variable{ - Key: "test", - Value: `[for v in ["a"] : v]`, - HCL: true, - Sensitive: false, - }, - cty.TupleVal([]cty.Value{cty.StringVal("a")}), - ``, - }, - "HCL syntax error": { - &tfe.Variable{ - Key: "test", - Value: `[`, - HCL: true, - Sensitive: false, - }, - cty.DynamicVal, - `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, - }, - "HCL with references": { - &tfe.Variable{ - Key: "test", - Value: `foo.bar`, - HCL: true, - Sensitive: false, - }, - cty.DynamicVal, - `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - v := &remoteStoredVariableValue{ - definition: test.Def, - } - // This ParseVariableValue implementation ignores the parsing mode, - // so we'll just always parse literal here. (The parsing mode is - // selected by the remote server, not by our local configuration.) 
- gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) - if test.WantError != "" { - if !diags.HasErrors() { - t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) - } - errStr := diags.Err().Error() - if errStr != test.WantError { - t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) - } - } else { - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - got := gotIV.Value - if !test.Want.RawEquals(got) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - } - }) - } -} - -func TestRemoteContextWithVars(t *testing.T) { - catTerraform := tfe.CategoryTerraform - catEnv := tfe.CategoryEnv - - tests := map[string]struct { - Opts *tfe.VariableCreateOptions - WantError string - }{ - "Terraform variable": { - &tfe.VariableCreateOptions{ - Category: &catTerraform, - }, - `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, - }, - "environment variable": { - &tfe.VariableCreateOptions{ - Category: &catEnv, - }, - ``, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - configDir := "./testdata/empty" - - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - defer configCleanup() - - workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - streams, _ := terminal.StreamsForTesting(t) - view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) - - op := &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewLocker(0, view), - Workspace: backend.DefaultStateName, - } - - v := test.Opts - if v.Key == nil { - key := "key" - v.Key = &key - } - b.client.Variables.Create(context.TODO(), workspaceID, *v) - - _, _, diags := b.LocalRun(op) - - if test.WantError != "" { - if !diags.HasErrors() { - t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) - } - errStr := diags.Err().Error() - if errStr != test.WantError { - t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) - } - // When Context() returns an error, it should unlock the state, - // so re-locking it is expected to succeed. 
- stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state: %s", err.Error()) - } - } else { - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - // When Context() succeeds, this should fail w/ "workspace already locked" - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { - t.Fatal("unexpected success locking state after Context") - } - } - }) - } -} - -func TestRemoteVariablesDoNotOverride(t *testing.T) { - catTerraform := tfe.CategoryTerraform - - varName1 := "key1" - varName2 := "key2" - varName3 := "key3" - - varValue1 := "value1" - varValue2 := "value2" - varValue3 := "value3" - - tests := map[string]struct { - localVariables map[string]backend.UnparsedVariableValue - remoteVariables []*tfe.VariableCreateOptions - expectedVariables terraform.InputValues - }{ - "no local variables": { - map[string]backend.UnparsedVariableValue{}, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, - { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, - { - Key: &varName3, - Value: &varValue3, - Category: &catTerraform, - }, - }, - terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: cty.StringVal(varValue3), - 
SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - }, - }, - "single conflicting local variable": { - map[string]backend.UnparsedVariableValue{ - varName3: testUnparsedVariableValue(varValue3), - }, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, { - Key: &varName3, - Value: &varValue3, - Category: &catTerraform, - }, - }, - terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: cty.StringVal(varValue3), - SourceType: terraform.ValueFromNamedFile, - SourceRange: tfdiags.SourceRange{ - Filename: "fake.tfvars", - Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - }, - }, - }, - }, - "no conflicting local variable": { - map[string]backend.UnparsedVariableValue{ - varName3: testUnparsedVariableValue(varValue3), - }, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, - }, - terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: 
tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: cty.StringVal(varValue3), - SourceType: terraform.ValueFromNamedFile, - SourceRange: tfdiags.SourceRange{ - Filename: "fake.tfvars", - Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - }, - }, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - configDir := "./testdata/variables" - - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - defer configCleanup() - - workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) - if err != nil { - t.Fatal(err) - } - - streams, _ := terminal.StreamsForTesting(t) - view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) - - op := &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewLocker(0, view), - Workspace: backend.DefaultStateName, - Variables: test.localVariables, - } - - for _, v := range test.remoteVariables { - b.client.Variables.Create(context.TODO(), workspaceID, *v) - } - - lr, _, diags := b.LocalRun(op) - - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - // When Context() succeeds, this should fail w/ "workspace already locked" - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { - t.Fatal("unexpected success locking state after Context") - } 
- - actual := lr.PlanOpts.SetVariables - expected := test.expectedVariables - - for expectedKey := range expected { - actualValue := actual[expectedKey] - expectedValue := expected[expectedKey] - - if !reflect.DeepEqual(*actualValue, *expectedValue) { - t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) - } - } - }) - } -} - -type testUnparsedVariableValue string - -func (v testUnparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { - return &terraform.InputValue{ - Value: cty.StringVal(string(v)), - SourceType: terraform.ValueFromNamedFile, - SourceRange: tfdiags.SourceRange{ - Filename: "fake.tfvars", - Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - }, - }, nil -} diff --git a/internal/backend/remote/backend_plan.go b/internal/backend/remote/backend_plan.go deleted file mode 100644 index ca74d18b6487..000000000000 --- a/internal/backend/remote/backend_plan.go +++ /dev/null @@ -1,442 +0,0 @@ -package remote - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -var planConfigurationVersionsPollInterval = 500 * time.Millisecond - -func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - log.Printf("[INFO] backend/remote: starting Plan operation") - - var diags tfdiags.Diagnostics - - if !w.Permissions.CanQueueRun { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Insufficient rights to generate a plan", - "The provided credentials have 
insufficient rights to generate a plan. In order "+ - "to generate plans, at least plan permissions on the workspace are required.", - )) - return nil, diags.Err() - } - - if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Custom parallelism values are currently not supported", - `The "remote" backend does not support setting a custom parallelism `+ - `value at this time.`, - )) - } - - if op.PlanFile != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Displaying a saved plan is currently not supported", - `The "remote" backend currently requires configuration to be present and `+ - `does not accept an existing saved plan as an argument at this time.`, - )) - } - - if op.PlanOutPath != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Saving a generated plan is currently not supported", - `The "remote" backend does not support saving the generated execution `+ - `plan locally at this time.`, - )) - } - - if b.hasExplicitVariableValues(op) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Run variables are currently not supported", - fmt.Sprintf( - "The \"remote\" backend does not support setting run variables at this time. "+ - "Currently the only to way to pass variables to the remote backend is by "+ - "creating a '*.auto.tfvars' variables file. This file will automatically "+ - "be loaded by the \"remote\" backend when the workspace is configured to use "+ - "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ - "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", - b.hostname, b.organization, op.Workspace, - ), - )) - } - - if !op.HasConfig() && op.PlanMode != plans.DestroyMode { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files found", - `Plan requires configuration to be present. 
Planning without a configuration `+ - `would mark everything for destruction, which is normally not what is desired. `+ - `If you would like to destroy everything, please run plan with the "-destroy" `+ - `flag or create a single empty configuration file. Otherwise, please create `+ - `a Terraform configuration file in the path being executed and try again.`, - )) - } - - // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, - // so if there's an error when parsing the RemoteAPIVersion, it's handled as - // equivalent to an API version < 2.3. - currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) - - if len(op.Targets) != 0 { - desiredAPIVersion, _ := version.NewVersion("2.3") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource targeting is not supported", - fmt.Sprintf( - `The host %s does not support the -target option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if !op.PlanRefresh { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Planning without refresh is not supported", - fmt.Sprintf( - `The host %s does not support the -refresh=false option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if len(op.ForceReplace) != 0 { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Planning resource replacements is not supported", - fmt.Sprintf( - `The host %s does not support the -replace option for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - if op.PlanMode == plans.RefreshOnlyMode { - desiredAPIVersion, _ := version.NewVersion("2.4") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - 
diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Refresh-only mode is not supported", - fmt.Sprintf( - `The host %s does not support -refresh-only mode for `+ - `remote plans.`, - b.hostname, - ), - )) - } - } - - // Return if there are any errors. - if diags.HasErrors() { - return nil, diags.Err() - } - - return b.plan(stopCtx, cancelCtx, op, w) -} - -func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - if b.CLI != nil { - header := planDefaultHeader - if op.Type == backend.OperationTypeApply { - header = applyDefaultHeader - } - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) - } - - configOptions := tfe.ConfigurationVersionCreateOptions{ - AutoQueueRuns: tfe.Bool(false), - Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), - } - - cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) - if err != nil { - return nil, generalError("Failed to create configuration version", err) - } - - var configDir string - if op.ConfigDir != "" { - // De-normalize the configuration directory path. - configDir, err = filepath.Abs(op.ConfigDir) - if err != nil { - return nil, generalError( - "Failed to get absolute path of the configuration directory: %v", err) - } - - // Make sure to take the working directory into account by removing - // the working directory from the current path. This will result in - // a path that points to the expected root of the workspace. - configDir = filepath.Clean(strings.TrimSuffix( - filepath.Clean(configDir), - filepath.Clean(w.WorkingDirectory), - )) - - // If the workspace has a subdirectory as its working directory then - // our configDir will be some parent directory of the current working - // directory. Users are likely to find that surprising, so we'll - // produce an explicit message about it to be transparent about what - // we are doing and why. 
- if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { - if b.CLI != nil { - b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` -The remote workspace is configured to work with configuration at -%s relative to the target repository. - -Terraform will upload the contents of the following directory, -excluding files or directories as defined by a .terraformignore file -at %s/.terraformignore (if it is present), -in order to capture the filesystem context the remote workspace expects: - %s -`), w.WorkingDirectory, configDir, configDir) + "\n") - } - } - - } else { - // We did a check earlier to make sure we either have a config dir, - // or the plan is run with -destroy. So this else clause will only - // be executed when we are destroying and doesn't need the config. - configDir, err = ioutil.TempDir("", "tf") - if err != nil { - return nil, generalError("Failed to create temporary directory", err) - } - defer os.RemoveAll(configDir) - - // Make sure the configured working directory exists. 
- err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) - if err != nil { - return nil, generalError( - "Failed to create temporary working directory", err) - } - } - - err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) - if err != nil { - return nil, generalError("Failed to upload configuration files", err) - } - - uploaded := false - for i := 0; i < 60 && !uploaded; i++ { - select { - case <-stopCtx.Done(): - return nil, context.Canceled - case <-cancelCtx.Done(): - return nil, context.Canceled - case <-time.After(planConfigurationVersionsPollInterval): - cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) - if err != nil { - return nil, generalError("Failed to retrieve configuration version", err) - } - - if cv.Status == tfe.ConfigurationUploaded { - uploaded = true - } - } - } - - if !uploaded { - return nil, generalError( - "Failed to upload configuration files", errors.New("operation timed out")) - } - - runOptions := tfe.RunCreateOptions{ - ConfigurationVersion: cv, - Refresh: tfe.Bool(op.PlanRefresh), - Workspace: w, - } - - switch op.PlanMode { - case plans.NormalMode: - // okay, but we don't need to do anything special for this - case plans.RefreshOnlyMode: - runOptions.RefreshOnly = tfe.Bool(true) - case plans.DestroyMode: - runOptions.IsDestroy = tfe.Bool(true) - default: - // Shouldn't get here because we should update this for each new - // plan mode we add, mapping it to the corresponding RunCreateOptions - // field. 
- return nil, generalError( - "Invalid plan mode", - fmt.Errorf("remote backend doesn't support %s", op.PlanMode), - ) - } - - if len(op.Targets) != 0 { - runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) - for _, addr := range op.Targets { - runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) - } - } - - if len(op.ForceReplace) != 0 { - runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) - for _, addr := range op.ForceReplace { - runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) - } - } - - r, err := b.client.Runs.Create(stopCtx, runOptions) - if err != nil { - return r, generalError("Failed to create run", err) - } - - // When the lock timeout is set, if the run is still pending and - // cancellable after that period, we attempt to cancel it. - if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { - go func() { - defer logging.PanicHandler() - - select { - case <-stopCtx.Done(): - return - case <-cancelCtx.Done(): - return - case <-time.After(lockTimeout): - // Retrieve the run to get its current status. - r, err := b.client.Runs.Read(cancelCtx, r.ID) - if err != nil { - log.Printf("[ERROR] error reading run: %v", err) - return - } - - if r.Status == tfe.RunPending && r.Actions.IsCancelable { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) - } - - // We abuse the auto aprove flag to indicate that we do not - // want to ask if the remote operation should be canceled. 
- op.AutoApprove = true - - p, err := os.FindProcess(os.Getpid()) - if err != nil { - log.Printf("[ERROR] error searching process ID: %v", err) - return - } - p.Signal(syscall.SIGINT) - } - } - }() - } - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( - runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) - } - - r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) - if err != nil { - return r, err - } - - logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID) - if err != nil { - return r, generalError("Failed to retrieve logs", err) - } - reader := bufio.NewReaderSize(logs, 64*1024) - - if b.CLI != nil { - for next := true; next; { - var l, line []byte - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return r, generalError("Failed to read logs", err) - } - next = false - } - line = append(line, l...) - } - - if next || len(line) > 0 { - b.CLI.Output(b.Colorize().Color(string(line))) - } - } - } - - // Retrieve the run to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // If the run is canceled or errored, we still continue to the - // cost-estimation and policy check phases to ensure we render any - // results available. In the case of a hard-failed policy check, the - // status of the run will be "errored", but there is still policy - // information which should be shown. - - // Show any cost estimation output. - if r.CostEstimate != nil { - err = b.costEstimate(stopCtx, cancelCtx, op, r) - if err != nil { - return r, err - } - } - - // Check any configured sentinel policies. - if len(r.PolicyChecks) > 0 { - err = b.checkPolicy(stopCtx, cancelCtx, op, r) - if err != nil { - return r, err - } - } - - return r, nil -} - -const planDefaultHeader = ` -[reset][yellow]Running plan in the remote backend. Output will stream here. 
Pressing Ctrl-C -will stop streaming the logs, but will not stop the plan running remotely.[reset] - -Preparing the remote plan... -` - -const runHeader = ` -[reset][yellow]To view this run in a browser, visit: -https://%s/app/%s/%s/runs/%s[reset] -` - -// The newline in this error is to make it look good in the CLI! -const lockTimeoutErr = ` -[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. -[reset] -` diff --git a/internal/backend/remote/backend_plan_test.go b/internal/backend/remote/backend_plan_test.go deleted file mode 100644 index 3acf0796dae6..000000000000 --- a/internal/backend/remote/backend_plan_test.go +++ /dev/null @@ -1,1247 +0,0 @@ -package remote - -import ( - "context" - "os" - "os/signal" - "strings" - "syscall" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/mitchellh/cli" -) - -func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - return testOperationPlanWithTimeout(t, configDir, 0) -} - -func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, 
configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewView(streams) - stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) - operationView := views.NewOperation(arguments.ViewHuman, false, view) - - // Many of our tests use an overridden "null" provider that's just in-memory - // inside the test process, not a separate plugin on disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) - - return &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - PlanRefresh: true, - StateLocker: clistate.NewLocker(timeout, stateLockerView), - Type: backend.OperationTypePlan, - View: operationView, - DependencyLocks: depLocks, - }, configCleanup, done -} - -func TestRemote_planBasic(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - 
t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) - } -} - -func TestRemote_planCanceled(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Stop the run to simulate a Ctrl-C. - run.Stop() - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - stateMgr, _ := b.StateMgr(backend.DefaultStateName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error()) - } -} - -func TestRemote_planLongLine(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planWithoutPermissions(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - 
// Create a named workspace without permissions. - w, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.prefix + "prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - w.Permissions.CanQueueRun = false - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { - t.Fatalf("expected a permissions error, got: %v", errOutput) - } -} - -func TestRemote_planWithParallelism(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - b.ContextOpts.Parallelism = 3 - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "parallelism values are currently not supported") { - t.Fatalf("expected a parallelism error, got: %v", errOutput) - } -} - -func TestRemote_planWithPlan(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.PlanFile = &planfile.Reader{} - op.Workspace = backend.DefaultStateName - - run, err := 
b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "saved plan is currently not supported") { - t.Fatalf("expected a saved plan error, got: %v", errOutput) - } -} - -func TestRemote_planWithPath(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.PlanOutPath = "./testdata/plan" - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "generated plan is currently not supported") { - t.Fatalf("expected a generated plan error, got: %v", errOutput) - } -} - -func TestRemote_planWithoutRefresh(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanRefresh = false - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - // We should find a run inside the mock client that has refresh set - // to false. 
- runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(false, run.Refresh); diff != "" { - t.Errorf("wrong Refresh setting in the created run\n%s", diff) - } - } -} - -func TestRemote_planWithoutRefreshIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - op.PlanRefresh = false - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Planning without refresh is not supported") { - t.Fatalf("expected not supported error, got: %v", errOutput) - } -} - -func TestRemote_planWithRefreshOnly(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - // We should find a run inside the mock client that has refresh-only set - // to true. 
- runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { - t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) - } - } -} - -func TestRemote_planWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Refresh-only mode is not supported") { - t.Fatalf("expected not supported error, got: %v", errOutput) - } -} - -func TestRemote_planWithTarget(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // When the backend code creates a new run, we'll tweak it so that it - // has a cost estimation object with the "skipped_due_to_targeting" status, - // emulating how a real server is expected to behave in that case. - b.client.Runs.(*cloud.MockRuns).ModifyNewRun = func(client *cloud.MockClient, options tfe.RunCreateOptions, run *tfe.Run) { - const fakeID = "fake" - // This is the cost estimate object embedded in the run itself which - // the backend will use to learn the ID to request from the cost - // estimates endpoint. It's pending to simulate what a freshly-created - // run is likely to look like. 
- run.CostEstimate = &tfe.CostEstimate{ - ID: fakeID, - Status: "pending", - } - // The backend will then use the main cost estimation API to retrieve - // the same ID indicated in the object above, where we'll then return - // the status "skipped_due_to_targeting" to trigger the special skip - // message in the backend output. - client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{ - ID: fakeID, - Status: "skipped_due_to_targeting", - } - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // testBackendDefault above attached a "mock UI" to our backend, so we - // can retrieve its non-error output via the OutputWriter in-memory buffer. - gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String() - if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) { - t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput) - } - - // We should find a run inside the mock client that has the same - // target address we requested above. 
- runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { - t.Errorf("wrong TargetAddrs in the created run\n%s", diff) - } - } -} - -func TestRemote_planWithTargetIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - // Set the tfe client's RemoteAPIVersion to an empty string, to mimic - // API versions prior to 2.3. - b.client.SetFakeRemoteAPIVersion("") - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Resource targeting is not supported") { - t.Fatalf("expected a targeting error, got: %v", errOutput) - } -} - -func TestRemote_planWithReplace(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to 
succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // refresh address we requested above. - runsAPI := b.client.Runs.(*cloud.MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { - t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) - } - } -} - -func TestRemote_planWithReplaceIncompatibleAPIVersion(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - b.client.SetFakeRemoteAPIVersion("2.3") - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Planning resource replacements is not supported") { - t.Fatalf("expected not supported error, got: %v", errOutput) - } -} - -func TestRemote_planWithVariables(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables") - defer configCleanup() - - op.Variables = testVariables(terraform.ValueFromCLIArg, "foo", "bar") - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output 
:= done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "variables are currently not supported") { - t.Fatalf("expected a variables error, got: %v", errOutput) - } -} - -func TestRemote_planNoConfig(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/empty") - defer configCleanup() - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "configuration files found") { - t.Fatalf("expected configuration files error, got: %v", errOutput) - } -} - -func TestRemote_planNoChanges(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { - t.Fatalf("expected no changes in plan summary: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } -} - -func TestRemote_planForceLocal(t *testing.T) { - // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use - // the local backend with itself as embedded backend. - if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { - t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) - } - defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("unexpected remote backend header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planWithoutOperationsEntitlement(t *testing.T) { - b, bCleanup := testBackendNoOperations(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - streams, done := terminal.StreamsForTesting(t) - 
view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("unexpected remote backend header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planWorkspaceWithoutOperations(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - ctx := context.Background() - - // Create a named workspace that doesn't allow operations. 
- _, err := b.client.Workspaces.Create( - ctx, - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.prefix + "no-operations"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = "no-operations" - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("unexpected remote backend header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planLockTimeout(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - ctx := context.Background() - - // Retrieve the workspace used to run this operation in. - w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) - if err != nil { - t.Fatalf("error retrieving workspace: %v", err) - } - - // Create a new configuration version. - c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) - if err != nil { - t.Fatalf("error creating configuration version: %v", err) - } - - // Create a pending run to block this run. 
- _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ - ConfigurationVersion: c, - Workspace: w, - }) - if err != nil { - t.Fatalf("error creating pending run: %v", err) - } - - op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "cancel": "yes", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = backend.DefaultStateName - - _, err = b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - sigint := make(chan os.Signal, 1) - signal.Notify(sigint, syscall.SIGINT) - select { - case <-sigint: - // Stop redirecting SIGINT signals. - signal.Stop(sigint) - case <-time.After(200 * time.Millisecond): - t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") - } - - if len(input.answers) != 2 { - t.Fatalf("expected unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("expected lock timout error in output: %s", output) - } - if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("unexpected plan summary in output: %s", output) - } -} - -func TestRemote_planDestroy(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", 
b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } -} - -func TestRemote_planDestroyNoConfig(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/empty") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } -} - -func TestRemote_planWithWorkingDirectory(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - options := tfe.WorkspaceUpdateOptions{ - WorkingDirectory: tfe.String("terraform"), - } - - // Configure the workspace to use a custom working directory. 
- _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) - if err != nil { - t.Fatalf("error configuring working directory: %v", err) - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/terraform") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "The remote workspace is configured to work with configuration") { - t.Fatalf("expected working directory warning: %s", output) - } - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - options := tfe.WorkspaceUpdateOptions{ - WorkingDirectory: tfe.String("terraform"), - } - - // Configure the workspace to use a custom working directory. - _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) - if err != nil { - t.Fatalf("error configuring working directory: %v", err) - } - - wd, err := os.Getwd() - if err != nil { - t.Fatalf("error getting current working directory: %v", err) - } - - // We need to change into the configuration directory to make sure - // the logic to upload the correct slug is working as expected. 
- if err := os.Chdir("./testdata/plan-with-working-directory/terraform"); err != nil { - t.Fatalf("error changing directory: %v", err) - } - defer os.Chdir(wd) // Make sure we change back again when were done. - - // For this test we need to give our current directory instead of the - // full path to the configuration as we already changed directories. - op, configCleanup, done := testOperationPlan(t, ".") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planCostEstimation(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if 
!strings.Contains(output, "Resources: 1 of 1 estimated") { - t.Fatalf("expected cost estimate result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planPolicyPass(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") - defer configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planPolicyHardFail(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") - defer configCleanup() - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := viewOutput.Stderr() - if 
!strings.Contains(errOutput, "hard failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planPolicySoftFail(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") - defer configCleanup() - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := viewOutput.Stderr() - if !strings.Contains(errOutput, "soft failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestRemote_planWithRemoteError(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") - defer 
configCleanup() - defer done(t) - - op.Workspace = backend.DefaultStateName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in the remote backend") { - t.Fatalf("expected remote backend header in output: %s", output) - } - if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("expected plan error in output: %s", output) - } -} - -func TestRemote_planOtherError(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = "network-error" // custom error response in backend_mock.go - - _, err := b.Operation(context.Background(), op) - if err == nil { - t.Errorf("expected error, got success") - } - - if !strings.Contains(err.Error(), - "the configured \"remote\" backend encountered an unexpected error:\n\nI'm a little teacup") { - t.Fatalf("expected error message, got: %s", err.Error()) - } -} diff --git a/internal/backend/remote/backend_state.go b/internal/backend/remote/backend_state.go deleted file mode 100644 index 54cdd0aadb84..000000000000 --- a/internal/backend/remote/backend_state.go +++ /dev/null @@ -1,195 +0,0 @@ -package remote - -import ( - "bytes" - "context" - "crypto/md5" - "encoding/base64" - "encoding/json" - "fmt" - - tfe "github.com/hashicorp/go-tfe" - - "github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -type 
remoteClient struct { - client *tfe.Client - lockInfo *statemgr.LockInfo - organization string - runID string - stateUploadErr bool - workspace *tfe.Workspace - forcePush bool -} - -// Get the remote state. -func (r *remoteClient) Get() (*remote.Payload, error) { - ctx := context.Background() - - sv, err := r.client.StateVersions.ReadCurrent(ctx, r.workspace.ID) - if err != nil { - if err == tfe.ErrResourceNotFound { - // If no state exists, then return nil. - return nil, nil - } - return nil, fmt.Errorf("Error retrieving state: %v", err) - } - - state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL) - if err != nil { - return nil, fmt.Errorf("Error downloading state: %v", err) - } - - // If the state is empty, then return nil. - if len(state) == 0 { - return nil, nil - } - - // Get the MD5 checksum of the state. - sum := md5.Sum(state) - - return &remote.Payload{ - Data: state, - MD5: sum[:], - }, nil -} - -// Put the remote state. -func (r *remoteClient) Put(state []byte) error { - ctx := context.Background() - - // Read the raw state into a Terraform state. - stateFile, err := statefile.Read(bytes.NewReader(state)) - if err != nil { - return fmt.Errorf("Error reading state: %s", err) - } - - ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) - if err != nil { - return fmt.Errorf("Error reading output values: %s", err) - } - o, err := json.Marshal(ov) - if err != nil { - return fmt.Errorf("Error converting output values to json: %s", err) - } - - options := tfe.StateVersionCreateOptions{ - Lineage: tfe.String(stateFile.Lineage), - Serial: tfe.Int64(int64(stateFile.Serial)), - MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), - State: tfe.String(base64.StdEncoding.EncodeToString(state)), - Force: tfe.Bool(r.forcePush), - JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(o)), - } - - // If we have a run ID, make sure to add it to the options - // so the state will be properly associated with the run. 
- if r.runID != "" { - options.Run = &tfe.Run{ID: r.runID} - } - - // Create the new state. - _, err = r.client.StateVersions.Create(ctx, r.workspace.ID, options) - if err != nil { - r.stateUploadErr = true - return fmt.Errorf("Error uploading state: %v", err) - } - - return nil -} - -// Delete the remote state. -func (r *remoteClient) Delete() error { - err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace.Name) - if err != nil && err != tfe.ErrResourceNotFound { - return fmt.Errorf("Error deleting workspace %s: %v", r.workspace.Name, err) - } - - return nil -} - -// EnableForcePush to allow the remote client to overwrite state -// by implementing remote.ClientForcePusher -func (r *remoteClient) EnableForcePush() { - r.forcePush = true -} - -// Lock the remote state. -func (r *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { - ctx := context.Background() - - lockErr := &statemgr.LockError{Info: r.lockInfo} - - // Lock the workspace. - _, err := r.client.Workspaces.Lock(ctx, r.workspace.ID, tfe.WorkspaceLockOptions{ - Reason: tfe.String("Locked by Terraform"), - }) - if err != nil { - if err == tfe.ErrWorkspaceLocked { - lockErr.Info = info - err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, r.organization, r.workspace.Name) - } - lockErr.Err = err - return "", lockErr - } - - r.lockInfo = info - - return r.lockInfo.ID, nil -} - -// Unlock the remote state. -func (r *remoteClient) Unlock(id string) error { - ctx := context.Background() - - // We first check if there was an error while uploading the latest - // state. If so, we will not unlock the workspace to prevent any - // changes from being applied until the correct state is uploaded. - if r.stateUploadErr { - return nil - } - - lockErr := &statemgr.LockError{Info: r.lockInfo} - - // With lock info this should be treated as a normal unlock. - if r.lockInfo != nil { - // Verify the expected lock ID. 
- if r.lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock ID does not match existing lock") - return lockErr - } - - // Unlock the workspace. - _, err := r.client.Workspaces.Unlock(ctx, r.workspace.ID) - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil - } - - // Verify the optional force-unlock lock ID. - if r.organization+"/"+r.workspace.Name != id { - lockErr.Err = fmt.Errorf( - "lock ID %q does not match existing lock ID \"%s/%s\"", - id, - r.organization, - r.workspace.Name, - ) - return lockErr - } - - // Force unlock the workspace. - _, err := r.client.Workspaces.ForceUnlock(ctx, r.workspace.ID) - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil -} diff --git a/internal/backend/remote/backend_test.go b/internal/backend/remote/backend_test.go deleted file mode 100644 index f0ad73f7c5b9..000000000000 --- a/internal/backend/remote/backend_test.go +++ /dev/null @@ -1,724 +0,0 @@ -package remote - -import ( - "context" - "fmt" - "reflect" - "strings" - "testing" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/zclconf/go-cty/cty" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -func TestRemote(t *testing.T) { - var _ backend.Enhanced = New(nil) - var _ backend.CLI = New(nil) -} - -func TestRemote_backendDefault(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - backend.TestBackendStates(t, b) - backend.TestBackendStateLocks(t, b, b) - backend.TestBackendStateForceUnlock(t, b, b) -} - -func TestRemote_backendNoDefault(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - backend.TestBackendStates(t, b) -} - -func TestRemote_config(t *testing.T) { - cases := map[string]struct { - config 
cty.Value - confErr string - valErr string - }{ - "with_a_nonexisting_organization": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("nonexisting"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - confErr: "organization \"nonexisting\" at host app.terraform.io not found", - }, - "with_an_unknown_host": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("nonexisting.local"), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - confErr: "Failed to request discovery document", - }, - // localhost advertises TFE services, but has no token in the credentials - "without_a_token": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("localhost"), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - confErr: "terraform login localhost", - }, - "with_a_name": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - }, - "with_a_prefix": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "prefix": cty.StringVal("my-app-"), - }), - }), - }, - 
"without_either_a_name_and_a_prefix": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "prefix": cty.NullVal(cty.String), - }), - }), - valErr: `Either workspace "name" or "prefix" is required`, - }, - "with_both_a_name_and_a_prefix": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.StringVal("my-app-"), - }), - }), - valErr: `Only one of workspace "name" or "prefix" is allowed`, - }, - "null config": { - config: cty.NullVal(cty.EmptyObject), - }, - } - - for name, tc := range cases { - s := testServer(t) - b := New(testDisco(s)) - - // Validate - _, valDiags := b.PrepareConfig(tc.config) - if (valDiags.Err() != nil || tc.valErr != "") && - (valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) { - t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) - } - - // Configure - confDiags := b.Configure(tc.config) - if (confDiags.Err() != nil || tc.confErr != "") && - (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { - t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) - } - } -} - -func TestRemote_versionConstraints(t *testing.T) { - cases := map[string]struct { - config cty.Value - prerelease string - version string - result string - }{ - "compatible version": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), 
- }), - version: "0.11.1", - }, - "version too old": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - version: "0.0.1", - result: "upgrade Terraform to >= 0.1.0", - }, - "version too new": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - version: "10.0.1", - result: "downgrade Terraform to <= 10.0.0", - }, - } - - // Save and restore the actual version. - p := tfversion.Prerelease - v := tfversion.Version - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - }() - - for name, tc := range cases { - s := testServer(t) - b := New(testDisco(s)) - - // Set the version for this test. 
- tfversion.Prerelease = tc.prerelease - tfversion.Version = tc.version - - // Validate - _, valDiags := b.PrepareConfig(tc.config) - if valDiags.HasErrors() { - t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) - } - - // Configure - confDiags := b.Configure(tc.config) - if (confDiags.Err() != nil || tc.result != "") && - (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.result)) { - t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) - } - } -} - -func TestRemote_localBackend(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - local, ok := b.local.(*backendLocal.Local) - if !ok { - t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) - } - - remote, ok := local.Backend.(*Remote) - if !ok { - t.Fatalf("expected local.Backend to be *remote.Remote, got: %T", remote) - } -} - -func TestRemote_addAndRemoveWorkspacesDefault(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - if _, err := b.Workspaces(); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) - } - - if _, err := b.StateMgr(backend.DefaultStateName); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if _, err := b.StateMgr("prod"); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) - } - - if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if err := b.DeleteWorkspace("prod", true); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) - } -} - -func TestRemote_addAndRemoveWorkspacesNoDefault(t *testing.T) { - b, bCleanup := testBackendNoDefault(t) - defer bCleanup() - - states, err := b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedWorkspaces := []string(nil) - if 
!reflect.DeepEqual(states, expectedWorkspaces) { - t.Fatalf("expected states %#+v, got %#+v", expectedWorkspaces, states) - } - - if _, err := b.StateMgr(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) - } - - expectedA := "test_A" - if _, err := b.StateMgr(expectedA); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedWorkspaces = append(expectedWorkspaces, expectedA) - if !reflect.DeepEqual(states, expectedWorkspaces) { - t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) - } - - expectedB := "test_B" - if _, err := b.StateMgr(expectedB); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedWorkspaces = append(expectedWorkspaces, expectedB) - if !reflect.DeepEqual(states, expectedWorkspaces) { - t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) - } - - if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != backend.ErrDefaultWorkspaceNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) - } - - if err := b.DeleteWorkspace(expectedA, true); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedWorkspaces = []string{expectedB} - if !reflect.DeepEqual(states, expectedWorkspaces) { - t.Fatalf("expected %#+v got %#+v", expectedWorkspaces, states) - } - - if err := b.DeleteWorkspace(expectedB, true); err != nil { - t.Fatal(err) - } - - states, err = b.Workspaces() - if err != nil { - t.Fatal(err) - } - - expectedWorkspaces = []string(nil) - if !reflect.DeepEqual(states, expectedWorkspaces) { - t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) - } -} - -func TestRemote_checkConstraints(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - cases := 
map[string]struct { - constraints *disco.Constraints - prerelease string - version string - result string - }{ - "compatible version": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Maximum: "0.11.11", - }, - version: "0.11.1", - result: "", - }, - "version too old": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Maximum: "0.11.11", - }, - version: "0.10.1", - result: "upgrade Terraform to >= 0.11.0", - }, - "version too new": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Maximum: "0.11.11", - }, - version: "0.12.0", - result: "downgrade Terraform to <= 0.11.11", - }, - "version excluded - ordered": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Excluding: []string{"0.11.7", "0.11.8"}, - Maximum: "0.11.11", - }, - version: "0.11.7", - result: "upgrade Terraform to > 0.11.8", - }, - "version excluded - unordered": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Excluding: []string{"0.11.8", "0.11.6"}, - Maximum: "0.11.11", - }, - version: "0.11.6", - result: "upgrade Terraform to > 0.11.8", - }, - "list versions": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Maximum: "0.11.11", - }, - version: "0.10.1", - result: "versions >= 0.11.0, <= 0.11.11.", - }, - "list exclusion": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Excluding: []string{"0.11.6"}, - Maximum: "0.11.11", - }, - version: "0.11.6", - result: "excluding version 0.11.6.", - }, - "list exclusions": { - constraints: &disco.Constraints{ - Minimum: "0.11.0", - Excluding: []string{"0.11.8", "0.11.6"}, - Maximum: "0.11.11", - }, - version: "0.11.6", - result: "excluding versions 0.11.6, 0.11.8.", - }, - } - - // Save and restore the actual version. - p := tfversion.Prerelease - v := tfversion.Version - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - }() - - for name, tc := range cases { - // Set the version for this test. 
- tfversion.Prerelease = tc.prerelease - tfversion.Version = tc.version - - // Check the constraints. - diags := b.checkConstraints(tc.constraints) - if (diags.Err() != nil || tc.result != "") && - (diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.result)) { - t.Fatalf("%s: unexpected constraints result: %v", name, diags.Err()) - } - } -} - -func TestRemote_StateMgr_versionCheck(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // Some fixed versions for testing with. This logic is a simple string - // comparison, so we don't need many test cases. - v0135 := version.Must(version.NewSemver("0.13.5")) - v0140 := version.Must(version.NewSemver("0.14.0")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // For this test, the local Terraform version is set to 0.14.0 - tfversion.Prerelease = "" - tfversion.Version = v0140.String() - tfversion.SemVer = v0140 - - // Update the mock remote workspace Terraform version to match the local - // Terraform version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(v0140.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should succeed - if _, err := b.StateMgr(backend.DefaultStateName); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Now change the remote workspace to a different Terraform version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(v0135.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should fail - want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version 
"0.14.0"` - if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want { - t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) - } -} - -func TestRemote_StateMgr_versionCheckLatest(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - v0140 := version.Must(version.NewSemver("0.14.0")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // For this test, the local Terraform version is set to 0.14.0 - tfversion.Prerelease = "" - tfversion.Version = v0140.String() - tfversion.SemVer = v0140 - - // Update the remote workspace to the pseudo-version "latest" - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String("latest"), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should succeed despite not being a string match - if _, err := b.StateMgr(backend.DefaultStateName); err != nil { - t.Fatalf("expected no error, got %v", err) - } -} - -func TestRemote_VerifyWorkspaceTerraformVersion(t *testing.T) { - testCases := []struct { - local string - remote string - executionMode string - wantErr bool - }{ - {"0.13.5", "0.13.5", "remote", false}, - {"0.14.0", "0.13.5", "remote", true}, - {"0.14.0", "0.13.5", "local", false}, - {"0.14.0", "0.14.1", "remote", false}, - {"0.14.0", "1.0.99", "remote", false}, - {"0.14.0", "1.1.0", "remote", false}, - {"0.14.0", "1.3.0", "remote", true}, - {"1.2.0", "1.2.99", "remote", false}, - {"1.2.0", "1.3.0", "remote", true}, - {"0.15.0", "latest", "remote", false}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - local := 
version.Must(version.NewSemver(tc.local)) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // Override local version as specified - tfversion.Prerelease = "" - tfversion.Version = local.String() - tfversion.SemVer = local - - // Update the mock remote workspace Terraform version to the - // specified remote version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - ExecutionMode: &tc.executionMode, - TerraformVersion: tfe.String(tc.remote), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - if tc.wantErr { - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Terraform version mismatch") { - t.Fatalf("unexpected error: %s", got) - } - } else { - if len(diags) != 0 { - t.Fatalf("unexpected diags: %s", diags.Err()) - } - } - }) - } -} - -func TestRemote_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // Attempting to check the version against a workspace which doesn't exist - // should result in no errors - diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") - if len(diags) != 0 { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - // Use a special workspace ID to trigger a 500 error, which should result - // in a failed check - diags = b.VerifyWorkspaceTerraformVersion("network-error") - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { - t.Fatalf("unexpected error: %s", got) - } - - // Update the mock remote workspace 
Terraform version to an invalid version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String("1.0.cheetarah"), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid Terraform version") { - t.Fatalf("unexpected error: %s", got) - } -} - -func TestRemote_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - // If the ignore flag is set, the behaviour changes - b.IgnoreVersionConflict() - - // Different local & remote versions to cause an error - local := version.Must(version.NewSemver("0.14.0")) - remote := version.Must(version.NewSemver("0.13.5")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // Override local version as specified - tfversion.Prerelease = "" - tfversion.Version = local.String() - tfversion.SemVer = local - - // Update the mock remote workspace Terraform version to the - // specified remote version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.workspace, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(remote.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - - if got, want := diags[0].Severity(), tfdiags.Warning; got != want { - t.Errorf("wrong severity: got %#v, want %#v", got, want) - } - if got, want := 
diags[0].Description().Summary, "Terraform version mismatch"; got != want { - t.Errorf("wrong summary: got %s, want %s", got, want) - } - wantDetail := "The local Terraform version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)." - if got := diags[0].Description().Detail; got != wantDetail { - t.Errorf("wrong summary: got %s, want %s", got, wantDetail) - } -} diff --git a/internal/backend/remote/cli.go b/internal/backend/remote/cli.go deleted file mode 100644 index 926908360e0e..000000000000 --- a/internal/backend/remote/cli.go +++ /dev/null @@ -1,20 +0,0 @@ -package remote - -import ( - "github.com/hashicorp/terraform/internal/backend" -) - -// CLIInit implements backend.CLI -func (b *Remote) CLIInit(opts *backend.CLIOpts) error { - if cli, ok := b.local.(backend.CLI); ok { - if err := cli.CLIInit(opts); err != nil { - return err - } - } - - b.CLI = opts.CLI - b.CLIColor = opts.CLIColor - b.ContextOpts = opts.ContextOpts - - return nil -} diff --git a/internal/backend/remote/remote_test.go b/internal/backend/remote/remote_test.go deleted file mode 100644 index f4cc3c5c2837..000000000000 --- a/internal/backend/remote/remote_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package remote - -import ( - "flag" - "os" - "testing" - "time" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -func TestMain(m *testing.M) { - flag.Parse() - - // Make sure TF_FORCE_LOCAL_BACKEND is unset - os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - // Reduce delays to make tests run faster - backoffMin = 1.0 - backoffMax = 1.0 - planConfigurationVersionsPollInterval = 1 * time.Millisecond - runPollInterval = 1 * time.Millisecond - - os.Exit(m.Run()) -} diff --git a/internal/backend/remote/testing.go b/internal/backend/remote/testing.go deleted file mode 100644 index 7dbb9e9b2c8c..000000000000 --- a/internal/backend/remote/testing.go +++ /dev/null @@ -1,321 +0,0 @@ -package remote - -import ( - "context" - "fmt" - "io" - "net/http" - 
"net/http/httptest" - "path" - "testing" - "time" - - tfe "github.com/hashicorp/go-tfe" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -const ( - testCred = "test-auth-token" -) - -var ( - tfeHost = svchost.Hostname(defaultHostname) - credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ - tfeHost: {"token": testCred}, - }) -) - -// mockInput is a mock implementation of terraform.UIInput. 
-type mockInput struct { - answers map[string]string -} - -func (m *mockInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - v, ok := m.answers[opts.Id] - if !ok { - return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) - } - if v == "wait-for-external-update" { - select { - case <-ctx.Done(): - case <-time.After(time.Minute): - } - } - delete(m.answers, opts.Id) - return v, nil -} - -func testInput(t *testing.T, answers map[string]string) *mockInput { - return &mockInput{answers: answers} -} - -func testBackendDefault(t *testing.T) (*Remote, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }) - return testBackend(t, obj) -} - -func testBackendNoDefault(t *testing.T) (*Remote, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "prefix": cty.StringVal("my-app-"), - }), - }) - return testBackend(t, obj) -} - -func testBackendNoOperations(t *testing.T) (*Remote, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("no-operations"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }) - return testBackend(t, obj) -} - -func testRemoteClient(t *testing.T) remote.Client { - b, bCleanup := testBackendDefault(t) - defer bCleanup() - - raw, err := b.StateMgr(backend.DefaultStateName) - if err != nil { - t.Fatalf("error: %v", err) - } - - return raw.(*remote.State).Client -} - -func 
testBackend(t *testing.T, obj cty.Value) (*Remote, func()) { - s := testServer(t) - b := New(testDisco(s)) - - // Configure the backend so the client is created. - newObj, valDiags := b.PrepareConfig(obj) - if len(valDiags) != 0 { - t.Fatal(valDiags.ErrWithWarnings()) - } - obj = newObj - - confDiags := b.Configure(obj) - if len(confDiags) != 0 { - t.Fatal(confDiags.ErrWithWarnings()) - } - - // Get a new mock client. - mc := cloud.NewMockClient() - - // Replace the services we use with our mock services. - b.CLI = cli.NewMockUi() - b.client.Applies = mc.Applies - b.client.ConfigurationVersions = mc.ConfigurationVersions - b.client.CostEstimates = mc.CostEstimates - b.client.Organizations = mc.Organizations - b.client.Plans = mc.Plans - b.client.PolicyChecks = mc.PolicyChecks - b.client.Runs = mc.Runs - b.client.StateVersions = mc.StateVersions - b.client.Variables = mc.Variables - b.client.Workspaces = mc.Workspaces - - // Set local to a local test backend. - b.local = testLocalBackend(t, b) - - ctx := context.Background() - - // Create the organization. - _, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ - Name: tfe.String(b.organization), - }) - if err != nil { - t.Fatalf("error: %v", err) - } - - // Create the default workspace if required. - if b.workspace != "" { - _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.workspace), - }) - if err != nil { - t.Fatalf("error: %v", err) - } - } - - return b, s.Close -} - -func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { - b := backendLocal.NewWithBackend(remote) - - // Add a test provider to the local backend. 
- p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "null_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - return b -} - -// testServer returns a *httptest.Server used for local testing. -func testServer(t *testing.T) *httptest.Server { - mux := http.NewServeMux() - - // Respond to service discovery calls. - mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - io.WriteString(w, `{ - "state.v2": "/api/v2/", - "tfe.v2.1": "/api/v2/", - "versions.v1": "/v1/versions/" -}`) - }) - - // Respond to service version constraints calls. - mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - io.WriteString(w, fmt.Sprintf(`{ - "service": "%s", - "product": "terraform", - "minimum": "0.1.0", - "maximum": "10.0.0" -}`, path.Base(r.URL.Path))) - }) - - // Respond to pings to get the API version header. - mux.HandleFunc("/api/v2/ping", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.4") - }) - - // Respond to the initial query to read the hashicorp org entitlements. 
- mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.api+json") - io.WriteString(w, `{ - "data": { - "id": "org-GExadygjSbKP8hsY", - "type": "entitlement-sets", - "attributes": { - "operations": true, - "private-module-registry": true, - "sentinel": true, - "state-storage": true, - "teams": true, - "vcs-integrations": true - } - } -}`) - }) - - // Respond to the initial query to read the no-operations org entitlements. - mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.api+json") - io.WriteString(w, `{ - "data": { - "id": "org-ufxa3y8jSbKP8hsT", - "type": "entitlement-sets", - "attributes": { - "operations": false, - "private-module-registry": true, - "sentinel": true, - "state-storage": true, - "teams": true, - "vcs-integrations": true - } - } -}`) - }) - - // All tests that are assumed to pass will use the hashicorp organization, - // so for all other organization requests we will return a 404. - mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - io.WriteString(w, `{ - "errors": [ - { - "status": "404", - "title": "not found" - } - ] -}`) - }) - - return httptest.NewServer(mux) -} - -// testDisco returns a *disco.Disco mapping app.terraform.io and -// localhost to a local test server. 
-func testDisco(s *httptest.Server) *disco.Disco { - services := map[string]interface{}{ - "state.v2": fmt.Sprintf("%s/api/v2/", s.URL), - "tfe.v2.1": fmt.Sprintf("%s/api/v2/", s.URL), - "versions.v1": fmt.Sprintf("%s/v1/versions/", s.URL), - } - d := disco.NewWithCredentialsSource(credsSrc) - d.SetUserAgent(httpclient.TerraformUserAgent(version.String())) - - d.ForceHostServices(svchost.Hostname(defaultHostname), services) - d.ForceHostServices(svchost.Hostname("localhost"), services) - return d -} - -type unparsedVariableValue struct { - value string - source terraform.ValueSourceType -} - -func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { - return &terraform.InputValue{ - Value: cty.StringVal(v.value), - SourceType: v.source, - }, tfdiags.Diagnostics{} -} - -// testVariable returns a backend.UnparsedVariableValue used for testing. -func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { - vars := make(map[string]backend.UnparsedVariableValue, len(vs)) - for _, v := range vs { - vars[v] = &unparsedVariableValue{ - value: v, - source: s, - } - } - return vars -} diff --git a/internal/backend/testing.go b/internal/backend/testing.go deleted file mode 100644 index 3b97e307de5b..000000000000 --- a/internal/backend/testing.go +++ /dev/null @@ -1,425 +0,0 @@ -package backend - -import ( - "reflect" - "sort" - "testing" - - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// TestBackendConfig validates and configures the backend with the -// given 
configuration. -func TestBackendConfig(t *testing.T, b Backend, c hcl.Body) Backend { - t.Helper() - - t.Logf("TestBackendConfig on %T with %#v", b, c) - - var diags tfdiags.Diagnostics - - // To make things easier for test authors, we'll allow a nil body here - // (even though that's not normally valid) and just treat it as an empty - // body. - if c == nil { - c = hcl.EmptyBody() - } - - schema := b.ConfigSchema() - spec := schema.DecoderSpec() - obj, decDiags := hcldec.Decode(c, spec, nil) - diags = diags.Append(decDiags) - - newObj, valDiags := b.PrepareConfig(obj) - diags = diags.Append(valDiags.InConfigBody(c, "")) - - // it's valid for a Backend to have warnings (e.g. a Deprecation) as such we should only raise on errors - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - obj = newObj - - confDiags := b.Configure(obj) - if len(confDiags) != 0 { - confDiags = confDiags.InConfigBody(c, "") - t.Fatal(confDiags.ErrWithWarnings()) - } - - return b -} - -// TestWrapConfig takes a raw data structure and converts it into a -// synthetic hcl.Body to use for testing. -// -// The given structure should only include values that can be accepted by -// hcl2shim.HCL2ValueFromConfigValue. If incompatible values are given, -// this function will panic. -func TestWrapConfig(raw map[string]interface{}) hcl.Body { - obj := hcl2shim.HCL2ValueFromConfigValue(raw) - return configs.SynthBody("", obj.AsValueMap()) -} - -// TestBackend will test the functionality of a Backend. The backend is -// assumed to already be configured. This will test state functionality. -// If the backend reports it doesn't support multi-state by returning the -// error ErrWorkspacesNotSupported, then it will not test that. 
-func TestBackendStates(t *testing.T, b Backend) { - t.Helper() - - noDefault := false - if _, err := b.StateMgr(DefaultStateName); err != nil { - if err == ErrDefaultWorkspaceNotSupported { - noDefault = true - } else { - t.Fatalf("error: %v", err) - } - } - - workspaces, err := b.Workspaces() - if err != nil { - if err == ErrWorkspacesNotSupported { - t.Logf("TestBackend: workspaces not supported in %T, skipping", b) - return - } - t.Fatalf("error: %v", err) - } - - // Test it starts with only the default - if !noDefault && (len(workspaces) != 1 || workspaces[0] != DefaultStateName) { - t.Fatalf("should only have the default workspace to start: %#v", workspaces) - } - - // Create a couple states - foo, err := b.StateMgr("foo") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := foo.State(); v.HasManagedResourceInstanceObjects() { - t.Fatalf("should be empty: %s", v) - } - - bar, err := b.StateMgr("bar") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := bar.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := bar.State(); v.HasManagedResourceInstanceObjects() { - t.Fatalf("should be empty: %s", v) - } - - // Verify they are distinct states that can be read back from storage - { - // We'll use two distinct states here and verify that changing one - // does not also change the other. - fooState := states.NewState() - barState := states.NewState() - - // write a known state to foo - if err := foo.WriteState(fooState); err != nil { - t.Fatal("error writing foo state:", err) - } - if err := foo.PersistState(nil); err != nil { - t.Fatal("error persisting foo state:", err) - } - - // We'll make "bar" different by adding a fake resource state to it. 
- barState.SyncWrapper().SetResourceInstanceCurrent( - addrs.ResourceInstance{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }, - }.Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte("{}"), - Status: states.ObjectReady, - SchemaVersion: 0, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - // write a distinct known state to bar - if err := bar.WriteState(barState); err != nil { - t.Fatalf("bad: %s", err) - } - if err := bar.PersistState(nil); err != nil { - t.Fatalf("bad: %s", err) - } - - // verify that foo is unchanged with the existing state manager - if err := foo.RefreshState(); err != nil { - t.Fatal("error refreshing foo:", err) - } - fooState = foo.State() - if fooState.HasManagedResourceInstanceObjects() { - t.Fatal("after writing a resource to bar, foo now has resources too") - } - - // fetch foo again from the backend - foo, err = b.StateMgr("foo") - if err != nil { - t.Fatal("error re-fetching state:", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatal("error refreshing foo:", err) - } - fooState = foo.State() - if fooState.HasManagedResourceInstanceObjects() { - t.Fatal("after writing a resource to bar and re-reading foo, foo now has resources too") - } - - // fetch the bar again from the backend - bar, err = b.StateMgr("bar") - if err != nil { - t.Fatal("error re-fetching state:", err) - } - if err := bar.RefreshState(); err != nil { - t.Fatal("error refreshing bar:", err) - } - barState = bar.State() - if !barState.HasManagedResourceInstanceObjects() { - t.Fatal("after writing a resource instance object to bar and re-reading it, the object has vanished") - } - } - - // Verify we can now list them - { - // we determined that named stated are supported earlier - workspaces, err := b.Workspaces() - if err != nil { - t.Fatalf("err: %s", err) - } - - sort.Strings(workspaces) - 
expected := []string{"bar", "default", "foo"} - if noDefault { - expected = []string{"bar", "foo"} - } - if !reflect.DeepEqual(workspaces, expected) { - t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) - } - } - - // Delete some workspaces - if err := b.DeleteWorkspace("foo", true); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the default state can't be deleted - if err := b.DeleteWorkspace(DefaultStateName, true); err == nil { - t.Fatal("expected error") - } - - // Create and delete the foo workspace again. - // Make sure that there are no leftover artifacts from a deleted state - // preventing re-creation. - foo, err = b.StateMgr("foo") - if err != nil { - t.Fatalf("error: %s", err) - } - if err := foo.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - if v := foo.State(); v.HasManagedResourceInstanceObjects() { - t.Fatalf("should be empty: %s", v) - } - // and delete it again - if err := b.DeleteWorkspace("foo", true); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify deletion - { - workspaces, err := b.Workspaces() - if err != nil { - t.Fatalf("err: %s", err) - } - - sort.Strings(workspaces) - expected := []string{"bar", "default"} - if noDefault { - expected = []string{"bar"} - } - if !reflect.DeepEqual(workspaces, expected) { - t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) - } - } -} - -// TestBackendStateLocks will test the locking functionality of the remote -// state backend. -func TestBackendStateLocks(t *testing.T, b1, b2 Backend) { - t.Helper() - testLocks(t, b1, b2, false) -} - -// TestBackendStateForceUnlock verifies that the lock error is the expected -// type, and the lock can be unlocked using the ID reported in the error. -// Remote state backends that support -force-unlock should call this in at -// least one of the acceptance tests. 
-func TestBackendStateForceUnlock(t *testing.T, b1, b2 Backend) { - t.Helper() - testLocks(t, b1, b2, true) -} - -// TestBackendStateLocksInWS will test the locking functionality of the remote -// state backend. -func TestBackendStateLocksInWS(t *testing.T, b1, b2 Backend, ws string) { - t.Helper() - testLocksInWorkspace(t, b1, b2, false, ws) -} - -// TestBackendStateForceUnlockInWS verifies that the lock error is the expected -// type, and the lock can be unlocked using the ID reported in the error. -// Remote state backends that support -force-unlock should call this in at -// least one of the acceptance tests. -func TestBackendStateForceUnlockInWS(t *testing.T, b1, b2 Backend, ws string) { - t.Helper() - testLocksInWorkspace(t, b1, b2, true, ws) -} - -func testLocks(t *testing.T, b1, b2 Backend, testForceUnlock bool) { - testLocksInWorkspace(t, b1, b2, testForceUnlock, DefaultStateName) -} - -func testLocksInWorkspace(t *testing.T, b1, b2 Backend, testForceUnlock bool, workspace string) { - t.Helper() - - // Get the default state for each - b1StateMgr, err := b1.StateMgr(DefaultStateName) - if err != nil { - t.Fatalf("error: %s", err) - } - if err := b1StateMgr.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - - // Fast exit if this doesn't support locking at all - if _, ok := b1StateMgr.(statemgr.Locker); !ok { - t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1) - return - } - - t.Logf("TestBackend: testing state locking for %T", b1) - - b2StateMgr, err := b2.StateMgr(DefaultStateName) - if err != nil { - t.Fatalf("error: %s", err) - } - if err := b2StateMgr.RefreshState(); err != nil { - t.Fatalf("bad: %s", err) - } - - // Reassign so its obvious whats happening - lockerA := b1StateMgr.(statemgr.Locker) - lockerB := b2StateMgr.(statemgr.Locker) - - infoA := statemgr.NewLockInfo() - infoA.Operation = "test" - infoA.Who = "clientA" - - infoB := statemgr.NewLockInfo() - infoB.Operation = "test" - infoB.Who = "clientB" - 
- lockIDA, err := lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - // Make sure we can still get the statemgr.Full from another instance even - // when locked. This should only happen when a state is loaded via the - // backend, and as a remote state. - _, err = b2.StateMgr(DefaultStateName) - if err != nil { - t.Errorf("failed to read locked state from another backend instance: %s", err) - } - - // If the lock ID is blank, assume locking is disabled - if lockIDA == "" { - t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) - return - } - - _, err = lockerB.Lock(infoB) - if err == nil { - lockerA.Unlock(lockIDA) - t.Fatal("client B obtained lock while held by client A") - } - - if err := lockerA.Unlock(lockIDA); err != nil { - t.Fatal("error unlocking client A", err) - } - - lockIDB, err := lockerB.Lock(infoB) - if err != nil { - t.Fatal("unable to obtain lock from client B") - } - - if lockIDB == lockIDA { - t.Errorf("duplicate lock IDs: %q", lockIDB) - } - - if err = lockerB.Unlock(lockIDB); err != nil { - t.Fatal("error unlocking client B:", err) - } - - // test the equivalent of -force-unlock, by using the id from the error - // output. 
- if !testForceUnlock { - return - } - - // get a new ID - infoA.ID, err = uuid.GenerateUUID() - if err != nil { - panic(err) - } - - lockIDA, err = lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get re lock A:", err) - } - unlock := func() { - err := lockerA.Unlock(lockIDA) - if err != nil { - t.Fatal(err) - } - } - - _, err = lockerB.Lock(infoB) - if err == nil { - unlock() - t.Fatal("client B obtained lock while held by client A") - } - - infoErr, ok := err.(*statemgr.LockError) - if !ok { - unlock() - t.Fatalf("expected type *statemgr.LockError, got : %#v", err) - } - - // try to unlock with the second unlocker, using the ID from the error - if err := lockerB.Unlock(infoErr.Info.ID); err != nil { - unlock() - t.Fatalf("could not unlock with the reported ID %q: %s", infoErr.Info.ID, err) - } -} diff --git a/internal/builtin/providers/terraform/provider.go b/internal/builtin/providers/terraform/provider.go deleted file mode 100644 index 7c43991753a7..000000000000 --- a/internal/builtin/providers/terraform/provider.go +++ /dev/null @@ -1,138 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/providers" -) - -// Provider is an implementation of providers.Interface -type Provider struct{} - -// NewProvider returns a new terraform provider -func NewProvider() providers.Interface { - return &Provider{} -} - -// GetSchema returns the complete schema for the provider. -func (p *Provider) GetProviderSchema() providers.GetProviderSchemaResponse { - return providers.GetProviderSchemaResponse{ - DataSources: map[string]providers.Schema{ - "terraform_remote_state": dataSourceRemoteStateGetSchema(), - }, - ResourceTypes: map[string]providers.Schema{ - "terraform_data": dataStoreResourceSchema(), - }, - } -} - -// ValidateProviderConfig is used to validate the configuration values. 
-func (p *Provider) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { - // At this moment there is nothing to configure for the terraform provider, - // so we will happily return without taking any action - var res providers.ValidateProviderConfigResponse - res.PreparedConfig = req.Config - return res -} - -// ValidateDataResourceConfig is used to validate the data source configuration values. -func (p *Provider) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { - // FIXME: move the backend configuration validate call that's currently - // inside the read method into here so that we can catch provider configuration - // errors in terraform validate as well as during terraform plan. - var res providers.ValidateDataResourceConfigResponse - - // This should not happen - if req.TypeName != "terraform_remote_state" { - res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) - return res - } - - diags := dataSourceRemoteStateValidate(req.Config) - res.Diagnostics = diags - - return res -} - -// Configure configures and initializes the provider. -func (p *Provider) ConfigureProvider(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { - // At this moment there is nothing to configure for the terraform provider, - // so we will happily return without taking any action - var res providers.ConfigureProviderResponse - return res -} - -// ReadDataSource returns the data source's current state. 
-func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - // call function - var res providers.ReadDataSourceResponse - - // This should not happen - if req.TypeName != "terraform_remote_state" { - res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) - return res - } - - newState, diags := dataSourceRemoteStateRead(req.Config) - - res.State = newState - res.Diagnostics = diags - - return res -} - -// Stop is called when the provider should halt any in-flight actions. -func (p *Provider) Stop() error { - log.Println("[DEBUG] terraform provider cannot Stop") - return nil -} - -// All the Resource-specific functions are below. -// The terraform provider supplies a single data source, `terraform_remote_state` -// and no resources. - -// UpgradeResourceState is called when the state loader encounters an -// instance state whose schema version is less than the one reported by the -// currently-used version of the corresponding provider, and the upgraded -// result is used for any further processing. -func (p *Provider) UpgradeResourceState(req providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - return upgradeDataStoreResourceState(req) -} - -// ReadResource refreshes a resource and returns its current state. -func (p *Provider) ReadResource(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return readDataStoreResourceState(req) -} - -// PlanResourceChange takes the current state and proposed state of a -// resource, and returns the planned final state. -func (p *Provider) PlanResourceChange(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return planDataStoreResourceChange(req) -} - -// ApplyResourceChange takes the planned state for a resource, which may -// yet contain unknown computed values, and applies the changes returning -// the final state. 
-func (p *Provider) ApplyResourceChange(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return applyDataStoreResourceChange(req) -} - -// ImportResourceState requests that the given resource be imported. -func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - if req.TypeName == "terraform_data" { - return importDataStore(req) - } - - panic("unimplemented - terraform_remote_state has no resources") -} - -// ValidateResourceConfig is used to to validate the resource configuration values. -func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - return validateDataStoreResourceConfig(req) -} - -// Close is a noop for this provider, since it's run in-process. -func (p *Provider) Close() error { - return nil -} diff --git a/internal/builtin/providers/terraform/provider_test.go b/internal/builtin/providers/terraform/provider_test.go deleted file mode 100644 index 5f06e9c3423a..000000000000 --- a/internal/builtin/providers/terraform/provider_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package terraform - -import ( - backendInit "github.com/hashicorp/terraform/internal/backend/init" -) - -func init() { - // Initialize the backends - backendInit.Init(nil) -} diff --git a/internal/builtin/providers/terraform/resource_data.go b/internal/builtin/providers/terraform/resource_data.go deleted file mode 100644 index b7b67df82296..000000000000 --- a/internal/builtin/providers/terraform/resource_data.go +++ /dev/null @@ -1,169 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -func dataStoreResourceSchema() providers.Schema { - 
return providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "input": {Type: cty.DynamicPseudoType, Optional: true}, - "output": {Type: cty.DynamicPseudoType, Computed: true}, - "triggers_replace": {Type: cty.DynamicPseudoType, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - } -} - -func validateDataStoreResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - if req.Config.IsNull() { - return resp - } - - // Core does not currently validate computed values are not set in the - // configuration. - for _, attr := range []string{"id", "output"} { - if !req.Config.GetAttr(attr).IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf(`%q attribute is read-only`, attr)) - } - } - return resp -} - -func upgradeDataStoreResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - ty := dataStoreResourceSchema().Block.ImpliedType() - val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.UpgradedState = val - return resp -} - -func readDataStoreResourceState(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - resp.NewState = req.PriorState - return resp -} - -func planDataStoreResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - if req.ProposedNewState.IsNull() { - // destroy op - resp.PlannedState = req.ProposedNewState - return resp - } - - planned := req.ProposedNewState.AsValueMap() - - input := req.ProposedNewState.GetAttr("input") - trigger := req.ProposedNewState.GetAttr("triggers_replace") - - switch { - case req.PriorState.IsNull(): - // Create - // Set the id value to unknown. - planned["id"] = cty.UnknownVal(cty.String) - - // Output type must always match the input, even when it's null. 
- if input.IsNull() { - planned["output"] = input - } else { - planned["output"] = cty.UnknownVal(input.Type()) - } - - resp.PlannedState = cty.ObjectVal(planned) - return resp - - case !req.PriorState.GetAttr("triggers_replace").RawEquals(trigger): - // trigger changed, so we need to replace the entire instance - resp.RequiresReplace = append(resp.RequiresReplace, cty.GetAttrPath("triggers_replace")) - planned["id"] = cty.UnknownVal(cty.String) - - // We need to check the input for the replacement instance to compute a - // new output. - if input.IsNull() { - planned["output"] = input - } else { - planned["output"] = cty.UnknownVal(input.Type()) - } - - case !req.PriorState.GetAttr("input").RawEquals(input): - // only input changed, so we only need to re-compute output - planned["output"] = cty.UnknownVal(input.Type()) - } - - resp.PlannedState = cty.ObjectVal(planned) - return resp -} - -var testUUIDHook func() string - -func applyDataStoreResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if req.PlannedState.IsNull() { - resp.NewState = req.PlannedState - return resp - } - - newState := req.PlannedState.AsValueMap() - - if !req.PlannedState.GetAttr("output").IsKnown() { - newState["output"] = req.PlannedState.GetAttr("input") - } - - if !req.PlannedState.GetAttr("id").IsKnown() { - idString, err := uuid.GenerateUUID() - // Terraform would probably never get this far without a good random - // source, but catch the error anyway. 
- if err != nil { - diag := tfdiags.AttributeValue( - tfdiags.Error, - "Error generating id", - err.Error(), - cty.GetAttrPath("id"), - ) - - resp.Diagnostics = resp.Diagnostics.Append(diag) - } - - if testUUIDHook != nil { - idString = testUUIDHook() - } - - newState["id"] = cty.StringVal(idString) - } - - resp.NewState = cty.ObjectVal(newState) - - return resp -} - -// TODO: This isn't very useful even for examples, because terraform_data has -// no way to refresh the full resource value from only the import ID. This -// minimal implementation allows the import to succeed, and can be extended -// once the configuration is available during import. -func importDataStore(req providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - schema := dataStoreResourceSchema() - v := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal(req.ID), - }) - state, err := schema.Block.CoerceValue(v) - resp.Diagnostics = resp.Diagnostics.Append(err) - - resp.ImportedResources = []providers.ImportedResource{ - { - TypeName: req.TypeName, - State: state, - }, - } - return resp -} diff --git a/internal/builtin/providers/terraform/resource_data_test.go b/internal/builtin/providers/terraform/resource_data_test.go deleted file mode 100644 index 33deb0ddf199..000000000000 --- a/internal/builtin/providers/terraform/resource_data_test.go +++ /dev/null @@ -1,382 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -func TestManagedDataValidate(t *testing.T) { - cfg := map[string]cty.Value{ - "input": cty.NullVal(cty.DynamicPseudoType), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.NullVal(cty.String), - } - - // empty - req := providers.ValidateResourceConfigRequest{ - TypeName: "terraform_data", - Config: cty.ObjectVal(cfg), - } - - resp 
:= validateDataStoreResourceConfig(req) - if resp.Diagnostics.HasErrors() { - t.Error("empty config error:", resp.Diagnostics.ErrWithWarnings()) - } - - // invalid computed values - cfg["output"] = cty.StringVal("oops") - req.Config = cty.ObjectVal(cfg) - - resp = validateDataStoreResourceConfig(req) - if !resp.Diagnostics.HasErrors() { - t.Error("expected error") - } - - msg := resp.Diagnostics.Err().Error() - if !strings.Contains(msg, "attribute is read-only") { - t.Error("unexpected error", msg) - } -} - -func TestManagedDataUpgradeState(t *testing.T) { - schema := dataStoreResourceSchema() - ty := schema.Block.ImpliedType() - - state := cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.ListVal([]cty.Value{ - cty.StringVal("a"), cty.StringVal("b"), - }), - "id": cty.StringVal("not-quite-unique"), - }) - - jsState, err := ctyjson.Marshal(state, ty) - if err != nil { - t.Fatal(err) - } - - // empty - req := providers.UpgradeResourceStateRequest{ - TypeName: "terraform_data", - RawStateJSON: jsState, - } - - resp := upgradeDataStoreResourceState(req) - if resp.Diagnostics.HasErrors() { - t.Error("upgrade state error:", resp.Diagnostics.ErrWithWarnings()) - } - - if !resp.UpgradedState.RawEquals(state) { - t.Errorf("prior state was:\n%#v\nupgraded state is:\n%#v\n", state, resp.UpgradedState) - } -} - -func TestManagedDataRead(t *testing.T) { - req := providers.ReadResourceRequest{ - TypeName: "terraform_data", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.ListVal([]cty.Value{ - cty.StringVal("a"), cty.StringVal("b"), - }), - "id": cty.StringVal("not-quite-unique"), - }), - } - - resp := readDataStoreResourceState(req) - if resp.Diagnostics.HasErrors() { - t.Fatal("unexpected error", resp.Diagnostics.ErrWithWarnings()) - } - - if !resp.NewState.RawEquals(req.PriorState) { - 
t.Errorf("prior state was:\n%#v\nnew state is:\n%#v\n", req.PriorState, resp.NewState) - } -} - -func TestManagedDataPlan(t *testing.T) { - schema := dataStoreResourceSchema().Block - ty := schema.ImpliedType() - - for name, tc := range map[string]struct { - prior cty.Value - proposed cty.Value - planned cty.Value - }{ - "create": { - prior: cty.NullVal(ty), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.DynamicPseudoType), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.NullVal(cty.String), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.DynamicPseudoType), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.UnknownVal(cty.String), - }), - }, - - "create-typed-null-input": { - prior: cty.NullVal(ty), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.String), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.NullVal(cty.String), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.String), - "output": cty.NullVal(cty.String), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.UnknownVal(cty.String), - }), - }, - - "create-output": { - prior: cty.NullVal(ty), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.NullVal(cty.String), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.UnknownVal(cty.String), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.UnknownVal(cty.String), - }), - }, - - "update-input": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": 
cty.StringVal("input"), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.UnknownVal(cty.List(cty.String)), - "output": cty.StringVal("input"), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.UnknownVal(cty.List(cty.String)), - "output": cty.UnknownVal(cty.List(cty.String)), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - }, - - "update-trigger": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.StringVal("new-value"), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.UnknownVal(cty.String), - "triggers_replace": cty.StringVal("new-value"), - "id": cty.UnknownVal(cty.String), - }), - }, - - "update-input-trigger": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("value"), - }), - "id": cty.StringVal("not-quite-unique"), - }), - proposed: cty.ObjectVal(map[string]cty.Value{ - "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": cty.StringVal("input"), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("new value"), - }), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": 
cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": cty.UnknownVal(cty.List(cty.String)), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("new value"), - }), - "id": cty.UnknownVal(cty.String), - }), - }, - } { - t.Run("plan-"+name, func(t *testing.T) { - req := providers.PlanResourceChangeRequest{ - TypeName: "terraform_data", - PriorState: tc.prior, - ProposedNewState: tc.proposed, - } - - resp := planDataStoreResourceChange(req) - if resp.Diagnostics.HasErrors() { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } - - if !resp.PlannedState.RawEquals(tc.planned) { - t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.planned, resp.PlannedState) - } - }) - } -} - -func TestManagedDataApply(t *testing.T) { - testUUIDHook = func() string { - return "not-quite-unique" - } - defer func() { - testUUIDHook = nil - }() - - schema := dataStoreResourceSchema().Block - ty := schema.ImpliedType() - - for name, tc := range map[string]struct { - prior cty.Value - planned cty.Value - state cty.Value - }{ - "create": { - prior: cty.NullVal(ty), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.DynamicPseudoType), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.UnknownVal(cty.String), - }), - state: cty.ObjectVal(map[string]cty.Value{ - "input": cty.NullVal(cty.DynamicPseudoType), - "output": cty.NullVal(cty.DynamicPseudoType), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - }, - - "create-output": { - prior: cty.NullVal(ty), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.UnknownVal(cty.String), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.UnknownVal(cty.String), - }), - state: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": 
cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - }, - - "update-input": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": cty.UnknownVal(cty.List(cty.String)), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - state: cty.ObjectVal(map[string]cty.Value{ - "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - }, - - "update-trigger": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.NullVal(cty.DynamicPseudoType), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.UnknownVal(cty.String), - "triggers_replace": cty.StringVal("new-value"), - "id": cty.UnknownVal(cty.String), - }), - state: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.StringVal("new-value"), - "id": cty.StringVal("not-quite-unique"), - }), - }, - - "update-input-trigger": { - prior: cty.ObjectVal(map[string]cty.Value{ - "input": cty.StringVal("input"), - "output": cty.StringVal("input"), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("value"), - }), - "id": cty.StringVal("not-quite-unique"), - }), - planned: cty.ObjectVal(map[string]cty.Value{ - "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": 
cty.UnknownVal(cty.List(cty.String)), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("new value"), - }), - "id": cty.UnknownVal(cty.String), - }), - state: cty.ObjectVal(map[string]cty.Value{ - "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), - "triggers_replace": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("new value"), - }), - "id": cty.StringVal("not-quite-unique"), - }), - }, - } { - t.Run("apply-"+name, func(t *testing.T) { - req := providers.ApplyResourceChangeRequest{ - TypeName: "terraform_data", - PriorState: tc.prior, - PlannedState: tc.planned, - } - - resp := applyDataStoreResourceChange(req) - if resp.Diagnostics.HasErrors() { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } - - if !resp.NewState.RawEquals(tc.state) { - t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.state, resp.NewState) - } - }) - } -} diff --git a/internal/builtin/provisioners/file/resource_provisioner.go b/internal/builtin/provisioners/file/resource_provisioner.go deleted file mode 100644 index 54c2e4a2b1be..000000000000 --- a/internal/builtin/provisioners/file/resource_provisioner.go +++ /dev/null @@ -1,207 +0,0 @@ -package file - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/terraform/internal/communicator" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" -) - -func New() provisioners.Interface { - ctx, cancel := context.WithCancel(context.Background()) - return &provisioner{ - ctx: ctx, - cancel: cancel, - } -} - -type provisioner struct { - // We store a context here tied to the lifetime of the provisioner. - // This allows the Stop method to cancel any in-flight requests. 
- ctx context.Context - cancel context.CancelFunc -} - -func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "source": { - Type: cty.String, - Optional: true, - }, - - "content": { - Type: cty.String, - Optional: true, - }, - - "destination": { - Type: cty.String, - Required: true, - }, - }, - } - resp.Provisioner = schema - return resp -} - -func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - - source := cfg.GetAttr("source") - content := cfg.GetAttr("content") - - switch { - case !source.IsNull() && !content.IsNull(): - resp.Diagnostics = resp.Diagnostics.Append(errors.New("Cannot set both 'source' and 'content'")) - return resp - case source.IsNull() && content.IsNull(): - resp.Diagnostics = resp.Diagnostics.Append(errors.New("Must provide one of 'source' or 'content'")) - return resp - } - - return resp -} - -func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - if req.Connection.IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "file provisioner error", - "Missing connection configuration for provisioner.", - )) - return resp - } - - comm, err := communicator.New(req.Connection) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "file provisioner error", - err.Error(), - )) - return resp - } - - // Get the source - src, deleteSource, err := getSrc(req.Config) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "file provisioner error", - err.Error(), - )) - return resp - } 
- if deleteSource { - defer os.Remove(src) - } - - // Begin the file copy - dst := req.Config.GetAttr("destination").AsString() - if err := copyFiles(p.ctx, comm, src, dst); err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "file provisioner error", - err.Error(), - )) - return resp - } - - return resp -} - -// getSrc returns the file to use as source -func getSrc(v cty.Value) (string, bool, error) { - content := v.GetAttr("content") - src := v.GetAttr("source") - - switch { - case !content.IsNull(): - file, err := ioutil.TempFile("", "tf-file-content") - if err != nil { - return "", true, err - } - - if _, err = file.WriteString(content.AsString()); err != nil { - return "", true, err - } - - return file.Name(), true, nil - - case !src.IsNull(): - expansion, err := homedir.Expand(src.AsString()) - return expansion, false, err - - default: - panic("source and content cannot both be null") - } -} - -// copyFiles is used to copy the files from a source to a destination -func copyFiles(ctx context.Context, comm communicator.Communicator, src, dst string) error { - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Wait and retry until we establish the connection - err := communicator.Retry(retryCtx, func() error { - return comm.Connect(nil) - }) - if err != nil { - return err - } - - // disconnect when the context is canceled, which will close this after - // Apply as well. - go func() { - <-ctx.Done() - comm.Disconnect() - }() - - info, err := os.Stat(src) - if err != nil { - return err - } - - // If we're uploading a directory, short circuit and do that - if info.IsDir() { - if err := comm.UploadDir(dst, src); err != nil { - return fmt.Errorf("Upload failed: %v", err) - } - return nil - } - - // We're uploading a file... 
- f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - err = comm.Upload(dst, f) - if err != nil { - return fmt.Errorf("Upload failed: %v", err) - } - - return err -} - -func (p *provisioner) Stop() error { - p.cancel() - return nil -} - -func (p *provisioner) Close() error { - return nil -} diff --git a/internal/builtin/provisioners/file/resource_provisioner_test.go b/internal/builtin/provisioners/file/resource_provisioner_test.go deleted file mode 100644 index c470743b3444..000000000000 --- a/internal/builtin/provisioners/file/resource_provisioner_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package file - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/zclconf/go-cty/cty" -) - -func TestResourceProvider_Validate_good_source(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "source": cty.StringVal("/tmp/foo"), - "destination": cty.StringVal("/tmp/bar"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if len(resp.Diagnostics) > 0 { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } -} - -func TestResourceProvider_Validate_good_content(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "content": cty.StringVal("value to copy"), - "destination": cty.StringVal("/tmp/bar"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if len(resp.Diagnostics) > 0 { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } -} - -func TestResourceProvider_Validate_good_unknown_variable_value(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "content": cty.UnknownVal(cty.String), - "destination": cty.StringVal("/tmp/bar"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if len(resp.Diagnostics) > 0 { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } -} - -func 
TestResourceProvider_Validate_bad_not_destination(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "source": cty.StringVal("nope"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if !resp.Diagnostics.HasErrors() { - t.Fatal("Should have errors") - } -} - -func TestResourceProvider_Validate_bad_no_source(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "destination": cty.StringVal("/tmp/bar"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if !resp.Diagnostics.HasErrors() { - t.Fatal("Should have errors") - } -} - -func TestResourceProvider_Validate_bad_to_many_src(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "source": cty.StringVal("nope"), - "content": cty.StringVal("vlue to copy"), - "destination": cty.StringVal("/tmp/bar"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: v, - }) - - if !resp.Diagnostics.HasErrors() { - t.Fatal("Should have errors") - } -} - -// Validate that Stop can Close can be called even when not provisioning. 
-func TestResourceProvisioner_StopClose(t *testing.T) { - p := New() - p.Stop() - p.Close() -} - -func TestResourceProvisioner_connectionRequired(t *testing.T) { - p := New() - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) - if !resp.Diagnostics.HasErrors() { - t.Fatal("expected error") - } - - got := resp.Diagnostics.Err().Error() - if !strings.Contains(got, "Missing connection") { - t.Fatalf("expected 'Missing connection' error: got %q", got) - } -} diff --git a/internal/builtin/provisioners/local-exec/resource_provisioner.go b/internal/builtin/provisioners/local-exec/resource_provisioner.go deleted file mode 100644 index 650472c23e7e..000000000000 --- a/internal/builtin/provisioners/local-exec/resource_provisioner.go +++ /dev/null @@ -1,221 +0,0 @@ -package localexec - -import ( - "context" - "fmt" - "io" - "os" - "os/exec" - "runtime" - - "github.com/armon/circbuf" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/mitchellh/go-linereader" - "github.com/zclconf/go-cty/cty" -) - -const ( - // maxBufSize limits how much output we collect from a local - // invocation. This is to prevent TF memory usage from growing - // to an enormous amount due to a faulty process. - maxBufSize = 8 * 1024 -) - -func New() provisioners.Interface { - ctx, cancel := context.WithCancel(context.Background()) - return &provisioner{ - ctx: ctx, - cancel: cancel, - } -} - -type provisioner struct { - // We store a context here tied to the lifetime of the provisioner. - // This allows the Stop method to cancel any in-flight requests. 
- ctx context.Context - cancel context.CancelFunc -} - -func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "command": { - Type: cty.String, - Required: true, - }, - "interpreter": { - Type: cty.List(cty.String), - Optional: true, - }, - "working_dir": { - Type: cty.String, - Optional: true, - }, - "environment": { - Type: cty.Map(cty.String), - Optional: true, - }, - "quiet": { - Type: cty.Bool, - Optional: true, - }, - }, - } - - resp.Provisioner = schema - return resp -} - -func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - if _, err := p.GetSchema().Provisioner.CoerceValue(req.Config); err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Invalid local-exec provisioner configuration", - err.Error(), - )) - } - return resp -} - -func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - command := req.Config.GetAttr("command").AsString() - if command == "" { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Invalid local-exec provisioner command", - "The command must be a non-empty string.", - )) - return resp - } - - envVal := req.Config.GetAttr("environment") - var env []string - - if !envVal.IsNull() { - for k, v := range envVal.AsValueMap() { - if !v.IsNull() { - entry := fmt.Sprintf("%s=%s", k, v.AsString()) - env = append(env, entry) - } - } - } - - // Execute the command using a shell - intrVal := req.Config.GetAttr("interpreter") - - var cmdargs []string - if !intrVal.IsNull() && intrVal.LengthInt() > 0 { - for _, v := range intrVal.AsValueSlice() { - if !v.IsNull() { - cmdargs = append(cmdargs, v.AsString()) - } - } - } else { - if runtime.GOOS == "windows" { - cmdargs = 
[]string{"cmd", "/C"} - } else { - cmdargs = []string{"/bin/sh", "-c"} - } - } - - cmdargs = append(cmdargs, command) - - workingdir := "" - if wdVal := req.Config.GetAttr("working_dir"); !wdVal.IsNull() { - workingdir = wdVal.AsString() - } - - // Set up the reader that will read the output from the command. - // We use an os.Pipe so that the *os.File can be passed directly to the - // process, and not rely on goroutines copying the data which may block. - // See golang.org/issue/18874 - pr, pw, err := os.Pipe() - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "local-exec provisioner error", - fmt.Sprintf("Failed to initialize pipe for output: %s", err), - )) - return resp - } - - var cmdEnv []string - cmdEnv = os.Environ() - cmdEnv = append(cmdEnv, env...) - - // Set up the command - cmd := exec.CommandContext(p.ctx, cmdargs[0], cmdargs[1:]...) - cmd.Stderr = pw - cmd.Stdout = pw - // Dir specifies the working directory of the command. - // If Dir is the empty string (this is default), runs the command - // in the calling process's current directory. - cmd.Dir = workingdir - // Env specifies the environment of the command. 
- // By default will use the calling process's environment - cmd.Env = cmdEnv - - output, _ := circbuf.NewBuffer(maxBufSize) - - // Write everything we read from the pipe to the output buffer too - tee := io.TeeReader(pr, output) - - // copy the teed output to the UI output - copyDoneCh := make(chan struct{}) - go copyUIOutput(req.UIOutput, tee, copyDoneCh) - - // Output what we're about to run - if quietVal := req.Config.GetAttr("quiet"); !quietVal.IsNull() && quietVal.True() { - req.UIOutput.Output("local-exec: Executing: Suppressed by quiet=true") - } else { - req.UIOutput.Output(fmt.Sprintf("Executing: %q", cmdargs)) - } - - // Start the command - err = cmd.Start() - if err == nil { - err = cmd.Wait() - } - - // Close the write-end of the pipe so that the goroutine mirroring output - // ends properly. - pw.Close() - - // Cancelling the command may block the pipe reader if the file descriptor - // was passed to a child process which hasn't closed it. In this case the - // copyOutput goroutine will just hang out until exit. - select { - case <-copyDoneCh: - case <-p.ctx.Done(): - } - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "local-exec provisioner error", - fmt.Sprintf("Error running command '%s': %v. 
Output: %s", command, err, output.Bytes()), - )) - return resp - } - - return resp -} - -func (p *provisioner) Stop() error { - p.cancel() - return nil -} - -func (p *provisioner) Close() error { - return nil -} - -func copyUIOutput(o provisioners.UIOutput, r io.Reader, doneCh chan<- struct{}) { - defer close(doneCh) - lr := linereader.New(r) - for line := range lr.Ch { - o.Output(line) - } -} diff --git a/internal/builtin/provisioners/local-exec/resource_provisioner_test.go b/internal/builtin/provisioners/local-exec/resource_provisioner_test.go deleted file mode 100644 index d1560d48ee7a..000000000000 --- a/internal/builtin/provisioners/local-exec/resource_provisioner_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package localexec - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" -) - -func TestResourceProvider_Apply(t *testing.T) { - defer os.Remove("test_out") - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo foo > test_out"), - })) - if err != nil { - t.Fatal(err) - } - - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: c, - UIOutput: output, - }) - - if resp.Diagnostics.HasErrors() { - t.Fatalf("err: %v", resp.Diagnostics.Err()) - } - - // Check the file - raw, err := ioutil.ReadFile("test_out") - if err != nil { - t.Fatalf("err: %v", err) - } - - actual := strings.TrimSpace(string(raw)) - expected := "foo" - if actual != expected { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceProvider_stop(t *testing.T) { - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - - c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - // bash/zsh/ksh will exec a single command in the same process. 
This - // makes certain there's a subprocess in the shell. - "command": cty.StringVal("sleep 30; sleep 30"), - })) - if err != nil { - t.Fatal(err) - } - - doneCh := make(chan struct{}) - startTime := time.Now() - go func() { - defer close(doneCh) - // The functionality of p.Apply is tested in TestResourceProvider_Apply. - // Because p.Apply is called in a goroutine, trying to t.Fatal() on its - // result would be ignored or would cause a panic if the parent goroutine - // has already completed. - _ = p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: c, - UIOutput: output, - }) - }() - - mustExceed := (50 * time.Millisecond) - select { - case <-doneCh: - t.Fatalf("expected to finish sometime after %s finished in %s", mustExceed, time.Since(startTime)) - case <-time.After(mustExceed): - t.Logf("correctly took longer than %s", mustExceed) - } - - // Stop it - stopTime := time.Now() - p.Stop() - - maxTempl := "expected to finish under %s, finished in %s" - finishWithin := (2 * time.Second) - select { - case <-doneCh: - t.Logf(maxTempl, finishWithin, time.Since(stopTime)) - case <-time.After(finishWithin): - t.Fatalf(maxTempl, finishWithin, time.Since(stopTime)) - } -} - -func TestResourceProvider_ApplyCustomInterpreter(t *testing.T) { - output := cli.NewMockUi() - p := New() - - schema := p.GetSchema().Provisioner - - c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "interpreter": cty.ListVal([]cty.Value{cty.StringVal("echo"), cty.StringVal("is")}), - "command": cty.StringVal("not really an interpreter"), - })) - if err != nil { - t.Fatal(err) - } - - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: c, - UIOutput: output, - }) - - if resp.Diagnostics.HasErrors() { - t.Fatal(resp.Diagnostics.Err()) - } - - got := strings.TrimSpace(output.OutputWriter.String()) - want := `Executing: ["echo" "is" "not really an interpreter"] -is not really an interpreter` - if got != want { - t.Errorf("wrong output\ngot: 
%s\nwant: %s", got, want) - } -} - -func TestResourceProvider_ApplyCustomWorkingDirectory(t *testing.T) { - testdir := "working_dir_test" - os.Mkdir(testdir, 0755) - defer os.Remove(testdir) - - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - - c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "working_dir": cty.StringVal(testdir), - "command": cty.StringVal("echo `pwd`"), - })) - if err != nil { - t.Fatal(err) - } - - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: c, - UIOutput: output, - }) - - if resp.Diagnostics.HasErrors() { - t.Fatal(resp.Diagnostics.Err()) - } - - dir, err := os.Getwd() - if err != nil { - t.Fatalf("err: %v", err) - } - - got := strings.TrimSpace(output.OutputWriter.String()) - want := "Executing: [\"/bin/sh\" \"-c\" \"echo `pwd`\"]\n" + dir + "/" + testdir - if got != want { - t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) - } -} - -func TestResourceProvider_ApplyCustomEnv(t *testing.T) { - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - - c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo $FOO $BAR $BAZ"), - "environment": cty.MapVal(map[string]cty.Value{ - "FOO": cty.StringVal("BAR"), - "BAR": cty.StringVal("1"), - "BAZ": cty.StringVal("true"), - }), - })) - if err != nil { - t.Fatal(err) - } - - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: c, - UIOutput: output, - }) - if resp.Diagnostics.HasErrors() { - t.Fatal(resp.Diagnostics.Err()) - } - - got := strings.TrimSpace(output.OutputWriter.String()) - want := `Executing: ["/bin/sh" "-c" "echo $FOO $BAR $BAZ"] -BAR 1 true` - if got != want { - t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) - } -} - -// Validate that Stop can Close can be called even when not provisioning. 
-func TestResourceProvisioner_StopClose(t *testing.T) { - p := New() - p.Stop() - p.Close() -} - -func TestResourceProvisioner_nullsInOptionals(t *testing.T) { - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - - for i, cfg := range []cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo OK"), - "environment": cty.MapVal(map[string]cty.Value{ - "FOO": cty.NullVal(cty.String), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo OK"), - "environment": cty.NullVal(cty.Map(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo OK"), - "interpreter": cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), - }), - cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo OK"), - "interpreter": cty.NullVal(cty.List(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - "command": cty.StringVal("echo OK"), - "working_dir": cty.NullVal(cty.String), - }), - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - - cfg, err := schema.CoerceValue(cfg) - if err != nil { - t.Fatal(err) - } - - // verifying there are no panics - p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: cfg, - UIOutput: output, - }) - }) - } -} diff --git a/internal/builtin/provisioners/remote-exec/resource_provisioner.go b/internal/builtin/provisioners/remote-exec/resource_provisioner.go deleted file mode 100644 index f8edfb78556c..000000000000 --- a/internal/builtin/provisioners/remote-exec/resource_provisioner.go +++ /dev/null @@ -1,294 +0,0 @@ -package remoteexec - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/hashicorp/terraform/internal/communicator" - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/provisioners" - 
"github.com/hashicorp/terraform/internal/tfdiags" - "github.com/mitchellh/go-linereader" - "github.com/zclconf/go-cty/cty" -) - -func New() provisioners.Interface { - ctx, cancel := context.WithCancel(context.Background()) - return &provisioner{ - ctx: ctx, - cancel: cancel, - } -} - -type provisioner struct { - // We store a context here tied to the lifetime of the provisioner. - // This allows the Stop method to cancel any in-flight requests. - ctx context.Context - cancel context.CancelFunc -} - -func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "inline": { - Type: cty.List(cty.String), - Optional: true, - }, - "script": { - Type: cty.String, - Optional: true, - }, - "scripts": { - Type: cty.List(cty.String), - Optional: true, - }, - }, - } - - resp.Provisioner = schema - return resp -} - -func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Invalid remote-exec provisioner configuration", - err.Error(), - )) - return resp - } - - inline := cfg.GetAttr("inline") - script := cfg.GetAttr("script") - scripts := cfg.GetAttr("scripts") - - set := 0 - if !inline.IsNull() { - set++ - } - if !script.IsNull() { - set++ - } - if !scripts.IsNull() { - set++ - } - if set != 1 { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Invalid remote-exec provisioner configuration", - `Only one of "inline", "script", or "scripts" must be set`, - )) - } - return resp -} - -func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - if req.Connection.IsNull() { - resp.Diagnostics = 
resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "remote-exec provisioner error", - "Missing connection configuration for provisioner.", - )) - return resp - } - - comm, err := communicator.New(req.Connection) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "remote-exec provisioner error", - err.Error(), - )) - return resp - } - - // Collect the scripts - scripts, err := collectScripts(req.Config) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "remote-exec provisioner error", - err.Error(), - )) - return resp - } - for _, s := range scripts { - defer s.Close() - } - - // Copy and execute each script - if err := runScripts(p.ctx, req.UIOutput, comm, scripts); err != nil { - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "remote-exec provisioner error", - err.Error(), - )) - return resp - } - - return resp -} - -func (p *provisioner) Stop() error { - p.cancel() - return nil -} - -func (p *provisioner) Close() error { - return nil -} - -// generateScripts takes the configuration and creates a script from each inline config -func generateScripts(inline cty.Value) ([]string, error) { - var lines []string - for _, l := range inline.AsValueSlice() { - if l.IsNull() { - return nil, errors.New("invalid null string in 'scripts'") - } - - s := l.AsString() - if s == "" { - return nil, errors.New("invalid empty string in 'scripts'") - } - lines = append(lines, s) - } - lines = append(lines, "") - - return []string{strings.Join(lines, "\n")}, nil -} - -// collectScripts is used to collect all the scripts we need -// to execute in preparation for copying them. 
-func collectScripts(v cty.Value) ([]io.ReadCloser, error) { - // Check if inline - if inline := v.GetAttr("inline"); !inline.IsNull() { - scripts, err := generateScripts(inline) - if err != nil { - return nil, err - } - - var r []io.ReadCloser - for _, script := range scripts { - r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) - } - - return r, nil - } - - // Collect scripts - var scripts []string - if script := v.GetAttr("script"); !script.IsNull() { - s := script.AsString() - if s == "" { - return nil, errors.New("invalid empty string in 'script'") - } - scripts = append(scripts, s) - } - - if scriptList := v.GetAttr("scripts"); !scriptList.IsNull() { - for _, script := range scriptList.AsValueSlice() { - if script.IsNull() { - return nil, errors.New("invalid null string in 'script'") - } - s := script.AsString() - if s == "" { - return nil, errors.New("invalid empty string in 'script'") - } - scripts = append(scripts, s) - } - } - - // Open all the scripts - var fhs []io.ReadCloser - for _, s := range scripts { - fh, err := os.Open(s) - if err != nil { - for _, fh := range fhs { - fh.Close() - } - return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) - } - fhs = append(fhs, fh) - } - - // Done, return the file handles - return fhs, nil -} - -// runScripts is used to copy and execute a set of scripts -func runScripts(ctx context.Context, o provisioners.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Wait and retry until we establish the connection - err := communicator.Retry(retryCtx, func() error { - return comm.Connect(o) - }) - if err != nil { - return err - } - - // Wait for the context to end and then disconnect - go func() { - <-ctx.Done() - comm.Disconnect() - }() - - for _, script := range scripts { - var cmd *remote.Cmd - - outR, outW := io.Pipe() - errR, errW := io.Pipe() - defer outW.Close() - defer errW.Close() 
- - go copyUIOutput(o, outR) - go copyUIOutput(o, errR) - - remotePath := comm.ScriptPath() - - if err := comm.UploadScript(remotePath, script); err != nil { - return fmt.Errorf("Failed to upload script: %v", err) - } - - cmd = &remote.Cmd{ - Command: remotePath, - Stdout: outW, - Stderr: errW, - } - if err := comm.Start(cmd); err != nil { - return fmt.Errorf("Error starting script: %v", err) - } - - if err := cmd.Wait(); err != nil { - return err - } - - // Upload a blank follow up file in the same path to prevent residual - // script contents from remaining on remote machine - empty := bytes.NewReader([]byte("")) - if err := comm.Upload(remotePath, empty); err != nil { - // This feature is best-effort. - log.Printf("[WARN] Failed to upload empty follow up script: %v", err) - } - } - - return nil -} - -func copyUIOutput(o provisioners.UIOutput, r io.Reader) { - lr := linereader.New(r) - for line := range lr.Ch { - o.Output(line) - } -} diff --git a/internal/builtin/provisioners/remote-exec/resource_provisioner_test.go b/internal/builtin/provisioners/remote-exec/resource_provisioner_test.go deleted file mode 100644 index 549dbf30165c..000000000000 --- a/internal/builtin/provisioners/remote-exec/resource_provisioner_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package remoteexec - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "testing" - "time" - - "strings" - - "github.com/hashicorp/terraform/internal/communicator" - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" -) - -func TestResourceProvider_Validate_good(t *testing.T) { - c := cty.ObjectVal(map[string]cty.Value{ - "inline": cty.ListVal([]cty.Value{cty.StringVal("echo foo")}), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: c, - }) - if len(resp.Diagnostics) > 0 { - t.Fatal(resp.Diagnostics.ErrWithWarnings()) - } -} 
- -func TestResourceProvider_Validate_bad(t *testing.T) { - c := cty.ObjectVal(map[string]cty.Value{ - "invalid": cty.StringVal("nope"), - }) - - resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: c, - }) - if !resp.Diagnostics.HasErrors() { - t.Fatalf("Should have errors") - } -} - -var expectedScriptOut = `cd /tmp -wget http://foobar -exit 0 -` - -func TestResourceProvider_generateScript(t *testing.T) { - inline := cty.ListVal([]cty.Value{ - cty.StringVal("cd /tmp"), - cty.StringVal("wget http://foobar"), - cty.StringVal("exit 0"), - }) - - out, err := generateScripts(inline) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(out) != 1 { - t.Fatal("expected 1 out") - } - - if out[0] != expectedScriptOut { - t.Fatalf("bad: %v", out) - } -} - -func TestResourceProvider_generateScriptEmptyInline(t *testing.T) { - inline := cty.ListVal([]cty.Value{cty.StringVal("")}) - - _, err := generateScripts(inline) - if err == nil { - t.Fatal("expected error, got none") - } - - if !strings.Contains(err.Error(), "empty string") { - t.Fatalf("expected empty string error, got: %s", err) - } -} - -func TestResourceProvider_CollectScripts_inline(t *testing.T) { - conf := map[string]cty.Value{ - "inline": cty.ListVal([]cty.Value{ - cty.StringVal("cd /tmp"), - cty.StringVal("wget http://foobar"), - cty.StringVal("exit 0"), - }), - } - - scripts, err := collectScripts(cty.ObjectVal(conf)) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(scripts) != 1 { - t.Fatalf("bad: %v", scripts) - } - - var out bytes.Buffer - _, err = io.Copy(&out, scripts[0]) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out.String() != expectedScriptOut { - t.Fatalf("bad: %v", out.String()) - } -} - -func TestResourceProvider_CollectScripts_script(t *testing.T) { - p := New() - schema := p.GetSchema().Provisioner - - conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "scripts": cty.ListVal([]cty.Value{ - 
cty.StringVal("testdata/script1.sh"), - }), - })) - if err != nil { - t.Fatal(err) - } - - scripts, err := collectScripts(conf) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(scripts) != 1 { - t.Fatalf("bad: %v", scripts) - } - - var out bytes.Buffer - _, err = io.Copy(&out, scripts[0]) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out.String() != expectedScriptOut { - t.Fatalf("bad: %v", out.String()) - } -} - -func TestResourceProvider_CollectScripts_scripts(t *testing.T) { - p := New() - schema := p.GetSchema().Provisioner - - conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "scripts": cty.ListVal([]cty.Value{ - cty.StringVal("testdata/script1.sh"), - cty.StringVal("testdata/script1.sh"), - cty.StringVal("testdata/script1.sh"), - }), - })) - if err != nil { - log.Fatal(err) - } - - scripts, err := collectScripts(conf) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(scripts) != 3 { - t.Fatalf("bad: %v", scripts) - } - - for idx := range scripts { - var out bytes.Buffer - _, err = io.Copy(&out, scripts[idx]) - if err != nil { - t.Fatalf("err: %v", err) - } - - if out.String() != expectedScriptOut { - t.Fatalf("bad: %v", out.String()) - } - } -} - -func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) { - p := New() - schema := p.GetSchema().Provisioner - - conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "scripts": cty.ListVal([]cty.Value{cty.StringVal("")}), - })) - if err != nil { - t.Fatal(err) - } - - _, err = collectScripts(conf) - if err == nil { - t.Fatal("expected error") - } - - if !strings.Contains(err.Error(), "empty string") { - t.Fatalf("Expected empty string error, got: %s", err) - } -} - -func TestProvisionerTimeout(t *testing.T) { - o := cli.NewMockUi() - c := new(communicator.MockCommunicator) - - disconnected := make(chan struct{}) - c.DisconnectFunc = func() error { - close(disconnected) - return nil - } - - completed := make(chan struct{}) - c.CommandFunc 
= func(cmd *remote.Cmd) error { - defer close(completed) - cmd.Init() - time.Sleep(2 * time.Second) - cmd.SetExitStatus(0, nil) - return nil - } - c.ConnTimeout = time.Second - c.UploadScripts = map[string]string{"hello": "echo hello"} - c.RemoteScriptPath = "hello" - - conf := map[string]cty.Value{ - "inline": cty.ListVal([]cty.Value{cty.StringVal("echo hello")}), - } - - scripts, err := collectScripts(cty.ObjectVal(conf)) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - - done := make(chan struct{}) - - var runErr error - go func() { - defer close(done) - runErr = runScripts(ctx, o, c, scripts) - }() - - select { - case <-disconnected: - t.Fatal("communicator disconnected before command completed") - case <-completed: - } - - <-done - if runErr != nil { - t.Fatal(err) - } -} - -// Validate that Stop can Close can be called even when not provisioning. -func TestResourceProvisioner_StopClose(t *testing.T) { - p := New() - p.Stop() - p.Close() -} - -func TestResourceProvisioner_connectionRequired(t *testing.T) { - p := New() - resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) - if !resp.Diagnostics.HasErrors() { - t.Fatal("expected error") - } - - got := resp.Diagnostics.Err().Error() - if !strings.Contains(got, "Missing connection") { - t.Fatalf("expected 'Missing connection' error: got %q", got) - } -} - -func TestResourceProvisioner_nullsInOptionals(t *testing.T) { - output := cli.NewMockUi() - p := New() - schema := p.GetSchema().Provisioner - - for i, cfg := range []cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "script": cty.StringVal("echo"), - "inline": cty.NullVal(cty.List(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - "inline": cty.ListVal([]cty.Value{ - cty.NullVal(cty.String), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "script": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "scripts": cty.NullVal(cty.List(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - 
"scripts": cty.ListVal([]cty.Value{ - cty.NullVal(cty.String), - }), - }), - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - - cfg, err := schema.CoerceValue(cfg) - if err != nil { - t.Fatal(err) - } - - // verifying there are no panics - p.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: cfg, - UIOutput: output, - }) - }) - } -} diff --git a/internal/checks/state.go b/internal/checks/state.go deleted file mode 100644 index 2d9d7e188dba..000000000000 --- a/internal/checks/state.go +++ /dev/null @@ -1,290 +0,0 @@ -package checks - -import ( - "fmt" - "sort" - "sync" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// State is a container for state tracking of all of the the checks declared in -// a particular Terraform configuration and their current statuses. -// -// A State object is mutable during plan and apply operations but should -// otherwise be treated as a read-only snapshot of the status of checks -// at a particular moment. -// -// The checks State tracks a few different concepts: -// - configuration objects: items in the configuration which statically -// declare some checks associated with zero or more checkable objects. -// - checkable objects: dynamically-determined objects that are each -// associated with one configuration object. -// - checks: a single check that is declared as part of a configuration -// object and then resolved once for each of its associated checkable -// objects. -// - check statuses: the current state of a particular check associated -// with a particular checkable object. -// -// This container type is concurrency-safe for both reads and writes through -// its various methods. 
-type State struct { - mu sync.Mutex - - statuses addrs.Map[addrs.ConfigCheckable, *configCheckableState] - failureMsgs addrs.Map[addrs.Check, string] -} - -// configCheckableState is an internal part of type State that represents -// the evaluation status for a particular addrs.ConfigCheckable address. -// -// Its initial state, at the beginning of a run, is that it doesn't even know -// how many checkable objects will be dynamically-declared yet. Terraform Core -// will notify the State object of the associated Checkables once -// it has decided the appropriate expansion of that configuration object, -// and then will gradually report the results of each check once the graph -// walk reaches it. -// -// This must be accessed only while holding the mutex inside the associated -// State object. -type configCheckableState struct { - // checkTypes captures the expected number of checks of each type - // associated with object declared by this configuration construct. Since - // checks are statically declared (even though the checkable objects - // aren't) we can compute this only from the configuration. - checkTypes map[addrs.CheckType]int - - // objects represents the set of dynamic checkable objects associated - // with this configuration construct. This is initially nil to represent - // that we don't know the objects yet, and is replaced by a non-nil map - // once Terraform Core reports the expansion of this configuration - // construct. - // - // The leaf Status values will initially be StatusUnknown - // and then gradually updated by Terraform Core as it visits the - // individual checkable objects and reports their status. - objects addrs.Map[addrs.Checkable, map[addrs.CheckType][]Status] -} - -// NOTE: For the "Report"-prefixed methods that we use to gradually update -// the structure with results during a plan or apply operation, see the -// state_report.go file also in this package. 
- -// NewState returns a new State object representing the check statuses of -// objects declared in the given configuration. -// -// The configuration determines which configuration objects and associated -// checks we'll be expecting to see, so that we can seed their statuses as -// all unknown until we see affirmative reports sent by the Report-prefixed -// methods on Checks. -func NewState(config *configs.Config) *State { - return &State{ - statuses: initialStatuses(config), - } -} - -// ConfigHasChecks returns true if and only if the given address refers to -// a configuration object that this State object is expecting to recieve -// statuses for. -// -// Other methods of Checks will typically panic if given a config address -// that would not have returned true from ConfigHasChecked. -func (c *State) ConfigHasChecks(addr addrs.ConfigCheckable) bool { - c.mu.Lock() - defer c.mu.Unlock() - return c.statuses.Has(addr) -} - -// AllConfigAddrs returns all of the addresses of all configuration objects -// that could potentially produce checkable objects at runtime. -// -// This is a good starting point for reporting on the outcome of all of the -// configured checks at the configuration level of granularity, e.g. for -// automated testing reports where we want to report the status of all -// configured checks even if the graph walk aborted before we reached any -// of their objects. -func (c *State) AllConfigAddrs() addrs.Set[addrs.ConfigCheckable] { - c.mu.Lock() - defer c.mu.Unlock() - return c.statuses.Keys() -} - -// ObjectAddrs returns the addresses of individual checkable objects belonging -// to the configuration object with the given address. -// -// This will panic if the given address isn't a known configuration object -// that has checks. 
-func (c *State) ObjectAddrs(configAddr addrs.ConfigCheckable) addrs.Set[addrs.Checkable] { - c.mu.Lock() - defer c.mu.Unlock() - - st, ok := c.statuses.GetOk(configAddr) - if !ok { - panic(fmt.Sprintf("unknown configuration object %s", configAddr)) - } - - ret := addrs.MakeSet[addrs.Checkable]() - for _, elem := range st.objects.Elems { - ret.Add(elem.Key) - } - return ret - -} - -// AggregateCheckStatus returns a summarization of all of the check results -// for a particular configuration object into a single status. -// -// The given address must refer to an object within the configuration that -// this Checks was instantiated from, or this method will panic. -func (c *State) AggregateCheckStatus(addr addrs.ConfigCheckable) Status { - c.mu.Lock() - defer c.mu.Unlock() - - st, ok := c.statuses.GetOk(addr) - if !ok { - panic(fmt.Sprintf("request for status of unknown configuration object %s", addr)) - } - - if st.objects.Elems == nil { - // If we don't even know how many objects we have for this - // configuration construct then that summarizes as unknown. - // (Note: this is different than Elems being a non-nil empty map, - // which means that we know there are zero objects and therefore - // the aggregate result will be pass to pass below.) - return StatusUnknown - } - - // Otherwise, our result depends on how many of our known objects are - // in each status. - errorCount := 0 - failCount := 0 - unknownCount := 0 - - for _, objects := range st.objects.Elems { - for _, checks := range objects.Value { - for _, status := range checks { - switch status { - case StatusPass: - // ok - case StatusFail: - failCount++ - case StatusError: - errorCount++ - default: - unknownCount++ - } - } - } - } - - return summarizeCheckStatuses(errorCount, failCount, unknownCount) -} - -// ObjectCheckStatus returns a summarization of all of the check results -// for a particular checkable object into a single status. 
-// -// The given address must refer to a checkable object that Terraform Core -// previously reported while doing a graph walk, or this method will panic. -func (c *State) ObjectCheckStatus(addr addrs.Checkable) Status { - c.mu.Lock() - defer c.mu.Unlock() - - configAddr := addr.ConfigCheckable() - - st, ok := c.statuses.GetOk(configAddr) - if !ok { - panic(fmt.Sprintf("request for status of unknown object %s", addr)) - } - if st.objects.Elems == nil { - panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) - } - checks, ok := st.objects.GetOk(addr) - if !ok { - panic(fmt.Sprintf("request for status of unknown object %s", addr)) - } - - errorCount := 0 - failCount := 0 - unknownCount := 0 - for _, statuses := range checks { - for _, status := range statuses { - switch status { - case StatusPass: - // ok - case StatusFail: - failCount++ - case StatusError: - errorCount++ - default: - unknownCount++ - } - } - } - return summarizeCheckStatuses(errorCount, failCount, unknownCount) -} - -// ObjectFailureMessages returns the zero or more failure messages reported -// for the object with the given address. -// -// Failure messages are recorded only for checks whose status is StatusFail, -// but since this aggregates together the results of all of the checks -// on the given object it's possible for there to be a mixture of failures -// and errors at the same time, which would aggregate as StatusError in -// ObjectCheckStatus's result because errors are defined as "stronger" -// than failures. 
-func (c *State) ObjectFailureMessages(addr addrs.Checkable) []string { - var ret []string - - configAddr := addr.ConfigCheckable() - - st, ok := c.statuses.GetOk(configAddr) - if !ok { - panic(fmt.Sprintf("request for status of unknown object %s", addr)) - } - if st.objects.Elems == nil { - panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) - } - checksByType, ok := st.objects.GetOk(addr) - if !ok { - panic(fmt.Sprintf("request for status of unknown object %s", addr)) - } - - for checkType, checks := range checksByType { - for i, status := range checks { - if status == StatusFail { - checkAddr := addrs.NewCheck(addr, checkType, i) - msg := c.failureMsgs.Get(checkAddr) - if msg != "" { - ret = append(ret, msg) - } - } - } - } - - // We always return the messages in a lexical sort order just so that - // it'll be consistent between runs if we still have the same problems. - sort.Strings(ret) - - return ret -} - -func summarizeCheckStatuses(errorCount, failCount, unknownCount int) Status { - switch { - case errorCount > 0: - // If we saw any errors then we'll treat the whole thing as errored. - return StatusError - case failCount > 0: - // If anything failed then this whole configuration construct failed. - return StatusFail - case unknownCount > 0: - // If nothing failed but we still have unknowns then our outcome isn't - // known yet. - return StatusUnknown - default: - // If we have no failures and no unknowns then either we have all - // passes or no checkable objects at all, both of which summarize as - // a pass. 
- return StatusPass - } -} diff --git a/internal/checks/state_test.go b/internal/checks/state_test.go deleted file mode 100644 index 8c0f0d447fb7..000000000000 --- a/internal/checks/state_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package checks - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/initwd" -) - -func TestChecksHappyPath(t *testing.T) { - const fixtureDir = "testdata/happypath" - loader, close := configload.NewLoaderForTests(t) - defer close() - inst := initwd.NewModuleInstaller(loader.ModulesDir(), nil) - _, instDiags := inst.InstallModules(context.Background(), fixtureDir, true, initwd.ModuleInstallHooksImpl{}) - if instDiags.HasErrors() { - t.Fatal(instDiags.Err()) - } - if err := loader.RefreshModules(); err != nil { - t.Fatalf("failed to refresh modules after installation: %s", err) - } - - ///////////////////////////////////////////////////////////////////////// - - cfg, hclDiags := loader.LoadConfig(fixtureDir) - if hclDiags.HasErrors() { - t.Fatalf("invalid configuration: %s", hclDiags.Error()) - } - - resourceA := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "a", - }.InModule(addrs.RootModule) - resourceNoChecks := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "no_checks", - }.InModule(addrs.RootModule) - resourceNonExist := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "nonexist", - }.InModule(addrs.RootModule) - rootOutput := addrs.OutputValue{ - Name: "a", - }.InModule(addrs.RootModule) - moduleChild := addrs.RootModule.Child("child") - resourceB := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "b", - }.InModule(moduleChild) - resourceC := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - 
Name: "c", - }.InModule(moduleChild) - childOutput := addrs.OutputValue{ - Name: "b", - }.InModule(moduleChild) - - // First some consistency checks to make sure our configuration is the - // shape we are relying on it to be. - if addr := resourceA; cfg.Module.ResourceByAddr(addr.Resource) == nil { - t.Fatalf("configuration does not include %s", addr) - } - if addr := resourceB; cfg.Children["child"].Module.ResourceByAddr(addr.Resource) == nil { - t.Fatalf("configuration does not include %s", addr) - } - if addr := resourceNoChecks; cfg.Module.ResourceByAddr(addr.Resource) == nil { - t.Fatalf("configuration does not include %s", addr) - } - if addr := resourceNonExist; cfg.Module.ResourceByAddr(addr.Resource) != nil { - t.Fatalf("configuration includes %s, which is not supposed to exist", addr) - } - - ///////////////////////////////////////////////////////////////////////// - - checks := NewState(cfg) - - missing := 0 - if addr := resourceA; !checks.ConfigHasChecks(addr) { - t.Errorf("checks not detected for %s", addr) - missing++ - } - if addr := resourceB; !checks.ConfigHasChecks(addr) { - t.Errorf("checks not detected for %s", addr) - missing++ - } - if addr := resourceC; !checks.ConfigHasChecks(addr) { - t.Errorf("checks not detected for %s", addr) - missing++ - } - if addr := rootOutput; !checks.ConfigHasChecks(addr) { - t.Errorf("checks not detected for %s", addr) - missing++ - } - if addr := childOutput; !checks.ConfigHasChecks(addr) { - t.Errorf("checks not detected for %s", addr) - missing++ - } - if addr := resourceNoChecks; checks.ConfigHasChecks(addr) { - t.Errorf("checks detected for %s, even though it has none", addr) - } - if addr := resourceNonExist; checks.ConfigHasChecks(addr) { - t.Errorf("checks detected for %s, even though it doesn't exist", addr) - } - if missing > 0 { - t.Fatalf("missing some configuration objects we'd need for subsequent testing") - } - - ///////////////////////////////////////////////////////////////////////// - - // 
Everything should start with status unknown. - - { - wantConfigAddrs := addrs.MakeSet[addrs.ConfigCheckable]( - resourceA, - resourceB, - resourceC, - rootOutput, - childOutput, - ) - gotConfigAddrs := checks.AllConfigAddrs() - if diff := cmp.Diff(wantConfigAddrs, gotConfigAddrs); diff != "" { - t.Errorf("wrong detected config addresses\n%s", diff) - } - - for _, configAddr := range gotConfigAddrs { - if got, want := checks.AggregateCheckStatus(configAddr), StatusUnknown; got != want { - t.Errorf("incorrect initial aggregate check status for %s: %s, but want %s", configAddr, got, want) - } - } - } - - ///////////////////////////////////////////////////////////////////////// - - // The following are steps that would normally be done by Terraform Core - // as part of visiting checkable objects during the graph walk. We're - // simulating a likely sequence of calls here for testing purposes, but - // Terraform Core won't necessarily visit all of these in exactly the - // same order every time and so this is just one possible valid ordering - // of calls. 
- - resourceInstA := resourceA.Resource.Absolute(addrs.RootModuleInstance).Instance(addrs.NoKey) - rootOutputInst := rootOutput.OutputValue.Absolute(addrs.RootModuleInstance) - moduleChildInst := addrs.RootModuleInstance.Child("child", addrs.NoKey) - resourceInstB := resourceB.Resource.Absolute(moduleChildInst).Instance(addrs.NoKey) - resourceInstC0 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(0)) - resourceInstC1 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(1)) - childOutputInst := childOutput.OutputValue.Absolute(moduleChildInst) - - checks.ReportCheckableObjects(resourceA, addrs.MakeSet[addrs.Checkable](resourceInstA)) - checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 0, StatusPass) - checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 1, StatusPass) - checks.ReportCheckResult(resourceInstA, addrs.ResourcePostcondition, 0, StatusPass) - - checks.ReportCheckableObjects(resourceB, addrs.MakeSet[addrs.Checkable](resourceInstB)) - checks.ReportCheckResult(resourceInstB, addrs.ResourcePrecondition, 0, StatusPass) - - checks.ReportCheckableObjects(resourceC, addrs.MakeSet[addrs.Checkable](resourceInstC0, resourceInstC1)) - checks.ReportCheckResult(resourceInstC0, addrs.ResourcePostcondition, 0, StatusPass) - checks.ReportCheckResult(resourceInstC1, addrs.ResourcePostcondition, 0, StatusPass) - - checks.ReportCheckableObjects(childOutput, addrs.MakeSet[addrs.Checkable](childOutputInst)) - checks.ReportCheckResult(childOutputInst, addrs.OutputPrecondition, 0, StatusPass) - - checks.ReportCheckableObjects(rootOutput, addrs.MakeSet[addrs.Checkable](rootOutputInst)) - checks.ReportCheckResult(rootOutputInst, addrs.OutputPrecondition, 0, StatusPass) - - ///////////////////////////////////////////////////////////////////////// - - // This "section" is simulating what we might do to report the results - // of the checks after a run completes. 
- - { - configCount := 0 - for _, configAddr := range checks.AllConfigAddrs() { - configCount++ - if got, want := checks.AggregateCheckStatus(configAddr), StatusPass; got != want { - t.Errorf("incorrect final aggregate check status for %s: %s, but want %s", configAddr, got, want) - } - } - if got, want := configCount, 5; got != want { - t.Errorf("incorrect number of known config addresses %d; want %d", got, want) - } - } - - { - objAddrs := addrs.MakeSet[addrs.Checkable]( - resourceInstA, - rootOutputInst, - resourceInstB, - resourceInstC0, - resourceInstC1, - childOutputInst, - ) - for _, addr := range objAddrs { - if got, want := checks.ObjectCheckStatus(addr), StatusPass; got != want { - t.Errorf("incorrect final check status for object %s: %s, but want %s", addr, got, want) - } - } - } -} diff --git a/internal/cloud/backend.go b/internal/cloud/backend.go deleted file mode 100644 index 4625a4d43140..000000000000 --- a/internal/cloud/backend.go +++ /dev/null @@ -1,1213 +0,0 @@ -package cloud - -import ( - "context" - "errors" - "fmt" - "log" - "net/http" - "net/url" - "os" - "sort" - "strings" - "sync" - "time" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -const ( - 
defaultHostname = "app.terraform.io" - defaultParallelism = 10 - tfeServiceID = "tfe.v2" - headerSourceKey = "X-Terraform-Integration" - headerSourceValue = "cloud" - genericHostname = "localterraform.com" -) - -// Cloud is an implementation of EnhancedBackend in service of the Terraform Cloud/Enterprise -// integration for Terraform CLI. This backend is not intended to be surfaced at the user level and -// is instead an implementation detail of cloud.Cloud. -type Cloud struct { - // CLI and Colorize control the CLI output. If CLI is nil then no CLI - // output will be done. If CLIColor is nil then no coloring will be done. - CLI cli.Ui - CLIColor *colorstring.Colorize - - // ContextOpts are the base context options to set when initializing a - // new Terraform context. Many of these will be overridden or merged by - // Operation. See Operation for more details. - ContextOpts *terraform.ContextOpts - - // client is the Terraform Cloud/Enterprise API client. - client *tfe.Client - - // lastRetry is set to the last time a request was retried. - lastRetry time.Time - - // hostname of Terraform Cloud or Terraform Enterprise - hostname string - - // token for Terraform Cloud or Terraform Enterprise - token string - - // organization is the organization that contains the target workspaces. - organization string - - // WorkspaceMapping contains strategies for mapping CLI workspaces in the working directory - // to remote Terraform Cloud workspaces. - WorkspaceMapping WorkspaceMapping - - // services is used for service discovery - services *disco.Disco - - // renderer is used for rendering JSON plan output and streamed logs. - renderer *jsonformat.Renderer - - // local allows local operations, where Terraform Cloud serves as a state storage backend. - local backend.Enhanced - - // forceLocal, if true, will force the use of the local backend. 
- forceLocal bool - - // opLock locks operations - opLock sync.Mutex - - // ignoreVersionConflict, if true, will disable the requirement that the - // local Terraform version matches the remote workspace's configured - // version. This will also cause VerifyWorkspaceTerraformVersion to return - // a warning diagnostic instead of an error. - ignoreVersionConflict bool - - runningInAutomation bool - - // input stores the value of the -input flag, since it will be used - // to determine whether or not to ask the user for approval of a run. - input bool -} - -var _ backend.Backend = (*Cloud)(nil) -var _ backend.Enhanced = (*Cloud)(nil) -var _ backend.Local = (*Cloud)(nil) - -// New creates a new initialized cloud backend. -func New(services *disco.Disco) *Cloud { - return &Cloud{ - services: services, - } -} - -// ConfigSchema implements backend.Enhanced. -func (b *Cloud) ConfigSchema() *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "hostname": { - Type: cty.String, - Optional: true, - Description: schemaDescriptionHostname, - }, - "organization": { - Type: cty.String, - Optional: true, - Description: schemaDescriptionOrganization, - }, - "token": { - Type: cty.String, - Optional: true, - Description: schemaDescriptionToken, - }, - }, - - BlockTypes: map[string]*configschema.NestedBlock{ - "workspaces": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": { - Type: cty.String, - Optional: true, - Description: schemaDescriptionName, - }, - "tags": { - Type: cty.Set(cty.String), - Optional: true, - Description: schemaDescriptionTags, - }, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - } -} - -// PrepareConfig implements backend.Backend. -func (b *Cloud) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - if obj.IsNull() { - return obj, diags - } - - // check if organization is specified in the config. 
- if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" { - // organization is specified in the config but is invalid, so - // we'll fallback on TF_CLOUD_ORGANIZATION - if val := os.Getenv("TF_CLOUD_ORGANIZATION"); val == "" { - diags = diags.Append(missingConfigAttributeAndEnvVar("organization", "TF_CLOUD_ORGANIZATION")) - } - } - - WorkspaceMapping := WorkspaceMapping{} - if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { - if val := workspaces.GetAttr("name"); !val.IsNull() { - WorkspaceMapping.Name = val.AsString() - } - if val := workspaces.GetAttr("tags"); !val.IsNull() { - err := gocty.FromCtyValue(val, &WorkspaceMapping.Tags) - if err != nil { - log.Panicf("An unxpected error occurred: %s", err) - } - } - } else { - WorkspaceMapping.Name = os.Getenv("TF_WORKSPACE") - } - - switch WorkspaceMapping.Strategy() { - // Make sure have a workspace mapping strategy present - case WorkspaceNoneStrategy: - diags = diags.Append(invalidWorkspaceConfigMissingValues) - // Make sure that a workspace name is configured. - case WorkspaceInvalidStrategy: - diags = diags.Append(invalidWorkspaceConfigMisconfiguration) - } - - return obj, diags -} - -// configureGenericHostname aliases the cloud backend hostname configuration -// as a generic "localterraform.com" hostname. This was originally added as a -// Terraform Enterprise feature and is useful for re-using whatever the -// Cloud/Enterprise backend host is in nested module sources in order -// to prevent code churn when re-using config between multiple -// Terraform Enterprise environments. -func (b *Cloud) configureGenericHostname() { - // This won't be an error for the given constant value - genericHost, _ := svchost.ForComparison(genericHostname) - - // This won't be an error because, by this time, the hostname has been parsed and - // service discovery requests made against it. 
- targetHost, _ := svchost.ForComparison(b.hostname) - - b.services.Alias(genericHost, targetHost) -} - -// Configure implements backend.Enhanced. -func (b *Cloud) Configure(obj cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if obj.IsNull() { - return diags - } - - diagErr := b.setConfigurationFields(obj) - if diagErr.HasErrors() { - return diagErr - } - - // Discover the service URL to confirm that it provides the Terraform Cloud/Enterprise API - service, err := b.discover() - - // Check for errors before we continue. - if err != nil { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - strings.ToUpper(err.Error()[:1])+err.Error()[1:], - "", // no description is needed here, the error is clear - cty.Path{cty.GetAttrStep{Name: "hostname"}}, - )) - return diags - } - - // First we'll retrieve the token from the configuration - var token string - if val := obj.GetAttr("token"); !val.IsNull() { - token = val.AsString() - } - - // Get the token from the CLI Config File in the credentials section - // if no token was not set in the configuration - if token == "" { - token, err = b.cliConfigToken() - if err != nil { - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - strings.ToUpper(err.Error()[:1])+err.Error()[1:], - "", // no description is needed here, the error is clear - cty.Path{cty.GetAttrStep{Name: "hostname"}}, - )) - return diags - } - } - - // Return an error if we still don't have a token at this point. 
- if token == "" { - loginCommand := "terraform login" - if b.hostname != defaultHostname { - loginCommand = loginCommand + " " + b.hostname - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Required token could not be found", - fmt.Sprintf( - "Run the following command to generate a token for %s:\n %s", - b.hostname, - loginCommand, - ), - )) - return diags - } - - b.token = token - b.configureGenericHostname() - - if b.client == nil { - cfg := &tfe.Config{ - Address: service.String(), - BasePath: service.Path, - Token: token, - Headers: make(http.Header), - RetryLogHook: b.retryLogHook, - } - - // Set the version header to the current version. - cfg.Headers.Set(tfversion.Header, tfversion.Version) - cfg.Headers.Set(headerSourceKey, headerSourceValue) - - // Create the TFC/E API client. - b.client, err = tfe.NewClient(cfg) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create the Terraform Cloud/Enterprise client", - fmt.Sprintf( - `Encountered an unexpected error while creating the `+ - `Terraform Cloud/Enterprise client: %s.`, err, - ), - )) - return diags - } - } - - // Check if the organization exists by reading its entitlements. 
- entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization) - if err != nil { - if err == tfe.ErrResourceNotFound { - err = fmt.Errorf("organization %q at host %s not found.\n\n"+ - "Please ensure that the organization and hostname are correct "+ - "and that your API token for %s is valid.", - b.organization, b.hostname, b.hostname) - } - diags = diags.Append(tfdiags.AttributeValue( - tfdiags.Error, - fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname), - fmt.Sprintf("Encountered an unexpected error while reading the "+ - "organization settings: %s", err), - cty.Path{cty.GetAttrStep{Name: "organization"}}, - )) - return diags - } - - if ws, ok := os.LookupEnv("TF_WORKSPACE"); ok { - if ws == b.WorkspaceMapping.Name || b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { - diag := b.validWorkspaceEnvVar(context.Background(), b.organization, ws) - if diag != nil { - diags = diags.Append(diag) - return diags - } - } - } - - // Check for the minimum version of Terraform Enterprise required. - // - // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, - // so if there's an error when parsing the RemoteAPIVersion, it's handled as - // equivalent to an API version < 2.3. - currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) - desiredAPIVersion, _ := version.NewVersion("2.5") - - if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { - log.Printf("[TRACE] API version check failed; want: >= %s, got: %s", desiredAPIVersion.Original(), currentAPIVersion) - if b.runningInAutomation { - // It should never be possible for this Terraform process to be mistakenly - // used internally within an unsupported Terraform Enterprise install - but - // just in case it happens, give an actionable error. 
- diags = diags.Append( - tfdiags.Sourceless( - tfdiags.Error, - "Unsupported Terraform Enterprise version", - cloudIntegrationUsedInUnsupportedTFE, - ), - ) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported Terraform Enterprise version", - `The 'cloud' option is not supported with this version of Terraform Enterprise.`, - ), - ) - } - } - - // Configure a local backend for when we need to run operations locally. - b.local = backendLocal.NewWithBackend(b) - b.forceLocal = b.forceLocal || !entitlements.Operations - - // Enable retries for server errors as the backend is now fully configured. - b.client.RetryServerErrors(true) - - return diags -} - -func (b *Cloud) setConfigurationFields(obj cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // Get the hostname. - b.hostname = os.Getenv("TF_CLOUD_HOSTNAME") - if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { - b.hostname = val.AsString() - } else if b.hostname == "" { - b.hostname = defaultHostname - } - - // We can have two options, setting the organization via the config - // or using TF_CLOUD_ORGANIZATION. Since PrepareConfig() validates that one of these - // values must exist, we'll initially set it to the env var and override it if - // specified in the configuration. - b.organization = os.Getenv("TF_CLOUD_ORGANIZATION") - - // Check if the organization is present and valid in the config. - if val := obj.GetAttr("organization"); !val.IsNull() && val.AsString() != "" { - b.organization = val.AsString() - } - - // Get the workspaces configuration block and retrieve the - // default workspace name. - if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { - - // PrepareConfig checks that you cannot set both of these. 
- if val := workspaces.GetAttr("name"); !val.IsNull() { - b.WorkspaceMapping.Name = val.AsString() - } - if val := workspaces.GetAttr("tags"); !val.IsNull() { - var tags []string - err := gocty.FromCtyValue(val, &tags) - if err != nil { - log.Panicf("An unexpected error occurred: %s", err) - } - - b.WorkspaceMapping.Tags = tags - } - } else { - b.WorkspaceMapping.Name = os.Getenv("TF_WORKSPACE") - } - - // Determine if we are forced to use the local backend. - b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" - - return diags -} - -// discover the TFC/E API service URL and version constraints. -func (b *Cloud) discover() (*url.URL, error) { - hostname, err := svchost.ForComparison(b.hostname) - if err != nil { - return nil, err - } - - host, err := b.services.Discover(hostname) - if err != nil { - var serviceDiscoErr *disco.ErrServiceDiscoveryNetworkRequest - - switch { - case errors.As(err, &serviceDiscoErr): - err = fmt.Errorf("a network issue prevented cloud configuration; %w", err) - return nil, err - default: - return nil, err - } - } - - service, err := host.ServiceURL(tfeServiceID) - // Return the error, unless its a disco.ErrVersionNotSupported error. - if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil { - return nil, err - } - - return service, err -} - -// cliConfigToken returns the token for this host as configured in the credentials -// section of the CLI Config File. If no token was configured, an empty -// string will be returned instead. 
-func (b *Cloud) cliConfigToken() (string, error) { - hostname, err := svchost.ForComparison(b.hostname) - if err != nil { - return "", err - } - creds, err := b.services.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) - return "", nil - } - if creds != nil { - return creds.Token(), nil - } - return "", nil -} - -// retryLogHook is invoked each time a request is retried allowing the -// backend to log any connection issues to prevent data loss. -func (b *Cloud) retryLogHook(attemptNum int, resp *http.Response) { - if b.CLI != nil { - // Ignore the first retry to make sure any delayed output will - // be written to the console before we start logging retries. - // - // The retry logic in the TFE client will retry both rate limited - // requests and server errors, but in the cloud backend we only - // care about server errors so we ignore rate limit (429) errors. - if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { - // Reset the last retry time. - b.lastRetry = time.Now() - return - } - - if attemptNum == 1 { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) - } else { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace( - fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) - } - } -} - -// Workspaces implements backend.Enhanced, returning a filtered list of workspace names according to -// the workspace mapping strategy configured. -func (b *Cloud) Workspaces() ([]string, error) { - // Create a slice to contain all the names. - var names []string - - // If configured for a single workspace, return that exact name only. The StateMgr for this - // backend will automatically create the remote workspace if it does not yet exist. - if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { - names = append(names, b.WorkspaceMapping.Name) - return names, nil - } - - // Otherwise, multiple workspaces are being mapped. 
Query Terraform Cloud for all the remote - // workspaces by the provided mapping strategy. - options := &tfe.WorkspaceListOptions{} - if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { - taglist := strings.Join(b.WorkspaceMapping.Tags, ",") - options.Tags = taglist - } - - for { - wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) - if err != nil { - return nil, err - } - - for _, w := range wl.Items { - names = append(names, w.Name) - } - - // Exit the loop when we've seen all pages. - if wl.CurrentPage >= wl.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = wl.NextPage - } - - // Sort the result so we have consistent output. - sort.StringSlice(names).Sort() - - return names, nil -} - -// DeleteWorkspace implements backend.Enhanced. -func (b *Cloud) DeleteWorkspace(name string, force bool) error { - if name == backend.DefaultStateName { - return backend.ErrDefaultWorkspaceNotSupported - } - - if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { - return backend.ErrWorkspacesNotSupported - } - - workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) - if err == tfe.ErrResourceNotFound { - return nil // If the workspace does not exist, succeed - } - - if err != nil { - return fmt.Errorf("failed to retrieve workspace %s: %v", name, err) - } - - // Configure the remote workspace name. - State := &State{tfeClient: b.client, organization: b.organization, workspace: workspace} - return State.Delete(force) -} - -// StateMgr implements backend.Enhanced. 
-func (b *Cloud) StateMgr(name string) (statemgr.Full, error) { - var remoteTFVersion string - - if name == backend.DefaultStateName { - return nil, backend.ErrDefaultWorkspaceNotSupported - } - - if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy && name != b.WorkspaceMapping.Name { - return nil, backend.ErrWorkspacesNotSupported - } - - workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) - if err != nil && err != tfe.ErrResourceNotFound { - return nil, fmt.Errorf("Failed to retrieve workspace %s: %v", name, err) - } - if workspace != nil { - remoteTFVersion = workspace.TerraformVersion - } - - if err == tfe.ErrResourceNotFound { - // Create a workspace - options := tfe.WorkspaceCreateOptions{ - Name: tfe.String(name), - Tags: b.WorkspaceMapping.tfeTags(), - } - - log.Printf("[TRACE] cloud: Creating Terraform Cloud workspace %s/%s", b.organization, name) - workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options) - if err != nil { - return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) - } - - remoteTFVersion = workspace.TerraformVersion - - // Attempt to set the new workspace to use this version of Terraform. This - // can fail if there's no enabled tool_version whose name matches our - // version string, but that's expected sometimes -- just warn and continue. - versionOptions := tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(tfversion.String()), - } - _, err := b.client.Workspaces.UpdateByID(context.Background(), workspace.ID, versionOptions) - if err == nil { - remoteTFVersion = tfversion.String() - } else { - // TODO: Ideally we could rely on the client to tell us what the actual - // problem was, but we currently can't get enough context from the error - // object to do a nicely formatted message, so we're just assuming the - // issue was that the version wasn't available since that's probably what - // happened. 
- log.Printf("[TRACE] cloud: Attempted to select version %s for TFC workspace; unavailable, so %s will be used instead.", tfversion.String(), workspace.TerraformVersion) - if b.CLI != nil { - versionUnavailable := fmt.Sprintf(unavailableTerraformVersion, tfversion.String(), workspace.TerraformVersion) - b.CLI.Output(b.Colorize().Color(versionUnavailable)) - } - } - } - - if b.workspaceTagsRequireUpdate(workspace, b.WorkspaceMapping) { - options := tfe.WorkspaceAddTagsOptions{ - Tags: b.WorkspaceMapping.tfeTags(), - } - log.Printf("[TRACE] cloud: Adding tags for Terraform Cloud workspace %s/%s", b.organization, name) - err = b.client.Workspaces.AddTags(context.Background(), workspace.ID, options) - if err != nil { - return nil, fmt.Errorf("Error updating workspace %s: %v", name, err) - } - } - - // This is a fallback error check. Most code paths should use other - // mechanisms to check the version, then set the ignoreVersionConflict - // field to true. This check is only in place to ensure that we don't - // accidentally upgrade state with a new code path, and the version check - // logic is coarser and simpler. - if !b.ignoreVersionConflict { - // Explicitly ignore the pseudo-version "latest" here, as it will cause - // plan and apply to always fail. - if remoteTFVersion != tfversion.String() && remoteTFVersion != "latest" { - return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", remoteTFVersion, tfversion.String()) - } - } - - return &State{tfeClient: b.client, organization: b.organization, workspace: workspace}, nil -} - -// Operation implements backend.Enhanced. -func (b *Cloud) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { - // Retrieve the workspace for this operation. - w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) - if err != nil { - return nil, err - } - - // Terraform remote version conflicts are not a concern for operations. 
We - // are in one of three states: - // - // - Running remotely, in which case the local version is irrelevant; - // - Workspace configured for local operations, in which case the remote - // version is meaningless; - // - Forcing local operations, which should only happen in the Terraform Cloud worker, in - // which case the Terraform versions by definition match. - b.IgnoreVersionConflict() - - // Check if we need to use the local backend to run the operation. - if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) { - // Record that we're forced to run operations locally to allow the - // command package UI to operate correctly - b.forceLocal = true - return b.local.Operation(ctx, op) - } - - // Set the remote workspace name. - op.Workspace = w.Name - - // Determine the function to call for our operation - var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) - switch op.Type { - case backend.OperationTypePlan: - f = b.opPlan - case backend.OperationTypeApply: - f = b.opApply - case backend.OperationTypeRefresh: - // The `terraform refresh` command has been deprecated in favor of `terraform apply -refresh-state`. - // Rather than respond with an error telling the user to run the other command we can just run - // that command instead. We will tell the user what we are doing, and then do it. - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(refreshToApplyRefresh) + "\n")) - } - op.PlanMode = plans.RefreshOnlyMode - op.PlanRefresh = true - op.AutoApprove = true - f = b.opApply - default: - return nil, fmt.Errorf( - "\n\nTerraform Cloud does not support the %q operation.", op.Type) - } - - // Lock - b.opLock.Lock() - - // Build our running operation - // the runninCtx is only used to block until the operation returns. 
- runningCtx, done := context.WithCancel(context.Background()) - runningOp := &backend.RunningOperation{ - Context: runningCtx, - PlanEmpty: true, - } - - // stopCtx wraps the context passed in, and is used to signal a graceful Stop. - stopCtx, stop := context.WithCancel(ctx) - runningOp.Stop = stop - - // cancelCtx is used to cancel the operation immediately, usually - // indicating that the process is exiting. - cancelCtx, cancel := context.WithCancel(context.Background()) - runningOp.Cancel = cancel - - // Do it. - go func() { - defer done() - defer stop() - defer cancel() - - defer b.opLock.Unlock() - - r, opErr := f(stopCtx, cancelCtx, op, w) - if opErr != nil && opErr != context.Canceled { - var diags tfdiags.Diagnostics - diags = diags.Append(opErr) - op.ReportResult(runningOp, diags) - return - } - - if r == nil && opErr == context.Canceled { - runningOp.Result = backend.OperationFailure - return - } - - if r != nil { - // Retrieve the run to get its current status. - r, err := b.client.Runs.Read(cancelCtx, r.ID) - if err != nil { - var diags tfdiags.Diagnostics - diags = diags.Append(generalError("Failed to retrieve run", err)) - op.ReportResult(runningOp, diags) - return - } - - // Record if there are any changes. - runningOp.PlanEmpty = !r.HasChanges - - if opErr == context.Canceled { - if err := b.cancel(cancelCtx, op, r); err != nil { - var diags tfdiags.Diagnostics - diags = diags.Append(generalError("Failed to retrieve run", err)) - op.ReportResult(runningOp, diags) - return - } - } - - if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - runningOp.Result = backend.OperationFailure - } - } - }() - - // Return the running operation. - return runningOp, nil -} - -func (b *Cloud) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if r.Actions.IsCancelable { - // Only ask if the remote operation should be canceled - // if the auto approve flag is not set. 
- if !op.AutoApprove { - v, err := op.UIIn.Input(cancelCtx, &terraform.InputOpts{ - Id: "cancel", - Query: "\nDo you want to cancel the remote operation?", - Description: "Only 'yes' will be accepted to cancel.", - }) - if err != nil { - return generalError("Failed asking to cancel", err) - } - if v != "yes" { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) - } - return nil - } - } else { - if b.CLI != nil { - // Insert a blank line to separate the ouputs. - b.CLI.Output("") - } - } - - // Try to cancel the remote operation. - err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) - if err != nil { - return generalError("Failed to cancel run", err) - } - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) - } - } - - return nil -} - -// IgnoreVersionConflict allows commands to disable the fall-back check that -// the local Terraform version matches the remote workspace's configured -// Terraform version. This should be called by commands where this check is -// unnecessary, such as those performing remote operations, or read-only -// operations. It will also be called if the user uses a command-line flag to -// override this check. -func (b *Cloud) IgnoreVersionConflict() { - b.ignoreVersionConflict = true -} - -// VerifyWorkspaceTerraformVersion compares the local Terraform version against -// the workspace's configured Terraform version. If they are compatible, this -// means that there are no state compatibility concerns, so it returns no -// diagnostics. -// -// If the versions aren't compatible, it returns an error (or, if -// b.ignoreVersionConflict is set, a warning). 
-func (b *Cloud) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) - if err != nil { - // If the workspace doesn't exist, there can be no compatibility - // problem, so we can return. This is most likely to happen when - // migrating state from a local backend to a new workspace. - if err == tfe.ErrResourceNotFound { - return nil - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error looking up workspace", - fmt.Sprintf("Workspace read failed: %s", err), - )) - return diags - } - - // If the workspace has the pseudo-version "latest", all bets are off. We - // cannot reasonably determine what the intended Terraform version is, so - // we'll skip version verification. - if workspace.TerraformVersion == "latest" { - return nil - } - - // If the workspace has execution-mode set to local, the remote Terraform - // version is effectively meaningless, so we'll skip version verification. - if isLocalExecutionMode(workspace.ExecutionMode) { - return nil - } - - remoteConstraint, err := version.NewConstraint(workspace.TerraformVersion) - if err != nil { - message := fmt.Sprintf( - "The remote workspace specified an invalid Terraform version or constraint (%s), "+ - "and it isn't possible to determine whether the local Terraform version (%s) is compatible.", - workspace.TerraformVersion, - tfversion.String(), - ) - diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) - return diags - } - - remoteVersion, _ := version.NewSemver(workspace.TerraformVersion) - - // We can use a looser version constraint if the workspace specifies a - // literal Terraform version, and it is not a prerelease. The latter - // restriction is because we cannot compare prerelease versions with any - // operator other than simple equality. 
- if remoteVersion != nil && remoteVersion.Prerelease() == "" { - v014 := version.Must(version.NewSemver("0.14.0")) - v130 := version.Must(version.NewSemver("1.3.0")) - - // Versions from 0.14 through the early 1.x series should be compatible - // (though we don't know about 1.3 yet). - if remoteVersion.GreaterThanOrEqual(v014) && remoteVersion.LessThan(v130) { - early1xCompatible, err := version.NewConstraint(fmt.Sprintf(">= 0.14.0, < %s", v130.String())) - if err != nil { - panic(err) - } - remoteConstraint = early1xCompatible - } - - // Any future new state format will require at least a minor version - // increment, so x.y.* will always be compatible with each other. - if remoteVersion.GreaterThanOrEqual(v130) { - rwvs := remoteVersion.Segments64() - if len(rwvs) >= 3 { - // ~> x.y.0 - minorVersionCompatible, err := version.NewConstraint(fmt.Sprintf("~> %d.%d.0", rwvs[0], rwvs[1])) - if err != nil { - panic(err) - } - remoteConstraint = minorVersionCompatible - } - } - } - - // Re-parsing tfversion.String because tfversion.SemVer omits the prerelease - // prefix, and we want to allow constraints like `~> 1.2.0-beta1`. - fullTfversion := version.Must(version.NewSemver(tfversion.String())) - - if remoteConstraint.Check(fullTfversion) { - return diags - } - - message := fmt.Sprintf( - "The local Terraform version (%s) does not meet the version requirements for remote workspace %s/%s (%s).", - tfversion.String(), - b.organization, - workspace.Name, - remoteConstraint, - ) - diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) - return diags -} - -func (b *Cloud) IsLocalOperations() bool { - return b.forceLocal -} - -// Colorize returns the Colorize structure that can be used for colorizing -// output. This is guaranteed to always return a non-nil value and so useful -// as a helper to wrap any potentially colored strings. -// -// TODO SvH: Rename this back to Colorize as soon as we can pass -no-color. 
-// -//lint:ignore U1000 see above todo -func (b *Cloud) cliColorize() *colorstring.Colorize { - if b.CLIColor != nil { - return b.CLIColor - } - - return &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - } -} - -func (b *Cloud) workspaceTagsRequireUpdate(workspace *tfe.Workspace, workspaceMapping WorkspaceMapping) bool { - if workspaceMapping.Strategy() != WorkspaceTagsStrategy { - return false - } - - existingTags := map[string]struct{}{} - for _, t := range workspace.TagNames { - existingTags[t] = struct{}{} - } - - for _, tag := range workspaceMapping.Tags { - if _, ok := existingTags[tag]; !ok { - return true - } - } - - return false -} - -type WorkspaceMapping struct { - Name string - Tags []string -} - -type workspaceStrategy string - -const ( - WorkspaceTagsStrategy workspaceStrategy = "tags" - WorkspaceNameStrategy workspaceStrategy = "name" - WorkspaceNoneStrategy workspaceStrategy = "none" - WorkspaceInvalidStrategy workspaceStrategy = "invalid" -) - -func (wm WorkspaceMapping) Strategy() workspaceStrategy { - switch { - case len(wm.Tags) > 0 && wm.Name == "": - return WorkspaceTagsStrategy - case len(wm.Tags) == 0 && wm.Name != "": - return WorkspaceNameStrategy - case len(wm.Tags) == 0 && wm.Name == "": - return WorkspaceNoneStrategy - default: - // Any other combination is invalid as each strategy is mutually exclusive - return WorkspaceInvalidStrategy - } -} - -func isLocalExecutionMode(execMode string) bool { - return execMode == "local" -} - -func (b *Cloud) fetchWorkspace(ctx context.Context, organization string, workspace string) (*tfe.Workspace, error) { - // Retrieve the workspace for this operation. 
- w, err := b.client.Workspaces.Read(ctx, organization, workspace) - if err != nil { - switch err { - case context.Canceled: - return nil, err - case tfe.ErrResourceNotFound: - return nil, fmt.Errorf( - "workspace %s not found\n\n"+ - "For security, Terraform Cloud returns '404 Not Found' responses for resources\n"+ - "for resources that a user doesn't have access to, in addition to resources that\n"+ - "do not exist. If the resource does exist, please check the permissions of the provided token.", - workspace, - ) - default: - err := fmt.Errorf( - "Terraform Cloud returned an unexpected error:\n\n%s", - err, - ) - return nil, err - } - } - - return w, nil -} - -// validWorkspaceEnvVar ensures we have selected a valid workspace using TF_WORKSPACE: -// First, it ensures the workspace specified by TF_WORKSPACE exists in the organization -// Second, if tags are specified in the configuration, it ensures TF_WORKSPACE belongs to the set -// of available workspaces with those given tags. -func (b *Cloud) validWorkspaceEnvVar(ctx context.Context, organization, workspace string) tfdiags.Diagnostic { - // first ensure the workspace exists - _, err := b.client.Workspaces.Read(ctx, organization, workspace) - if err != nil && err != tfe.ErrResourceNotFound { - return tfdiags.Sourceless( - tfdiags.Error, - "Terraform Cloud returned an unexpected error", - err.Error(), - ) - } - - if err == tfe.ErrResourceNotFound { - return tfdiags.Sourceless( - tfdiags.Error, - "Invalid workspace selection", - fmt.Sprintf(`Terraform failed to find workspace %q in organization %s.`, workspace, organization), - ) - } - - // if the configuration has specified tags, we need to ensure TF_WORKSPACE - // is a valid member - if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { - opts := &tfe.WorkspaceListOptions{} - opts.Tags = strings.Join(b.WorkspaceMapping.Tags, ",") - - for { - wl, err := b.client.Workspaces.List(ctx, b.organization, opts) - if err != nil { - return tfdiags.Sourceless( - 
tfdiags.Error, - "Terraform Cloud returned an unexpected error", - err.Error(), - ) - } - - for _, ws := range wl.Items { - if ws.Name == workspace { - return nil - } - } - - if wl.CurrentPage >= wl.TotalPages { - break - } - - opts.PageNumber = wl.NextPage - } - - return tfdiags.Sourceless( - tfdiags.Error, - "Invalid workspace selection", - fmt.Sprintf( - "Terraform failed to find workspace %q with the tags specified in your configuration:\n[%s]", - workspace, - strings.ReplaceAll(opts.Tags, ",", ", "), - ), - ) - } - - return nil -} - -func (wm WorkspaceMapping) tfeTags() []*tfe.Tag { - var tags []*tfe.Tag - - if wm.Strategy() != WorkspaceTagsStrategy { - return tags - } - - for _, tag := range wm.Tags { - t := tfe.Tag{Name: tag} - tags = append(tags, &t) - } - - return tags -} - -func generalError(msg string, err error) error { - var diags tfdiags.Diagnostics - - if urlErr, ok := err.(*url.Error); ok { - err = urlErr.Err - } - - switch err { - case context.Canceled: - return err - case tfe.ErrResourceNotFound: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("%s: %v", msg, err), - "For security, Terraform Cloud returns '404 Not Found' responses for resources\n"+ - "for resources that a user doesn't have access to, in addition to resources that\n"+ - "do not exist. If the resource does exist, please check the permissions of the provided token.", - )) - return diags.Err() - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("%s: %v", msg, err), - `Terraform Cloud returned an unexpected error. Sometimes `+ - `this is caused by network connection problems, in which case you could retry `+ - `the command. If the issue persists please open a support ticket to get help `+ - `resolving the problem.`, - )) - return diags.Err() - } -} - -// The newline in this error is to make it look good in the CLI! -const initialRetryError = ` -[reset][yellow]There was an error connecting to Terraform Cloud. 
Please do not exit -Terraform to prevent data loss! Trying to restore the connection... -[reset] -` - -const repeatedRetryError = ` -[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset] -` - -const operationCanceled = ` -[reset][red]The remote operation was successfully cancelled.[reset] -` - -const operationNotCanceled = ` -[reset][red]The remote operation was not cancelled.[reset] -` - -const refreshToApplyRefresh = `[bold][yellow]Proceeding with 'terraform apply -refresh-only -auto-approve'.[reset]` - -const unavailableTerraformVersion = ` -[reset][yellow]The local Terraform version (%s) is not available in Terraform Cloud, or your -organization does not have access to it. The new workspace will use %s. You can -change this later in the workspace settings.[reset]` - -const cloudIntegrationUsedInUnsupportedTFE = ` -This version of Terraform Cloud/Enterprise does not support the state mechanism -attempting to be used by the platform. This should never happen. - -Please reach out to HashiCorp Support to resolve this issue.` - -var ( - workspaceConfigurationHelp = fmt.Sprintf( - `The 'workspaces' block configures how Terraform CLI maps its workspaces for this single -configuration to workspaces within a Terraform Cloud organization. Two strategies are available: - -[bold]tags[reset] - %s - -[bold]name[reset] - %s`, schemaDescriptionTags, schemaDescriptionName) - - schemaDescriptionHostname = `The Terraform Enterprise hostname to connect to. This optional argument defaults to app.terraform.io -for use with Terraform Cloud.` - - schemaDescriptionOrganization = `The name of the organization containing the targeted workspace(s).` - - schemaDescriptionToken = `The token used to authenticate with Terraform Cloud/Enterprise. 
Typically this argument should not -be set, and 'terraform login' used instead; your credentials will then be fetched from your CLI -configuration file or configured credential helper.` - - schemaDescriptionTags = `A set of tags used to select remote Terraform Cloud workspaces to be used for this single -configuration. New workspaces will automatically be tagged with these tag values. Generally, this -is the primary and recommended strategy to use. This option conflicts with "name".` - - schemaDescriptionName = `The name of a single Terraform Cloud workspace to be used with this configuration. -When configured, only the specified workspace can be used. This option conflicts with "tags".` -) diff --git a/internal/cloud/backend_apply.go b/internal/cloud/backend_apply.go deleted file mode 100644 index fb43127e8361..000000000000 --- a/internal/cloud/backend_apply.go +++ /dev/null @@ -1,228 +0,0 @@ -package cloud - -import ( - "bufio" - "context" - "encoding/json" - "io" - "log" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func (b *Cloud) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - log.Printf("[INFO] cloud: starting Apply operation") - - var diags tfdiags.Diagnostics - - // We should remove the `CanUpdate` part of this test, but for now - // (to remain compatible with tfe.v2.1) we'll leave it in here. - if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Insufficient rights to apply changes", - "The provided credentials have insufficient rights to apply changes. 
In order "+ - "to apply changes at least write permissions on the workspace are required.", - )) - return nil, diags.Err() - } - - if w.VCSRepo != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Apply not allowed for workspaces with a VCS connection", - "A workspace that is connected to a VCS requires the VCS-driven workflow "+ - "to ensure that the VCS remains the single source of truth.", - )) - return nil, diags.Err() - } - - if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Custom parallelism values are currently not supported", - `Terraform Cloud does not support setting a custom parallelism `+ - `value at this time.`, - )) - } - - if op.PlanFile != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Applying a saved plan is currently not supported", - `Terraform Cloud currently requires configuration to be present and `+ - `does not accept an existing saved plan as an argument at this time.`, - )) - } - - if !op.HasConfig() && op.PlanMode != plans.DestroyMode { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files found", - `Apply requires configuration to be present. Applying without a configuration `+ - `would mark everything for destruction, which is normally not what is desired. `+ - `If you would like to destroy everything, please run 'terraform destroy' which `+ - `does not require any configuration files.`, - )) - } - - // Return if there are any errors. - if diags.HasErrors() { - return nil, diags.Err() - } - - // Run the plan phase. - r, err := b.plan(stopCtx, cancelCtx, op, w) - if err != nil { - return r, err - } - - // This check is also performed in the plan method to determine if - // the policies should be checked, but we need to check the values - // here again to determine if we are done and should return. 
- if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - return r, nil - } - - // Retrieve the run to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // Return if the run cannot be confirmed. - if !op.AutoApprove && !r.Actions.IsConfirmable { - return r, nil - } - - mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove - - if mustConfirm && b.input { - opts := &terraform.InputOpts{Id: "approve"} - - if op.PlanMode == plans.DestroyMode { - opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" - opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - } else { - opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" - opts.Description = "Terraform will perform the actions described above.\n" + - "Only 'yes' will be accepted to approve." - } - - err = b.confirm(stopCtx, op, opts, r, "yes") - if err != nil && err != errRunApproved { - return r, err - } - } else if mustConfirm && !b.input { - return r, errApplyNeedsUIConfirmation - } else { - // If we don't need to ask for confirmation, insert a blank - // line to separate the ouputs. - if b.CLI != nil { - b.CLI.Output("") - } - } - - if !op.AutoApprove && err != errRunApproved { - if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { - return r, generalError("Failed to approve the apply command", err) - } - } - - // Retrieve the run to get task stages. - // Task Stages are calculated upfront so we only need to call this once for the run. 
- taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) - if err != nil { - return r, err - } - - if stage, ok := taskStages[tfe.PreApply]; ok { - if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-apply Tasks"); err != nil { - return r, err - } - } - - r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) - if err != nil { - return r, err - } - - err = b.renderApplyLogs(stopCtx, r) - if err != nil { - return r, err - } - - return r, nil -} - -func (b *Cloud) renderApplyLogs(ctx context.Context, run *tfe.Run) error { - logs, err := b.client.Applies.Logs(ctx, run.Apply.ID) - if err != nil { - return err - } - - if b.CLI != nil { - reader := bufio.NewReaderSize(logs, 64*1024) - skip := 0 - - for next := true; next; { - var l, line []byte - var err error - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return generalError("Failed to read logs", err) - } - next = false - } - - line = append(line, l...) - } - - // Apply logs show the same Terraform info logs as shown in the plan logs - // (which contain version and os/arch information), we therefore skip to prevent duplicate output. - if skip < 3 { - skip++ - continue - } - - if next || len(line) > 0 { - log := &jsonformat.JSONLog{} - if err := json.Unmarshal(line, log); err != nil { - // If we can not parse the line as JSON, we will simply - // print the line. This maintains backwards compatibility for - // users who do not wish to enable structured output in their - // workspace. - b.CLI.Output(string(line)) - continue - } - - if b.renderer != nil { - // Otherwise, we will print the log - err := b.renderer.RenderLog(log) - if err != nil { - return err - } - } - } - } - } - - return nil -} - -const applyDefaultHeader = ` -[reset][yellow]Running apply in Terraform Cloud. Output will stream here. Pressing Ctrl-C -will cancel the remote apply if it's still pending. 
If the apply started it -will stop streaming the logs, but will not stop the apply running remotely.[reset] - -Preparing the remote apply... -` diff --git a/internal/cloud/backend_apply_test.go b/internal/cloud/backend_apply_test.go deleted file mode 100644 index c10d9ed07128..000000000000 --- a/internal/cloud/backend_apply_test.go +++ /dev/null @@ -1,1898 +0,0 @@ -package cloud - -import ( - "context" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "testing" - "time" - - gomock "github.com/golang/mock/gomock" - "github.com/google/go-cmp/cmp" - tfe "github.com/hashicorp/go-tfe" - mocks "github.com/hashicorp/go-tfe/mocks" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" -) - -func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - return testOperationApplyWithTimeout(t, configDir, 0) -} - -func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := 
terminal.StreamsForTesting(t) - view := views.NewView(streams) - stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) - operationView := views.NewOperation(arguments.ViewHuman, false, view) - - // Many of our tests use an overridden "null" provider that's just in-memory - // inside the test process, not a separate plugin on disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) - - return &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - PlanRefresh: true, - StateLocker: clistate.NewLocker(timeout, stateLockerView), - Type: backend.OperationTypeApply, - View: operationView, - DependencyLocks: depLocks, - }, configCleanup, done -} - -func TestCloud_applyBasic(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply 
summery in output: %s", output) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} - -func TestCloud_applyJSONBasic(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-json") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - outp := close(t) - gotOut := outp.Stdout() - - if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", gotOut) - } - if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summary in output: %s", gotOut) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} - -func TestCloud_applyJSONWithOutputs(t *testing.T) { - b, bCleanup := testBackendWithName(t) - 
defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-outputs") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - outp := close(t) - gotOut := outp.Stdout() - expectedSimpleOutput := `simple = [ - "some", - "list", - ]` - expectedSensitiveOutput := `secret = (sensitive value)` - expectedComplexOutput := `complex = { - keyA = { - someList = [ - 1, - 2, - 3, - ] - } - keyB = { - someBool = true - someStr = "hello" - } - }` - - if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", gotOut) - } - if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summary in output: %s", gotOut) - } - if !strings.Contains(gotOut, "Outputs:") { - t.Fatalf("expected output header: %s", gotOut) - } - if !strings.Contains(gotOut, expectedSimpleOutput) { - t.Fatalf("expected output: %s, got: %s", expectedSimpleOutput, gotOut) - } - if !strings.Contains(gotOut, expectedSensitiveOutput) { - t.Fatalf("expected output: %s, got: %s", expectedSensitiveOutput, gotOut) - } - if !strings.Contains(gotOut, expectedComplexOutput) { - t.Fatalf("expected output: %s, got: %s", expectedComplexOutput, 
gotOut) - } - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} - -func TestCloud_applyCanceled(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Stop the run to simulate a Ctrl-C. - run.Stop() - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) - } -} - -func TestCloud_applyWithoutPermissions(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - // Create a named workspace without permissions. 
- w, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - w.Permissions.CanQueueApply = false - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.UIOut = b.CLI - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Insufficient rights to apply changes") { - t.Fatalf("expected a permissions error, got: %v", errOutput) - } -} - -func TestCloud_applyWithVCS(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - // Create a named workspace with a VCS. - _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("prod"), - VCSRepo: &tfe.VCSRepoOptions{}, - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { - t.Fatalf("expected a VCS error, got: %v", errOutput) - } -} - -func TestCloud_applyWithParallelism(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, 
configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - b.ContextOpts.Parallelism = 3 - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "parallelism values are currently not supported") { - t.Fatalf("expected a parallelism error, got: %v", errOutput) - } -} - -func TestCloud_applyWithPlan(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - op.PlanFile = &planfile.Reader{} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "saved plan is currently not supported") { - t.Fatalf("expected a saved plan error, got: %v", errOutput) - } -} - -func TestCloud_applyWithoutRefresh(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.PlanRefresh = false - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation 
failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has refresh set - // to false. - runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(false, run.Refresh); diff != "" { - t.Errorf("wrong Refresh setting in the created run\n%s", diff) - } - } -} - -func TestCloud_applyWithRefreshOnly(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has refresh-only set - // to true. 
- runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { - t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) - } - } -} - -func TestCloud_applyWithTarget(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected apply operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // target address we requested above. 
- runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { - t.Errorf("wrong TargetAddrs in the created run\n%s", diff) - } - } -} - -func TestCloud_applyWithReplace(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // refresh address we requested above. 
- runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { - t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) - } - } -} - -func TestCloud_applyWithRequiredVariables(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") - defer configCleanup() - defer done(t) - - op.Variables = testVariables(terraform.ValueFromNamedFile, "foo") // "bar" variable value missing - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - // The usual error of a required variable being missing is deferred and the operation - // is successful - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } -} - -func TestCloud_applyNoConfig(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "configuration files found") { - t.Fatalf("expected 
configuration files error, got: %v", errOutput) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) - } -} - -func TestCloud_applyNoChanges(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { - t.Fatalf("expected no changes in plan summery: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } -} - -func TestCloud_applyNoApprove(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - - input := testInput(t, map[string]string{ - "approve": "no", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Apply discarded") { - t.Fatalf("expected an apply discarded error, got: %v", errOutput) - } -} - -func TestCloud_applyAutoApprove(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - ctrl := gomock.NewController(t) - - applyMock := mocks.NewMockApplies(ctrl) - // This needs three new lines because we check for a minimum of three lines - // in the parsing of logs in `opApply` function. 
- logs := strings.NewReader(applySuccessOneResourceAdded) - applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) - b.client.Applies = applyMock - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "no", - }) - - op.AutoApprove = true - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyApprovedExternally(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "wait-for-external-update", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - ctx := context.Background() - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Wait 50 milliseconds to make sure the run started. 
- time.Sleep(50 * time.Millisecond) - - wl, err := b.client.Workspaces.List( - ctx, - b.organization, - nil, - ) - if err != nil { - t.Fatalf("unexpected error listing workspaces: %v", err) - } - if len(wl.Items) != 1 { - t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) - } - - rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) - if err != nil { - t.Fatalf("unexpected error listing runs: %v", err) - } - if len(rl.Items) != 1 { - t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) - } - - err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) - if err != nil { - t.Fatalf("unexpected error approving run: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "approved using the UI or API") { - t.Fatalf("expected external approval in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyDiscardedExternally(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "wait-for-external-update", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - ctx := context.Background() - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting 
operation: %v", err) - } - - // Wait 50 milliseconds to make sure the run started. - time.Sleep(50 * time.Millisecond) - - wl, err := b.client.Workspaces.List( - ctx, - b.organization, - nil, - ) - if err != nil { - t.Fatalf("unexpected error listing workspaces: %v", err) - } - if len(wl.Items) != 1 { - t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) - } - - rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) - if err != nil { - t.Fatalf("unexpected error listing runs: %v", err) - } - if len(rl.Items) != 1 { - t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) - } - - err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) - if err != nil { - t.Fatalf("unexpected error discarding run: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "discarded using the UI or API") { - t.Fatalf("expected external discard output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestCloud_applyWithAutoApprove(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - ctrl := gomock.NewController(t) - - applyMock := mocks.NewMockApplies(ctrl) - // This needs three new lines because we check for a minimum of three lines - // in the parsing of logs in `opApply` function. 
- logs := strings.NewReader(applySuccessOneResourceAdded) - applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) - b.client.Applies = applyMock - - // Create a named workspace that auto applies. - _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "prod" - op.AutoApprove = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyForceLocal(t *testing.T) { - // Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use - // the local backend with itself as embedded backend. 
- if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { - t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) - } - defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - if !run.State.HasManagedResourceInstanceObjects() { - t.Fatalf("expected resources in state") - } -} - -func TestCloud_applyWorkspaceWithoutOperations(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - ctx := context.Background() - - // Create a named workspace that doesn't allow operations. 
- _, err := b.client.Workspaces.Create( - ctx, - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("no-operations"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "no-operations" - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - if !run.State.HasManagedResourceInstanceObjects() { - t.Fatalf("expected resources in state") - } -} - -func TestCloud_applyLockTimeout(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - ctx := context.Background() - - // Retrieve the workspace used to run this operation in. - w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) - if err != nil { - t.Fatalf("error retrieving workspace: %v", err) - } - - // Create a new configuration version. 
- c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) - if err != nil { - t.Fatalf("error creating configuration version: %v", err) - } - - // Create a pending run to block this run. - _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ - ConfigurationVersion: c, - Workspace: w, - }) - if err != nil { - t.Fatalf("error creating pending run: %v", err) - } - - op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond) - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "cancel": "yes", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - _, err = b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - sigint := make(chan os.Signal, 1) - signal.Notify(sigint, syscall.SIGINT) - select { - case <-sigint: - // Stop redirecting SIGINT signals. - signal.Stop(sigint) - case <-time.After(200 * time.Millisecond): - t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") - } - - if len(input.answers) != 2 { - t.Fatalf("expected unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("expected lock timout error in output: %s", output) - } - if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("unexpected plan summery in output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestCloud_applyDestroy(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.PlanMode = plans.DestroyMode - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyDestroyNoConfig(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op, configCleanup, done := testOperationApply(t, "./testdata/empty") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } 
-} - -func TestCloud_applyJSONWithProvisioner(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner") - defer configCleanup() - defer done(t) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - outp := close(t) - gotOut := outp.Stdout() - if !strings.Contains(gotOut, "null_resource.foo: Provisioning with 'local-exec'") { - t.Fatalf("expected provisioner local-exec start in logs: %s", gotOut) - } - - if !strings.Contains(gotOut, "null_resource.foo: (local-exec):") { - t.Fatalf("expected provisioner local-exec progress in logs: %s", gotOut) - } - - if !strings.Contains(gotOut, "Hello World!") { - t.Fatalf("expected provisioner local-exec output in logs: %s", gotOut) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} - -func TestCloud_applyJSONWithProvisionerError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = 
&jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner-error") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - - outp := close(t) - gotOut := outp.Stdout() - - if !strings.Contains(gotOut, "local-exec provisioner error") { - t.Fatalf("unexpected error in apply logs: %s", gotOut) - } -} - -func TestCloud_applyPolicyPass(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery 
in output: %s", output) - } -} - -func TestCloud_applyPolicyHardFail(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed") - defer configCleanup() - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - if len(input.answers) != 1 { - t.Fatalf("expected an unused answers, got: %v", input.answers) - } - - errOutput := viewOutput.Stderr() - if !strings.Contains(errOutput, "hard failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("unexpected apply summery in output: %s", output) - } -} - -func TestCloud_applyPolicySoftFail(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "override": "override", - "approve": "yes", - }) - - op.AutoApprove = false - op.UIIn = input - op.UIOut = b.CLI - 
op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) > 0 { - t.Fatalf("expected no unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyPolicySoftFailAutoApproveSuccess(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - ctrl := gomock.NewController(t) - - policyCheckMock := mocks.NewMockPolicyChecks(ctrl) - // This needs three new lines because we check for a minimum of three lines - // in the parsing of logs in `opApply` function. 
- logs := strings.NewReader(fmt.Sprintf("%s\n%s", sentinelSoftFail, applySuccessOneResourceAdded)) - - pc := &tfe.PolicyCheck{ - ID: "pc-1", - Actions: &tfe.PolicyActions{ - IsOverridable: true, - }, - Permissions: &tfe.PolicyPermissions{ - CanOverride: true, - }, - Scope: tfe.PolicyScopeOrganization, - Status: tfe.PolicySoftFailed, - } - policyCheckMock.EXPECT().Read(gomock.Any(), gomock.Any()).Return(pc, nil) - policyCheckMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) - policyCheckMock.EXPECT().Override(gomock.Any(), gomock.Any()).Return(nil, nil) - b.client.PolicyChecks = policyCheckMock - applyMock := mocks.NewMockApplies(ctrl) - // This needs three new lines because we check for a minimum of three lines - // in the parsing of logs in `opApply` function. - logs = strings.NewReader("\n\n\n1 added, 0 changed, 0 destroyed") - applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) - b.client.Applies = applyMock - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - - input := testInput(t, map[string]string{}) - - op.AutoApprove = true - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result != backend.OperationSuccess { - t.Fatal("expected apply operation to success due to auto-approve") - } - - if run.PlanEmpty { - t.Fatalf("expected plan to not be empty, plan opertion completed without error") - } - - if len(input.answers) != 0 { - t.Fatalf("expected no answers, got: %v", input.answers) - } - - errOutput := viewOutput.Stderr() - if strings.Contains(errOutput, "soft failed") { - t.Fatalf("expected no policy check errors, instead got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Sentinel Result: false") 
{ - t.Fatalf("expected policy check to be false, insead got: %s", output) - } - if !strings.Contains(output, "Apply complete!") { - t.Fatalf("expected apply to be complete, instead got: %s", output) - } - - if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected resources, instead got: %s", output) - } -} - -func TestCloud_applyPolicySoftFailAutoApprove(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - ctrl := gomock.NewController(t) - - applyMock := mocks.NewMockApplies(ctrl) - // This needs three new lines because we check for a minimum of three lines - // in the parsing of logs in `opApply` function. - logs := strings.NewReader(applySuccessOneResourceAdded) - applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) - b.client.Applies = applyMock - - // Create a named workspace that auto applies. - _, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "override": "override", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = "prod" - op.AutoApprove = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - if len(input.answers) != 2 { - t.Fatalf("expected an unused answer, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running apply in Terraform Cloud") { - 
t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summery in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) - } -} - -func TestCloud_applyWithRemoteError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("expected apply error in output: %s", output) - } -} - -func TestCloud_applyJSONWithRemoteError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-error") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected apply operation to fail") - } - if 
run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - outp := close(t) - gotOut := outp.Stdout() - - if !strings.Contains(gotOut, "Unsupported block type") { - t.Fatalf("unexpected plan error in output: %s", gotOut) - } -} - -func TestCloud_applyVersionCheck(t *testing.T) { - testCases := map[string]struct { - localVersion string - remoteVersion string - forceLocal bool - executionMode string - wantErr string - }{ - "versions can be different for remote apply": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - executionMode: "remote", - }, - "versions can be different for local apply": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - executionMode: "local", - }, - "force local with remote operations and different versions is acceptable": { - localVersion: "0.14.0", - remoteVersion: "0.14.0-acme-provider-bundle", - forceLocal: true, - executionMode: "remote", - }, - "no error if versions are identical": { - localVersion: "0.14.0", - remoteVersion: "0.14.0", - forceLocal: true, - executionMode: "remote", - }, - "no error if force local but workspace has remote operations disabled": { - localVersion: "0.14.0", - remoteVersion: "0.13.5", - forceLocal: true, - executionMode: "local", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - // SETUP: Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // SETUP: Set local version for the test case - tfversion.Prerelease = "" - tfversion.Version = tc.localVersion - tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) - - // SETUP: Set force local for the test case - b.forceLocal = tc.forceLocal - - ctx := context.Background() - - // SETUP: set the operations and Terraform 
Version fields on the - // remote workspace - _, err := b.client.Workspaces.Update( - ctx, - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - ExecutionMode: tfe.String(tc.executionMode), - TerraformVersion: tfe.String(tc.remoteVersion), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - // RUN: prepare the apply operation and run it - op, configCleanup, opDone := testOperationApply(t, "./testdata/apply") - defer configCleanup() - defer opDone(t) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - input := testInput(t, map[string]string{ - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // RUN: wait for completion - <-run.Done() - output := done(t) - - if tc.wantErr != "" { - // ASSERT: if the test case wants an error, check for failure - // and the error message - if run.Result != backend.OperationFailure { - t.Fatalf("expected run to fail, but result was %#v", run.Result) - } - errOutput := output.Stderr() - if !strings.Contains(errOutput, tc.wantErr) { - t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) - } - } else { - // ASSERT: otherwise, check for success and appropriate output - // based on whether the run should be local or remote - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - output := b.CLI.(*cli.MockUi).OutputWriter.String() - hasRemote := strings.Contains(output, "Running apply in Terraform Cloud") - hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") - hasResources := run.State.HasManagedResourceInstanceObjects() - if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { - if !hasRemote { - 
t.Errorf("missing TFC header in output: %s", output) - } - if !hasSummary { - t.Errorf("expected apply summary in output: %s", output) - } - } else { - if hasRemote { - t.Errorf("unexpected TFC header in output: %s", output) - } - if !hasResources { - t.Errorf("expected resources in state") - } - } - } - }) - } -} - -const applySuccessOneResourceAdded = ` -Terraform v0.11.10 - -Initializing plugins and modules... -null_resource.hello: Creating... -null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) - -Apply complete! Resources: 1 added, 0 changed, 0 destroyed. -` - -const sentinelSoftFail = ` -Sentinel Result: false - -Sentinel evaluated to false because one or more Sentinel policies evaluated -to false. This false was not due to an undefined value or runtime error. - -1 policies evaluated. - -## Policy 1: Passthrough.sentinel (soft-mandatory) - -Result: false - -FALSE - Passthrough.sentinel:1:1 - Rule "main" -` diff --git a/internal/cloud/backend_common.go b/internal/cloud/backend_common.go deleted file mode 100644 index a969d1d9279e..000000000000 --- a/internal/cloud/backend_common.go +++ /dev/null @@ -1,635 +0,0 @@ -package cloud - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-retryablehttp" - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/jsonapi" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terraform" -) - -var ( - backoffMin = 1000.0 - backoffMax = 3000.0 - - runPollInterval = 3 * time.Second -) - -// backoff will perform exponential backoff based on the iteration and -// limited by the provided min and max (in milliseconds) durations. 
-func backoff(min, max float64, iter int) time.Duration { - backoff := math.Pow(2, float64(iter)/5) * min - if backoff > max { - backoff = max - } - return time.Duration(backoff) * time.Millisecond -} - -func (b *Cloud) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { - started := time.Now() - updated := started - for i := 0; ; i++ { - select { - case <-stopCtx.Done(): - return r, stopCtx.Err() - case <-cancelCtx.Done(): - return r, cancelCtx.Err() - case <-time.After(backoff(backoffMin, backoffMax, i)): - // Timer up, show status - } - - // Retrieve the run to get its current status. - r, err := b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // Return if the run is no longer pending. - if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { - if i == 0 && opType == "plan" && b.CLI != nil { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) - } - if i > 0 && b.CLI != nil { - // Insert a blank line to separate the ouputs. - b.CLI.Output("") - } - return r, nil - } - - // Check if 30 seconds have passed since the last update. - current := time.Now() - if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { - updated = current - position := 0 - elapsed := "" - - // Calculate and set the elapsed time. - if i > 0 { - elapsed = fmt.Sprintf( - " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) - } - - // Retrieve the workspace used to run this operation in. - w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) - if err != nil { - return nil, generalError("Failed to retrieve workspace", err) - } - - // If the workspace is locked the run will not be queued and we can - // update the status without making any expensive calls. 
- if w.Locked && w.CurrentRun != nil { - cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) - if err != nil { - return r, generalError("Failed to retrieve current run", err) - } - if cr.Status == tfe.RunPending { - b.CLI.Output(b.Colorize().Color( - "Waiting for the manually locked workspace to be unlocked..." + elapsed)) - continue - } - } - - // Skip checking the workspace queue when we are the current run. - if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { - found := false - options := &tfe.RunListOptions{} - runlist: - for { - rl, err := b.client.Runs.List(stopCtx, w.ID, options) - if err != nil { - return r, generalError("Failed to retrieve run list", err) - } - - // Loop through all runs to calculate the workspace queue position. - for _, item := range rl.Items { - if !found { - if r.ID == item.ID { - found = true - } - continue - } - - // If the run is in a final state, ignore it and continue. - switch item.Status { - case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: - continue - case tfe.RunPlanned: - if op.Type == backend.OperationTypePlan { - continue - } - } - - // Increase the workspace queue position. - position++ - - // Stop searching when we reached the current run. - if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { - break runlist - } - } - - // Exit the loop when we've seen all pages. - if rl.CurrentPage >= rl.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = rl.NextPage - } - - if position > 0 { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for %d run(s) to finish before being queued...%s", - position, - elapsed, - ))) - continue - } - } - - options := tfe.ReadRunQueueOptions{} - search: - for { - rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) - if err != nil { - return r, generalError("Failed to retrieve queue", err) - } - - // Search through all queued items to find our run. 
- for _, item := range rq.Items { - if r.ID == item.ID { - position = item.PositionInQueue - break search - } - } - - // Exit the loop when we've seen all pages. - if rq.CurrentPage >= rq.TotalPages { - break - } - - // Update the page number to get the next page. - options.PageNumber = rq.NextPage - } - - if position > 0 { - c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization) - if err != nil { - return r, generalError("Failed to retrieve capacity", err) - } - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for %d queued run(s) to finish before starting...%s", - position-c.Running, - elapsed, - ))) - continue - } - - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "Waiting for the %s to start...%s", opType, elapsed))) - } - } -} - -func (b *Cloud) waitTaskStage(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run, stageID string, outputTitle string) error { - integration := &IntegrationContext{ - B: b, - StopContext: stopCtx, - CancelContext: cancelCtx, - Op: op, - Run: r, - } - return b.runTaskStage(integration, integration.BeginOutput(outputTitle), stageID) -} - -func (b *Cloud) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if r.CostEstimate == nil { - return nil - } - - msgPrefix := "Cost Estimation" - started := time.Now() - updated := started - for i := 0; ; i++ { - select { - case <-stopCtx.Done(): - return stopCtx.Err() - case <-cancelCtx.Done(): - return cancelCtx.Err() - case <-time.After(backoff(backoffMin, backoffMax, i)): - } - - // Retrieve the cost estimate to get its current status. - ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) - if err != nil { - return generalError("Failed to retrieve cost estimate", err) - } - - // If the run is canceled or errored, but the cost-estimate still has - // no result, there is nothing further to render. 
- if ce.Status != tfe.CostEstimateFinished { - if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - return nil - } - } - - // checking if i == 0 so as to avoid printing this starting horizontal-rule - // every retry, and that it only prints it on the first (i=0) attempt. - if b.CLI != nil && i == 0 { - b.CLI.Output("\n------------------------------------------------------------------------\n") - } - - switch ce.Status { - case tfe.CostEstimateFinished: - delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) - if err != nil { - return generalError("Unexpected error", err) - } - - sign := "+" - if delta < 0 { - sign = "-" - } - - deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) - b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) - b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) - - if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { - b.CLI.Output("\n------------------------------------------------------------------------") - } - } - - return nil - case tfe.CostEstimatePending, tfe.CostEstimateQueued: - // Check if 30 seconds have passed since the last update. - current := time.Now() - if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { - updated = current - elapsed := "" - - // Calculate and set the elapsed time. - if i > 0 { - elapsed = fmt.Sprintf( - " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) - } - b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) - b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) - } - continue - case tfe.CostEstimateSkippedDueToTargeting: - b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) - b.CLI.Output("Not available for this plan, because it was created with the -target option.") - b.CLI.Output("\n------------------------------------------------------------------------") - return nil - case tfe.CostEstimateErrored: - b.CLI.Output(msgPrefix + " errored.\n") - b.CLI.Output("\n------------------------------------------------------------------------") - return nil - case tfe.CostEstimateCanceled: - return fmt.Errorf(msgPrefix + " canceled.") - default: - return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) - } - } -} - -func (b *Cloud) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { - if b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------\n") - } - for i, pc := range r.PolicyChecks { - // Read the policy check logs. This is a blocking call that will only - // return once the policy check is complete. - logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) - if err != nil { - return generalError("Failed to retrieve policy check logs", err) - } - reader := bufio.NewReaderSize(logs, 64*1024) - - // Retrieve the policy check to get its current status. - pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) - if err != nil { - return generalError("Failed to retrieve policy check", err) - } - - // If the run is canceled or errored, but the policy check still has - // no result, there is nothing further to render. 
- if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { - switch pc.Status { - case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: - continue - } - } - - var msgPrefix string - switch pc.Scope { - case tfe.PolicyScopeOrganization: - msgPrefix = "Organization Policy Check" - case tfe.PolicyScopeWorkspace: - msgPrefix = "Workspace Policy Check" - default: - msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) - } - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) - } - - if b.CLI != nil { - for next := true; next; { - var l, line []byte - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return generalError("Failed to read logs", err) - } - next = false - } - line = append(line, l...) - } - - if next || len(line) > 0 { - b.CLI.Output(b.Colorize().Color(string(line))) - } - } - } - - switch pc.Status { - case tfe.PolicyPasses: - if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------") - } - continue - case tfe.PolicyErrored: - return fmt.Errorf(msgPrefix + " errored.") - case tfe.PolicyHardFailed: - return fmt.Errorf(msgPrefix + " hard failed.") - case tfe.PolicySoftFailed: - runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) - - if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || - !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { - return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) - } - - if op.AutoApprove { - if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { - return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) - } - } else if !b.input { - return errPolicyOverrideNeedsUIConfirmation - } else { - opts := &terraform.InputOpts{ - Id: "override", - Query: "\nDo you 
want to override the soft failed policy check?", - Description: "Only 'override' will be accepted to override.", - } - err = b.confirm(stopCtx, op, opts, r, "override") - if err != nil && err != errRunOverridden { - return fmt.Errorf( - fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl), - ) - } - - if err != errRunOverridden { - if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { - return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) - } - } else { - b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) - } - } - - if b.CLI != nil { - b.CLI.Output("------------------------------------------------------------------------") - } - default: - return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) - } - } - - return nil -} - -func (b *Cloud) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { - doneCtx, cancel := context.WithCancel(stopCtx) - result := make(chan error, 2) - - go func() { - // Make sure we cancel doneCtx before we return - // so the input command is also canceled. - defer cancel() - - for { - select { - case <-doneCtx.Done(): - return - case <-stopCtx.Done(): - return - case <-time.After(runPollInterval): - // Retrieve the run again to get its current status. 
- r, err := b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - result <- generalError("Failed to retrieve run", err) - return - } - - switch keyword { - case "override": - if r.Status != tfe.RunPolicyOverride && r.Status != tfe.RunPostPlanAwaitingDecision { - if r.Status == tfe.RunDiscarded { - err = errRunDiscarded - } else { - err = errRunOverridden - } - } - case "yes": - if !r.Actions.IsConfirmable { - if r.Status == tfe.RunDiscarded { - err = errRunDiscarded - } else { - err = errRunApproved - } - } - } - - if err != nil { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color( - fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) - } - - if err == errRunDiscarded { - err = errApplyDiscarded - if op.PlanMode == plans.DestroyMode { - err = errDestroyDiscarded - } - } - - result <- err - return - } - } - } - }() - - result <- func() error { - v, err := op.UIIn.Input(doneCtx, opts) - if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { - return fmt.Errorf("Error asking %s: %v", opts.Id, err) - } - - // We return the error of our parent channel as we don't - // care about the error of the doneCtx which is only used - // within this function. So if the doneCtx was canceled - // because stopCtx was canceled, this will properly return - // a context.Canceled error and otherwise it returns nil. - if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { - return stopCtx.Err() - } - - // Make sure we cancel the context here so the loop that - // checks for external changes to the run is ended before - // we start to make changes ourselves. - cancel() - - if v != keyword { - // Retrieve the run again to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return generalError("Failed to retrieve run", err) - } - - // Make sure we discard the run if possible. 
- if r.Actions.IsDiscardable { - err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) - if err != nil { - if op.PlanMode == plans.DestroyMode { - return generalError("Failed to discard destroy", err) - } - return generalError("Failed to discard apply", err) - } - } - - // Even if the run was discarded successfully, we still - // return an error as the apply command was canceled. - if op.PlanMode == plans.DestroyMode { - return errDestroyDiscarded - } - return errApplyDiscarded - } - - return nil - }() - - return <-result -} - -// This method will fetch the redacted plan output and marshal the response into -// a struct the jsonformat.Renderer expects. -// -// Note: Apologies for the lengthy definition, this is a result of not being able to mock receiver methods -var readRedactedPlan func(context.Context, url.URL, string, string) (*jsonformat.Plan, error) = func(ctx context.Context, baseURL url.URL, token string, planID string) (*jsonformat.Plan, error) { - client := retryablehttp.NewClient() - client.RetryMax = 10 - client.RetryWaitMin = 100 * time.Millisecond - client.RetryWaitMax = 400 * time.Millisecond - client.Logger = logging.HCLogger() - - u, err := baseURL.Parse(fmt.Sprintf( - "plans/%s/json-output-redacted", url.QueryEscape(planID))) - if err != nil { - return nil, err - } - - req, err := retryablehttp.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("Accept", "application/json") - - p := &jsonformat.Plan{} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err = checkResponseCode(resp); err != nil { - return nil, err - } - - if err := json.NewDecoder(resp.Body).Decode(p); err != nil { - return nil, err - } - - return p, nil -} - -func checkResponseCode(r *http.Response) error { - if r.StatusCode >= 200 && r.StatusCode <= 299 { - return nil - } - - var errs []string - var err error - - switch 
r.StatusCode { - case 401: - return tfe.ErrUnauthorized - case 404: - return tfe.ErrResourceNotFound - } - - errs, err = decodeErrorPayload(r) - if err != nil { - return err - } - - return errors.New(strings.Join(errs, "\n")) -} - -func decodeErrorPayload(r *http.Response) ([]string, error) { - // Decode the error payload. - var errs []string - errPayload := &jsonapi.ErrorsPayload{} - err := json.NewDecoder(r.Body).Decode(errPayload) - if err != nil || len(errPayload.Errors) == 0 { - return errs, errors.New(r.Status) - } - - // Parse and format the errors. - for _, e := range errPayload.Errors { - if e.Detail == "" { - errs = append(errs, e.Title) - } else { - errs = append(errs, fmt.Sprintf("%s\n\n%s", e.Title, e.Detail)) - } - } - - return errs, nil -} diff --git a/internal/cloud/backend_context.go b/internal/cloud/backend_context.go deleted file mode 100644 index a1236b3663ef..000000000000 --- a/internal/cloud/backend_context.go +++ /dev/null @@ -1,292 +0,0 @@ -package cloud - -import ( - "context" - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// LocalRun implements backend.Local -func (b *Cloud) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := &backend.LocalRun{ - PlanOpts: &terraform.PlanOpts{ - Mode: op.PlanMode, - Targets: op.Targets, - }, - } - - op.StateLocker = op.StateLocker.WithContext(context.Background()) - - // Get the remote workspace name. - remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) - - // Get the latest state. 
- log.Printf("[TRACE] cloud: requesting state manager for workspace %q", remoteWorkspaceName) - stateMgr, err := b.StateMgr(op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error loading state: %w", err)) - return nil, nil, diags - } - - log.Printf("[TRACE] cloud: requesting state lock for workspace %q", remoteWorkspaceName) - if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { - return nil, nil, diags - } - - defer func() { - // If we're returning with errors, and thus not producing a valid - // context, we'll want to avoid leaving the remote workspace locked. - if diags.HasErrors() { - diags = diags.Append(op.StateLocker.Unlock()) - } - }() - - log.Printf("[TRACE] cloud: reading remote state for workspace %q", remoteWorkspaceName) - if err := stateMgr.RefreshState(); err != nil { - diags = diags.Append(fmt.Errorf("error loading state: %w", err)) - return nil, nil, diags - } - - // Initialize our context options - var opts terraform.ContextOpts - if v := b.ContextOpts; v != nil { - opts = *v - } - - // Copy set options from the operation - opts.UIInput = op.UIIn - - // Load the latest state. If we enter contextFromPlanFile below then the - // state snapshot in the plan file must match this, or else it'll return - // error diagnostics. - log.Printf("[TRACE] cloud: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) - ret.InputState = stateMgr.State() - - log.Printf("[TRACE] cloud: loading configuration for the current working directory") - config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, nil, diags - } - ret.Config = config - - if op.AllowUnsetVariables { - // If we're not going to use the variables in an operation we'll be - // more lax about them, stubbing out any unset ones as unknown. 
- // This gives us enough information to produce a consistent context, - // but not enough information to run a real operation (plan, apply, etc) - ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) - } else { - // The underlying API expects us to use the opaque workspace id to request - // variables, so we'll need to look that up using our organization name - // and workspace name. - remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) - return nil, nil, diags - } - w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) - if err != nil { - diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) - return nil, nil, diags - } - - if isLocalExecutionMode(w.ExecutionMode) { - log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) - } else { - log.Printf("[TRACE] cloud: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) - tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) - if err != nil && err != tfe.ErrResourceNotFound { - diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) - return nil, nil, diags - } - - if tfeVariables != nil { - if op.Variables == nil { - op.Variables = make(map[string]backend.UnparsedVariableValue) - } - - for _, v := range tfeVariables.Items { - if v.Category == tfe.CategoryTerraform { - if _, ok := op.Variables[v.Key]; !ok { - op.Variables[v.Key] = &remoteStoredVariableValue{ - definition: v, - } - } - } - } - } - } - - if op.Variables != nil { - variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) - diags = diags.Append(varDiags) - if diags.HasErrors() { - return nil, nil, diags - } - 
ret.PlanOpts.SetVariables = variables - } - } - - tfCtx, ctxDiags := terraform.NewContext(&opts) - diags = diags.Append(ctxDiags) - ret.Core = tfCtx - - log.Printf("[TRACE] cloud: finished building terraform.Context") - - return ret, stateMgr, diags -} - -func (b *Cloud) getRemoteWorkspaceName(localWorkspaceName string) string { - switch { - case localWorkspaceName == backend.DefaultStateName: - // The default workspace name is a special case - return b.WorkspaceMapping.Name - default: - return localWorkspaceName - } -} - -func (b *Cloud) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { - remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) - - log.Printf("[TRACE] cloud: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) - remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) - if err != nil { - return nil, err - } - - return remoteWorkspace, nil -} - -func (b *Cloud) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { - remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) - if err != nil { - return "", err - } - - return remoteWorkspace.ID, nil -} - -func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) terraform.InputValues { - ret := make(terraform.InputValues, len(decls)) - - for name, cfg := range decls { - raw, exists := vv[name] - if !exists { - ret[name] = &terraform.InputValue{ - Value: cty.UnknownVal(cfg.Type), - SourceType: terraform.ValueFromConfig, - } - continue - } - - val, diags := raw.ParseVariableValue(cfg.ParsingMode) - if diags.HasErrors() { - ret[name] = &terraform.InputValue{ - Value: cty.UnknownVal(cfg.Type), - SourceType: terraform.ValueFromConfig, - } - continue - } - ret[name] = val - } - - return ret -} - -// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation -// that translates from the go-tfe 
representation of stored variables into -// the Terraform Core backend representation of variables. -type remoteStoredVariableValue struct { - definition *tfe.Variable -} - -var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) - -func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var val cty.Value - - switch { - case v.definition.Sensitive: - // If it's marked as sensitive then it's not available for use in - // local operations. We'll use an unknown value as a placeholder for - // it so that operations that don't need it might still work, but - // we'll also produce a warning about it to add context for any - // errors that might result here. - val = cty.DynamicVal - if !v.definition.HCL { - // If it's not marked as HCL then we at least know that the - // value must be a string, so we'll set that in case it allows - // us to do some more precise type checking. - val = cty.UnknownVal(cty.String) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), - fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), - )) - - case v.definition.HCL: - // If the variable value is marked as being in HCL syntax, we need to - // parse it the same way as it would be interpreted in a .tfvars - // file because that is how it would get passed to Terraform CLI for - // a remote operation and we want to mimic that result as closely as - // possible. - var exprDiags hcl.Diagnostics - expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1}) - if expr != nil { - var moreDiags hcl.Diagnostics - val, moreDiags = expr.Value(nil) - exprDiags = append(exprDiags, moreDiags...) 
- } else { - // We'll have already put some errors in exprDiags above, so we'll - // just stub out the value here. - val = cty.DynamicVal - } - - // We don't have sufficient context to return decent error messages - // for syntax errors in the remote values, so we'll just return a - // generic message instead for now. - // (More complete error messages will still result from true remote - // operations, because they'll run on the remote system where we've - // materialized the values into a tfvars file we can report from.) - if exprDiags.HasErrors() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Invalid expression for var.%s", v.definition.Key), - fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key), - )) - } - - default: - // A variable value _not_ marked as HCL is always be a string, given - // literally. - val = cty.StringVal(v.definition.Value) - } - - return &terraform.InputValue{ - Value: val, - - // We mark these as "from input" with the rationale that entering - // variable values into the Terraform Cloud or Enterprise UI is, - // roughly speaking, a similar idea to entering variable values at - // the interactive CLI prompts. It's not a perfect correspondance, - // but it's closer than the other options. 
- SourceType: terraform.ValueFromInput, - }, diags -} diff --git a/internal/cloud/backend_context_test.go b/internal/cloud/backend_context_test.go deleted file mode 100644 index 635efc88b046..000000000000 --- a/internal/cloud/backend_context_test.go +++ /dev/null @@ -1,455 +0,0 @@ -package cloud - -import ( - "context" - "reflect" - "testing" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestRemoteStoredVariableValue(t *testing.T) { - tests := map[string]struct { - Def *tfe.Variable - Want cty.Value - WantError string - }{ - "string literal": { - &tfe.Variable{ - Key: "test", - Value: "foo", - HCL: false, - Sensitive: false, - }, - cty.StringVal("foo"), - ``, - }, - "string HCL": { - &tfe.Variable{ - Key: "test", - Value: `"foo"`, - HCL: true, - Sensitive: false, - }, - cty.StringVal("foo"), - ``, - }, - "list HCL": { - &tfe.Variable{ - Key: "test", - Value: `[]`, - HCL: true, - Sensitive: false, - }, - cty.EmptyTupleVal, - ``, - }, - "null HCL": { - &tfe.Variable{ - Key: "test", - Value: `null`, - HCL: true, - Sensitive: false, - }, - cty.NullVal(cty.DynamicPseudoType), - ``, - }, - "literal sensitive": { - &tfe.Variable{ - Key: "test", - HCL: false, - Sensitive: true, - }, - cty.UnknownVal(cty.String), - ``, - }, - "HCL sensitive": { - &tfe.Variable{ - Key: "test", - HCL: true, - Sensitive: true, - }, - cty.DynamicVal, - ``, - }, - "HCL computation": { - // This (stored expressions 
containing computation) is not a case - // we intentionally supported, but it became possible for remote - // operations in Terraform 0.12 (due to Terraform Cloud/Enterprise - // just writing the HCL verbatim into generated `.tfvars` files). - // We support it here for consistency, and we continue to support - // it in both places for backward-compatibility. In practice, - // there's little reason to do computation in a stored variable - // value because references are not supported. - &tfe.Variable{ - Key: "test", - Value: `[for v in ["a"] : v]`, - HCL: true, - Sensitive: false, - }, - cty.TupleVal([]cty.Value{cty.StringVal("a")}), - ``, - }, - "HCL syntax error": { - &tfe.Variable{ - Key: "test", - Value: `[`, - HCL: true, - Sensitive: false, - }, - cty.DynamicVal, - `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, - }, - "HCL with references": { - &tfe.Variable{ - Key: "test", - Value: `foo.bar`, - HCL: true, - Sensitive: false, - }, - cty.DynamicVal, - `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - v := &remoteStoredVariableValue{ - definition: test.Def, - } - // This ParseVariableValue implementation ignores the parsing mode, - // so we'll just always parse literal here. (The parsing mode is - // selected by the remote server, not by our local configuration.) 
- gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) - if test.WantError != "" { - if !diags.HasErrors() { - t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) - } - errStr := diags.Err().Error() - if errStr != test.WantError { - t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) - } - } else { - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - got := gotIV.Value - if !test.Want.RawEquals(got) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - } - }) - } -} - -func TestRemoteContextWithVars(t *testing.T) { - catTerraform := tfe.CategoryTerraform - catEnv := tfe.CategoryEnv - - tests := map[string]struct { - Opts *tfe.VariableCreateOptions - WantError string - }{ - "Terraform variable": { - &tfe.VariableCreateOptions{ - Category: &catTerraform, - }, - `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, - }, - "environment variable": { - &tfe.VariableCreateOptions{ - Category: &catEnv, - }, - ``, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - configDir := "./testdata/empty" - - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - defer configCleanup() - - workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) - if err != nil { - t.Fatal(err) - } - - streams, _ := terminal.StreamsForTesting(t) - view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) - - op := &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewLocker(0, view), - Workspace: testBackendSingleWorkspaceName, - } - - v := test.Opts - if v.Key == nil { - key := "key" - v.Key = &key - } - b.client.Variables.Create(context.TODO(), workspaceID, *v) - - _, _, diags := b.LocalRun(op) - - if test.WantError != "" { - if !diags.HasErrors() { - t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) - } - errStr := diags.Err().Error() - if errStr != test.WantError { - t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) - } - // When Context() returns an error, it should unlock the state, - // so re-locking it is expected to succeed. 
- stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state: %s", err.Error()) - } - } else { - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - // When Context() succeeds, this should fail w/ "workspace already locked" - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { - t.Fatal("unexpected success locking state after Context") - } - } - }) - } -} - -func TestRemoteVariablesDoNotOverride(t *testing.T) { - catTerraform := tfe.CategoryTerraform - - varName1 := "key1" - varName2 := "key2" - varName3 := "key3" - - varValue1 := "value1" - varValue2 := "value2" - varValue3 := "value3" - - tests := map[string]struct { - localVariables map[string]backend.UnparsedVariableValue - remoteVariables []*tfe.VariableCreateOptions - expectedVariables terraform.InputValues - }{ - "no local variables": { - map[string]backend.UnparsedVariableValue{}, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, - { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, - { - Key: &varName3, - Value: &varValue3, - Category: &catTerraform, - }, - }, - terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: 
cty.StringVal(varValue3), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - }, - }, - "single conflicting local variable": { - map[string]backend.UnparsedVariableValue{ - varName3: testUnparsedVariableValue{source: terraform.ValueFromNamedFile, value: cty.StringVal(varValue3)}, - }, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, { - Key: &varName3, - Value: &varValue3, - Category: &catTerraform, - }, - }, - terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: cty.StringVal(varValue3), - SourceType: terraform.ValueFromNamedFile, - SourceRange: tfdiags.SourceRange{ - Filename: "fake.tfvars", - Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - }, - }, - }, - }, - "no conflicting local variable": { - map[string]backend.UnparsedVariableValue{ - varName3: testUnparsedVariableValue{source: terraform.ValueFromNamedFile, value: cty.StringVal(varValue3)}, - }, - []*tfe.VariableCreateOptions{ - { - Key: &varName1, - Value: &varValue1, - Category: &catTerraform, - }, { - Key: &varName2, - Value: &varValue2, - Category: &catTerraform, - }, - }, - 
terraform.InputValues{ - varName1: &terraform.InputValue{ - Value: cty.StringVal(varValue1), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName2: &terraform.InputValue{ - Value: cty.StringVal(varValue2), - SourceType: terraform.ValueFromInput, - SourceRange: tfdiags.SourceRange{ - Filename: "", - Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, - }, - }, - varName3: &terraform.InputValue{ - Value: cty.StringVal(varValue3), - SourceType: terraform.ValueFromNamedFile, - SourceRange: tfdiags.SourceRange{ - Filename: "fake.tfvars", - Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, - }, - }, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - configDir := "./testdata/variables" - - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - defer configCleanup() - - workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) - if err != nil { - t.Fatal(err) - } - - streams, _ := terminal.StreamsForTesting(t) - view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) - - op := &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - StateLocker: clistate.NewLocker(0, view), - Workspace: testBackendSingleWorkspaceName, - Variables: test.localVariables, - } - - for _, v := range test.remoteVariables { - b.client.Variables.Create(context.TODO(), workspaceID, *v) - } - - lr, _, diags := b.LocalRun(op) - - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) - } - // When Context() succeeds, this should fail w/ "workspace already locked" - stateMgr, _ := 
b.StateMgr(testBackendSingleWorkspaceName) - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { - t.Fatal("unexpected success locking state after Context") - } - - actual := lr.PlanOpts.SetVariables - expected := test.expectedVariables - - for expectedKey := range expected { - actualValue := actual[expectedKey] - expectedValue := expected[expectedKey] - - if !reflect.DeepEqual(*actualValue, *expectedValue) { - t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) - } - } - }) - } -} diff --git a/internal/cloud/backend_plan.go b/internal/cloud/backend_plan.go deleted file mode 100644 index b2d465138894..000000000000 --- a/internal/cloud/backend_plan.go +++ /dev/null @@ -1,507 +0,0 @@ -package cloud - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -var planConfigurationVersionsPollInterval = 500 * time.Millisecond - -func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - log.Printf("[INFO] cloud: starting Plan operation") - - var diags tfdiags.Diagnostics - - if !w.Permissions.CanQueueRun { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Insufficient rights to generate a plan", - "The provided credentials have insufficient rights to generate a plan. 
In order "+ - "to generate plans, at least plan permissions on the workspace are required.", - )) - return nil, diags.Err() - } - - if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Custom parallelism values are currently not supported", - `Terraform Cloud does not support setting a custom parallelism `+ - `value at this time.`, - )) - } - - if op.PlanFile != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Displaying a saved plan is currently not supported", - `Terraform Cloud currently requires configuration to be present and `+ - `does not accept an existing saved plan as an argument at this time.`, - )) - } - - if op.PlanOutPath != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Saving a generated plan is currently not supported", - `Terraform Cloud does not support saving the generated execution `+ - `plan locally at this time.`, - )) - } - - if !op.HasConfig() && op.PlanMode != plans.DestroyMode { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "No configuration files found", - `Plan requires configuration to be present. Planning without a configuration `+ - `would mark everything for destruction, which is normally not what is desired. `+ - `If you would like to destroy everything, please run plan with the "-destroy" `+ - `flag or create a single empty configuration file. Otherwise, please create `+ - `a Terraform configuration file in the path being executed and try again.`, - )) - } - - // Return if there are any errors. 
- if diags.HasErrors() { - return nil, diags.Err() - } - - return b.plan(stopCtx, cancelCtx, op, w) -} - -func (b *Cloud) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { - if b.CLI != nil { - header := planDefaultHeader - if op.Type == backend.OperationTypeApply || op.Type == backend.OperationTypeRefresh { - header = applyDefaultHeader - } - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) - } - - configOptions := tfe.ConfigurationVersionCreateOptions{ - AutoQueueRuns: tfe.Bool(false), - Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), - } - - cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) - if err != nil { - return nil, generalError("Failed to create configuration version", err) - } - - var configDir string - if op.ConfigDir != "" { - // De-normalize the configuration directory path. - configDir, err = filepath.Abs(op.ConfigDir) - if err != nil { - return nil, generalError( - "Failed to get absolute path of the configuration directory: %v", err) - } - - // Make sure to take the working directory into account by removing - // the working directory from the current path. This will result in - // a path that points to the expected root of the workspace. - configDir = filepath.Clean(strings.TrimSuffix( - filepath.Clean(configDir), - filepath.Clean(w.WorkingDirectory), - )) - - // If the workspace has a subdirectory as its working directory then - // our configDir will be some parent directory of the current working - // directory. Users are likely to find that surprising, so we'll - // produce an explicit message about it to be transparent about what - // we are doing and why. - if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { - if b.CLI != nil { - b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` -The remote workspace is configured to work with configuration at -%s relative to the target repository. 
- -Terraform will upload the contents of the following directory, -excluding files or directories as defined by a .terraformignore file -at %s/.terraformignore (if it is present), -in order to capture the filesystem context the remote workspace expects: - %s -`), w.WorkingDirectory, configDir, configDir) + "\n") - } - } - - } else { - // We did a check earlier to make sure we either have a config dir, - // or the plan is run with -destroy. So this else clause will only - // be executed when we are destroying and doesn't need the config. - configDir, err = ioutil.TempDir("", "tf") - if err != nil { - return nil, generalError("Failed to create temporary directory", err) - } - defer os.RemoveAll(configDir) - - // Make sure the configured working directory exists. - err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) - if err != nil { - return nil, generalError( - "Failed to create temporary working directory", err) - } - } - - err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) - if err != nil { - return nil, generalError("Failed to upload configuration files", err) - } - - uploaded := false - for i := 0; i < 60 && !uploaded; i++ { - select { - case <-stopCtx.Done(): - return nil, context.Canceled - case <-cancelCtx.Done(): - return nil, context.Canceled - case <-time.After(planConfigurationVersionsPollInterval): - cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) - if err != nil { - return nil, generalError("Failed to retrieve configuration version", err) - } - - if cv.Status == tfe.ConfigurationUploaded { - uploaded = true - } - } - } - - if !uploaded { - return nil, generalError( - "Failed to upload configuration files", errors.New("operation timed out")) - } - - runOptions := tfe.RunCreateOptions{ - ConfigurationVersion: cv, - Refresh: tfe.Bool(op.PlanRefresh), - Workspace: w, - AutoApply: tfe.Bool(op.AutoApprove), - } - - switch op.PlanMode { - case plans.NormalMode: - // okay, but we don't need to do 
anything special for this - case plans.RefreshOnlyMode: - runOptions.RefreshOnly = tfe.Bool(true) - case plans.DestroyMode: - runOptions.IsDestroy = tfe.Bool(true) - default: - // Shouldn't get here because we should update this for each new - // plan mode we add, mapping it to the corresponding RunCreateOptions - // field. - return nil, generalError( - "Invalid plan mode", - fmt.Errorf("Terraform Cloud doesn't support %s", op.PlanMode), - ) - } - - if len(op.Targets) != 0 { - runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) - for _, addr := range op.Targets { - runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) - } - } - - if len(op.ForceReplace) != 0 { - runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) - for _, addr := range op.ForceReplace { - runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) - } - } - - config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) - if configDiags.HasErrors() { - return nil, fmt.Errorf("error loading config with snapshot: %w", configDiags.Errs()[0]) - } - variables, varDiags := ParseCloudRunVariables(op.Variables, config.Module.Variables) - - if varDiags.HasErrors() { - return nil, varDiags.Err() - } - - runVariables := make([]*tfe.RunVariable, 0, len(variables)) - for name, value := range variables { - runVariables = append(runVariables, &tfe.RunVariable{ - Key: name, - Value: value, - }) - } - runOptions.Variables = runVariables - - r, err := b.client.Runs.Create(stopCtx, runOptions) - if err != nil { - return r, generalError("Failed to create run", err) - } - - // When the lock timeout is set, if the run is still pending and - // cancellable after that period, we attempt to cancel it. - if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { - go func() { - select { - case <-stopCtx.Done(): - return - case <-cancelCtx.Done(): - return - case <-time.After(lockTimeout): - // Retrieve the run to get its current status. 
- r, err := b.client.Runs.Read(cancelCtx, r.ID) - if err != nil { - log.Printf("[ERROR] error reading run: %v", err) - return - } - - if r.Status == tfe.RunPending && r.Actions.IsCancelable { - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) - } - - // We abuse the auto aprove flag to indicate that we do not - // want to ask if the remote operation should be canceled. - op.AutoApprove = true - - p, err := os.FindProcess(os.Getpid()) - if err != nil { - log.Printf("[ERROR] error searching process ID: %v", err) - return - } - p.Signal(syscall.SIGINT) - } - } - }() - } - - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( - runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) - } - - // Retrieve the run to get task stages. - // Task Stages are calculated upfront so we only need to call this once for the run. - taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) - if err != nil { - return r, err - } - - if stage, ok := taskStages[tfe.PrePlan]; ok { - if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-plan Tasks"); err != nil { - return r, err - } - } - - r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) - if err != nil { - return r, err - } - - err = b.renderPlanLogs(stopCtx, op, r) - if err != nil { - return r, err - } - - // Retrieve the run to get its current status. - r, err = b.client.Runs.Read(stopCtx, r.ID) - if err != nil { - return r, generalError("Failed to retrieve run", err) - } - - // If the run is canceled or errored, we still continue to the - // cost-estimation and policy check phases to ensure we render any - // results available. In the case of a hard-failed policy check, the - // status of the run will be "errored", but there is still policy - // information which should be shown. 
- - if stage, ok := taskStages[tfe.PostPlan]; ok { - if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Post-plan Tasks"); err != nil { - return r, err - } - } - - // Show any cost estimation output. - if r.CostEstimate != nil { - err = b.costEstimate(stopCtx, cancelCtx, op, r) - if err != nil { - return r, err - } - } - - // Check any configured sentinel policies. - if len(r.PolicyChecks) > 0 { - err = b.checkPolicy(stopCtx, cancelCtx, op, r) - if err != nil { - return r, err - } - } - - return r, nil -} - -// renderPlanLogs reads the streamed plan JSON logs and calls the JSON Plan renderer (jsonformat.RenderPlan) to -// render the plan output. The plan output is fetched from the redacted output endpoint. -func (b *Cloud) renderPlanLogs(ctx context.Context, op *backend.Operation, run *tfe.Run) error { - logs, err := b.client.Plans.Logs(ctx, run.Plan.ID) - if err != nil { - return err - } - - if b.CLI != nil { - reader := bufio.NewReaderSize(logs, 64*1024) - - for next := true; next; { - var l, line []byte - var err error - - for isPrefix := true; isPrefix; { - l, isPrefix, err = reader.ReadLine() - if err != nil { - if err != io.EOF { - return generalError("Failed to read logs", err) - } - next = false - } - - line = append(line, l...) - } - - if next || len(line) > 0 { - log := &jsonformat.JSONLog{} - if err := json.Unmarshal(line, log); err != nil { - // If we can not parse the line as JSON, we will simply - // print the line. This maintains backwards compatibility for - // users who do not wish to enable structured output in their - // workspace. - b.CLI.Output(string(line)) - continue - } - - // We will ignore plan output, change summary or outputs logs - // during the plan phase. 
- if log.Type == jsonformat.LogOutputs || - log.Type == jsonformat.LogChangeSummary || - log.Type == jsonformat.LogPlannedChange { - continue - } - - if b.renderer != nil { - // Otherwise, we will print the log - err := b.renderer.RenderLog(log) - if err != nil { - return err - } - } - } - } - } - - // Get the run's current status and include the workspace. We will check if - // the run has errored and if structured output is enabled. - run, err = b.client.Runs.ReadWithOptions(ctx, run.ID, &tfe.RunReadOptions{ - Include: []tfe.RunIncludeOpt{tfe.RunWorkspace}, - }) - if err != nil { - return err - } - - // If the run was errored, canceled, or discarded we will not resume the rest - // of this logic and attempt to render the plan. - if run.Status == tfe.RunErrored || run.Status == tfe.RunCanceled || - run.Status == tfe.RunDiscarded { - // We won't return an error here since we need to resume the logic that - // follows after rendering the logs (run tasks, cost estimation, etc.) - return nil - } - - // Determine whether we should call the renderer to generate the plan output - // in human readable format. Otherwise we risk duplicate plan output since - // plan output may be contained in the streamed log file. - if ok, err := b.shouldRenderStructuredRunOutput(run); ok { - // Fetch the redacted plan. - redacted, err := readRedactedPlan(ctx, b.client.BaseURL(), b.token, run.Plan.ID) - if err != nil { - return err - } - - // Render plan output. - b.renderer.RenderHumanPlan(*redacted, op.PlanMode) - } else if err != nil { - return err - } - - return nil -} - -// shouldRenderStructuredRunOutput ensures the remote workspace has structured -// run output enabled and, if using Terraform Enterprise, ensures it is a release -// that supports enabling SRO for CLI-driven runs. The plan output will have -// already been rendered when the logs were read if this wasn't the case. 
-func (b *Cloud) shouldRenderStructuredRunOutput(run *tfe.Run) (bool, error) { - if b.renderer == nil || !run.Workspace.StructuredRunOutputEnabled { - return false, nil - } - - // If the cloud backend is configured against TFC, we only require that - // the workspace has structured run output enabled. - if b.client.IsCloud() && run.Workspace.StructuredRunOutputEnabled { - return true, nil - } - - // If the cloud backend is configured against TFE, ensure the release version - // supports enabling SRO for CLI runs. - if b.client.IsEnterprise() { - tfeVersion := b.client.RemoteTFEVersion() - if tfeVersion != "" { - v := strings.Split(tfeVersion[1:], "-") - releaseDate, err := strconv.Atoi(v[0]) - if err != nil { - return false, err - } - - // Any release older than 202302-1 will not support enabling SRO for - // CLI-driven runs - if releaseDate < 202302 { - return false, nil - } else if run.Workspace.StructuredRunOutputEnabled { - return true, nil - } - } - } - - // Version of TFE is unknowable - return false, nil -} - -const planDefaultHeader = ` -[reset][yellow]Running plan in Terraform Cloud. Output will stream here. Pressing Ctrl-C -will stop streaming the logs, but will not stop the plan running remotely.[reset] - -Preparing the remote plan... -` - -const runHeader = ` -[reset][yellow]To view this run in a browser, visit: -https://%s/app/%s/%s/runs/%s[reset] -` - -// The newline in this error is to make it look good in the CLI! -const lockTimeoutErr = ` -[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. 
-[reset] -` diff --git a/internal/cloud/backend_plan_test.go b/internal/cloud/backend_plan_test.go deleted file mode 100644 index a4c1e4998a53..000000000000 --- a/internal/cloud/backend_plan_test.go +++ /dev/null @@ -1,1370 +0,0 @@ -package cloud - -import ( - "context" - "net/http" - "os" - "os/signal" - "strings" - "syscall" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/mitchellh/cli" -) - -func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - return testOperationPlanWithTimeout(t, configDir, 0) -} - -func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewView(streams) - stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) - operationView := views.NewOperation(arguments.ViewHuman, false, view) - - // Many of our tests use an overridden "null" provider that's just in-memory - // inside the test process, not a separate plugin on 
disk. - depLocks := depsfile.NewLocks() - depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.terraform.io/hashicorp/null")) - - return &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - PlanRefresh: true, - StateLocker: clistate.NewLocker(timeout, stateLockerView), - Type: backend.OperationTypePlan, - View: operationView, - DependencyLocks: depLocks, - }, configCleanup, done -} - -func TestCloud_planBasic(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) - } -} - -func TestCloud_planJSONBasic(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-basic") - defer 
configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - outp := close(t) - gotOut := outp.Stdout() - - if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", gotOut) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) - } -} - -func TestCloud_planCanceled(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - // Stop the run to simulate a Ctrl-C. 
- run.Stop() - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error()) - } -} - -func TestCloud_planLongLine(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planJSONFull(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-full") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != 
backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - outp := close(t) - gotOut := outp.Stdout() - - if !strings.Contains(gotOut, "tfcoremock_simple_resource.example: Refreshing state... [id=my-simple-resource]") { - t.Fatalf("expected plan log: %s", gotOut) - } - - if !strings.Contains(gotOut, "2 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", gotOut) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after the operation finished - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) - } -} - -func TestCloud_planWithoutPermissions(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - // Create a named workspace without permissions. - w, err := b.client.Workspaces.Create( - context.Background(), - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("prod"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - w.Permissions.CanQueueRun = false - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.Workspace = "prod" - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { - t.Fatalf("expected a permissions error, got: %v", errOutput) - } -} - -func TestCloud_planWithParallelism(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - 
defer configCleanup() - - if b.ContextOpts == nil { - b.ContextOpts = &terraform.ContextOpts{} - } - b.ContextOpts.Parallelism = 3 - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "parallelism values are currently not supported") { - t.Fatalf("expected a parallelism error, got: %v", errOutput) - } -} - -func TestCloud_planWithPlan(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.PlanFile = &planfile.Reader{} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "saved plan is currently not supported") { - t.Fatalf("expected a saved plan error, got: %v", errOutput) - } -} - -func TestCloud_planWithPath(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - - op.PlanOutPath = "./testdata/plan" - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected 
plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "generated plan is currently not supported") { - t.Fatalf("expected a generated plan error, got: %v", errOutput) - } -} - -func TestCloud_planWithoutRefresh(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanRefresh = false - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - // We should find a run inside the mock client that has refresh set - // to false. - runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(false, run.Refresh); diff != "" { - t.Errorf("wrong Refresh setting in the created run\n%s", diff) - } - } -} - -func TestCloud_planWithRefreshOnly(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatal("expected a non-empty plan") - } - - // We should find a run inside the mock client that has refresh-only set - // to 
true. - runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { - t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) - } - } -} - -func TestCloud_planWithTarget(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - // When the backend code creates a new run, we'll tweak it so that it - // has a cost estimation object with the "skipped_due_to_targeting" status, - // emulating how a real server is expected to behave in that case. - b.client.Runs.(*MockRuns).ModifyNewRun = func(client *MockClient, options tfe.RunCreateOptions, run *tfe.Run) { - const fakeID = "fake" - // This is the cost estimate object embedded in the run itself which - // the backend will use to learn the ID to request from the cost - // estimates endpoint. It's pending to simulate what a freshly-created - // run is likely to look like. - run.CostEstimate = &tfe.CostEstimate{ - ID: fakeID, - Status: "pending", - } - // The backend will then use the main cost estimation API to retrieve - // the same ID indicated in the object above, where we'll then return - // the status "skipped_due_to_targeting" to trigger the special skip - // message in the backend output. 
- client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{ - ID: fakeID, - Status: "skipped_due_to_targeting", - } - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") - - op.Targets = []addrs.Targetable{addr} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // testBackendDefault above attached a "mock UI" to our backend, so we - // can retrieve its non-error output via the OutputWriter in-memory buffer. - gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String() - if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) { - t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput) - } - - // We should find a run inside the mock client that has the same - // target address we requested above. 
- runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { - t.Errorf("wrong TargetAddrs in the created run\n%s", diff) - } - } -} - -func TestCloud_planWithReplace(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") - - op.ForceReplace = []addrs.AbsResourceInstance{addr} - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - if run.PlanEmpty { - t.Fatalf("expected plan to be non-empty") - } - - // We should find a run inside the mock client that has the same - // refresh address we requested above. 
- runsAPI := b.client.Runs.(*MockRuns) - if got, want := len(runsAPI.Runs), 1; got != want { - t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) - } - for _, run := range runsAPI.Runs { - if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { - t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) - } - } -} - -func TestCloud_planWithRequiredVariables(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables") - defer configCleanup() - defer done(t) - - op.Variables = testVariables(terraform.ValueFromCLIArg, "foo") // "bar" variable defined in config is missing - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - // The usual error of a required variable being missing is deferred and the operation - // is successful. 
- if run.Result != backend.OperationSuccess { - t.Fatal("expected plan operation to succeed") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } -} - -func TestCloud_planNoConfig(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/empty") - defer configCleanup() - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - output := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := output.Stderr() - if !strings.Contains(errOutput, "configuration files found") { - t.Fatalf("expected configuration files error, got: %v", errOutput) - } -} - -func TestCloud_planNoChanges(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { - t.Fatalf("expected no changes in plan summary: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } -} - -func TestCloud_planForceLocal(t *testing.T) { - // Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use - // the local backend with itself as embedded backend. - if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { - t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) - } - defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planWithoutOperationsEntitlement(t *testing.T) { - b, bCleanup := testBackendNoOperations(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - streams, done := terminal.StreamsForTesting(t) - view 
:= views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planWorkspaceWithoutOperations(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - ctx := context.Background() - - // Create a named workspace that doesn't allow operations. - _, err := b.client.Workspaces.Create( - ctx, - b.organization, - tfe.WorkspaceCreateOptions{ - Name: tfe.String("no-operations"), - }, - ) - if err != nil { - t.Fatalf("error creating named workspace: %v", err) - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = "no-operations" - - streams, done := terminal.StreamsForTesting(t) - view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) - op.View = view - - run, err := b.Operation(ctx, op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("unexpected TFC header in output: %s", 
output) - } - if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planLockTimeout(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - ctx := context.Background() - - // Retrieve the workspace used to run this operation in. - w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) - if err != nil { - t.Fatalf("error retrieving workspace: %v", err) - } - - // Create a new configuration version. - c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) - if err != nil { - t.Fatalf("error creating configuration version: %v", err) - } - - // Create a pending run to block this run. - _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ - ConfigurationVersion: c, - Workspace: w, - }) - if err != nil { - t.Fatalf("error creating pending run: %v", err) - } - - op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) - defer configCleanup() - defer done(t) - - input := testInput(t, map[string]string{ - "cancel": "yes", - "approve": "yes", - }) - - op.UIIn = input - op.UIOut = b.CLI - op.Workspace = testBackendSingleWorkspaceName - - _, err = b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - sigint := make(chan os.Signal, 1) - signal.Notify(sigint, syscall.SIGINT) - select { - case <-sigint: - // Stop redirecting SIGINT signals. 
- signal.Stop(sigint) - case <-time.After(200 * time.Millisecond): - t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") - } - - if len(input.answers) != 2 { - t.Fatalf("expected unused answers, got: %v", input.answers) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("expected lock timout error in output: %s", output) - } - if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("unexpected plan summary in output: %s", output) - } -} - -func TestCloud_planDestroy(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } -} - -func TestCloud_planDestroyNoConfig(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/empty") - defer configCleanup() - defer done(t) - - op.PlanMode = plans.DestroyMode - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } -} - -func 
TestCloud_planWithWorkingDirectory(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - options := tfe.WorkspaceUpdateOptions{ - WorkingDirectory: tfe.String("terraform"), - } - - // Configure the workspace to use a custom working directory. - _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) - if err != nil { - t.Fatalf("error configuring working directory: %v", err) - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/terraform") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "The remote workspace is configured to work with configuration") { - t.Fatalf("expected working directory warning: %s", output) - } - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - options := tfe.WorkspaceUpdateOptions{ - WorkingDirectory: tfe.String("terraform"), - } - - // Configure the workspace to use a custom working directory. 
- _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) - if err != nil { - t.Fatalf("error configuring working directory: %v", err) - } - - wd, err := os.Getwd() - if err != nil { - t.Fatalf("error getting current working directory: %v", err) - } - - // We need to change into the configuration directory to make sure - // the logic to upload the correct slug is working as expected. - if err := os.Chdir("./testdata/plan-with-working-directory/terraform"); err != nil { - t.Fatalf("error changing directory: %v", err) - } - defer os.Chdir(wd) // Make sure we change back again when were done. - - // For this test we need to give our current directory instead of the - // full path to the configuration as we already changed directories. - op, configCleanup, done := testOperationPlan(t, ".") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planCostEstimation(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", 
err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Resources: 1 of 1 estimated") { - t.Fatalf("expected cost estimate result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planPolicyPass(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - if run.PlanEmpty { - t.Fatalf("expected a non-empty plan") - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planPolicyHardFail(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") - defer configCleanup() - - op.Workspace = 
testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := viewOutput.Stderr() - if !strings.Contains(errOutput, "hard failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planPolicySoftFail(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") - defer configCleanup() - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - viewOutput := done(t) - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if !run.PlanEmpty { - t.Fatalf("expected plan to be empty") - } - - errOutput := viewOutput.Stderr() - if !strings.Contains(errOutput, "soft failed") { - t.Fatalf("expected a policy check error, got: %v", errOutput) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("expected policy check 
result in output: %s", output) - } - if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("expected plan summary in output: %s", output) - } -} - -func TestCloud_planWithRemoteError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Running plan in Terraform Cloud") { - t.Fatalf("expected TFC header in output: %s", output) - } - if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("expected plan error in output: %s", output) - } -} - -func TestCloud_planJSONWithRemoteError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - stream, close := terminal.StreamsForTesting(t) - - // Initialize the plan renderer - b.renderer = &jsonformat.Renderer{ - Streams: stream, - Colorize: mockColorize(), - } - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-error") - defer configCleanup() - defer done(t) - - op.Workspace = testBackendSingleWorkspaceName - - mockSROWorkspace(t, b, op.Workspace) - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result == backend.OperationSuccess { - t.Fatal("expected plan operation to fail") - } - if run.Result.ExitStatus() != 1 { - t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) - } - - outp := close(t) - gotOut := 
outp.Stdout() - - if !strings.Contains(gotOut, "Unsupported block type") { - t.Fatalf("unexpected plan error in output: %s", gotOut) - } -} - -func TestCloud_planOtherError(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationPlan(t, "./testdata/plan") - defer configCleanup() - defer done(t) - - op.Workspace = "network-error" // custom error response in backend_mock.go - - _, err := b.Operation(context.Background(), op) - if err == nil { - t.Errorf("expected error, got success") - } - - if !strings.Contains(err.Error(), - "Terraform Cloud returned an unexpected error:\n\nI'm a little teacup") { - t.Fatalf("expected error message, got: %s", err.Error()) - } -} - -func TestCloud_planShouldRenderSRO(t *testing.T) { - t.Run("when instance is TFC", func(t *testing.T) { - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - w.Header().Set("TFP-AppName", "Terraform Cloud") - }, - } - b, bCleanup := testBackendWithHandlers(t, handlers) - t.Cleanup(bCleanup) - b.renderer = &jsonformat.Renderer{} - - t.Run("and SRO is enabled", func(t *testing.T) { - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: true, - }, - } - assertSRORendered(t, b, r, true) - }) - - t.Run("and SRO is not enabled", func(t *testing.T) { - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: false, - }, - } - assertSRORendered(t, b, r, false) - }) - - }) - - t.Run("when instance is TFE and version supports CLI SRO", func(t *testing.T) { - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - w.Header().Set("TFP-AppName", "Terraform Enterprise") - 
w.Header().Set("X-TFE-Version", "v202303-1") - }, - } - b, bCleanup := testBackendWithHandlers(t, handlers) - t.Cleanup(bCleanup) - b.renderer = &jsonformat.Renderer{} - - t.Run("and SRO is enabled", func(t *testing.T) { - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: true, - }, - } - assertSRORendered(t, b, r, true) - }) - - t.Run("and SRO is not enabled", func(t *testing.T) { - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: false, - }, - } - assertSRORendered(t, b, r, false) - }) - }) - - t.Run("when instance is a known unsupported TFE release", func(t *testing.T) { - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - w.Header().Set("TFP-AppName", "Terraform Enterprise") - w.Header().Set("X-TFE-Version", "v202208-1") - }, - } - b, bCleanup := testBackendWithHandlers(t, handlers) - t.Cleanup(bCleanup) - b.renderer = &jsonformat.Renderer{} - - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: true, - }, - } - assertSRORendered(t, b, r, false) - }) - - t.Run("when instance is an unknown TFE release", func(t *testing.T) { - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - }, - } - b, bCleanup := testBackendWithHandlers(t, handlers) - t.Cleanup(bCleanup) - b.renderer = &jsonformat.Renderer{} - - r := &tfe.Run{ - Workspace: &tfe.Workspace{ - StructuredRunOutputEnabled: true, - }, - } - assertSRORendered(t, b, r, false) - }) - -} - -func assertSRORendered(t *testing.T, b *Cloud, r *tfe.Run, shouldRender bool) { - got, err := b.shouldRenderStructuredRunOutput(r) - if err != nil { - t.Fatalf("expected no error: %v", err) - } - if shouldRender != 
got { - t.Fatalf("expected SRO to be rendered: %t, got %t", shouldRender, got) - } -} diff --git a/internal/cloud/backend_refresh_test.go b/internal/cloud/backend_refresh_test.go deleted file mode 100644 index 3abb93577793..000000000000 --- a/internal/cloud/backend_refresh_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package cloud - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/clistate" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/mitchellh/cli" -) - -func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - return testOperationRefreshWithTimeout(t, configDir, 0) -} - -func testOperationRefreshWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { - t.Helper() - - _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir) - - streams, done := terminal.StreamsForTesting(t) - view := views.NewView(streams) - stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) - operationView := views.NewOperation(arguments.ViewHuman, false, view) - - return &backend.Operation{ - ConfigDir: configDir, - ConfigLoader: configLoader, - PlanRefresh: true, - StateLocker: clistate.NewLocker(timeout, stateLockerView), - Type: backend.OperationTypeRefresh, - View: operationView, - }, configCleanup, done -} - -func TestCloud_refreshBasicActuallyRunsApplyRefresh(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - op, configCleanup, done := testOperationRefresh(t, 
"./testdata/refresh") - defer configCleanup() - defer done(t) - - op.UIOut = b.CLI - b.CLIColor = b.cliColorize() - op.PlanMode = plans.RefreshOnlyMode - op.Workspace = testBackendSingleWorkspaceName - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("error starting operation: %v", err) - } - - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) - } - - output := b.CLI.(*cli.MockUi).OutputWriter.String() - if !strings.Contains(output, "Proceeding with 'terraform apply -refresh-only -auto-approve'") { - t.Fatalf("expected TFC header in output: %s", output) - } - - stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) - // An error suggests that the state was not unlocked after apply - if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { - t.Fatalf("unexpected error locking state after apply: %s", err.Error()) - } -} diff --git a/internal/cloud/backend_test.go b/internal/cloud/backend_test.go deleted file mode 100644 index 081fce650570..000000000000 --- a/internal/cloud/backend_test.go +++ /dev/null @@ -1,1219 +0,0 @@ -package cloud - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "testing" - - tfe "github.com/hashicorp/go-tfe" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/zclconf/go-cty/cty" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -func TestCloud(t *testing.T) { - var _ backend.Enhanced = New(nil) - var _ backend.CLI = New(nil) -} - -func TestCloud_backendWithName(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - workspaces, err := b.Workspaces() - if err != nil { - t.Fatalf("error: %v", err) - } - - if len(workspaces) != 1 || workspaces[0] != testBackendSingleWorkspaceName { - 
t.Fatalf("should only have a single configured workspace matching the configured 'name' strategy, but got: %#v", workspaces) - } - - if _, err := b.StateMgr("foo"); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected fetching a state which is NOT the single configured workspace to have an ErrWorkspacesNotSupported error, but got: %v", err) - } - - if err := b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected deleting the single configured workspace name to result in an error, but got: %v", err) - } - - if err := b.DeleteWorkspace("foo", true); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected deleting a workspace which is NOT the configured workspace name to result in an error, but got: %v", err) - } -} - -func TestCloud_backendWithTags(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - backend.TestBackendStates(t, b) - - // Test pagination works - for i := 0; i < 25; i++ { - _, err := b.StateMgr(fmt.Sprintf("foo-%d", i+1)) - if err != nil { - t.Fatalf("error: %s", err) - } - } - - workspaces, err := b.Workspaces() - if err != nil { - t.Fatalf("error: %s", err) - } - actual := len(workspaces) - if actual != 26 { - t.Errorf("expected 26 workspaces (over one standard paginated response), got %d", actual) - } -} - -func TestCloud_PrepareConfig(t *testing.T) { - cases := map[string]struct { - config cty.Value - expectedErr string - }{ - "null organization": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, - }, - "null workspace": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": 
cty.StringVal("org"), - "workspaces": cty.NullVal(cty.String), - }), - expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. Either workspace "tags" or "name" is required.`, - }, - "workspace: empty tags, name": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("org"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. Either workspace "tags" or "name" is required.`, - }, - "workspace: name present": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("org"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, - }, - "workspace: name and tags present": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("org"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }), - expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, - }, - } - - for name, tc := range cases { - s := testServer(t) - b := New(testDisco(s)) - - // Validate - _, valDiags := b.PrepareConfig(tc.config) - if valDiags.Err() != nil && tc.expectedErr != "" { - actualErr := valDiags.Err().Error() - if !strings.Contains(actualErr, tc.expectedErr) { - t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) - } - } - } -} - -func TestCloud_PrepareConfigWithEnvVars(t *testing.T) { - cases := map[string]struct { - config cty.Value - vars map[string]string - expectedErr string - }{ - "with no organization": { - config: cty.ObjectVal(map[string]cty.Value{ - 
"organization": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_CLOUD_ORGANIZATION": "example-org", - }, - }, - "with no organization attribute or env var": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{}, - expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, - }, - "null workspace": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("hashicorp"), - "workspaces": cty.NullVal(cty.String), - }), - vars: map[string]string{ - "TF_WORKSPACE": "my-workspace", - }, - }, - "organization and workspace env var": { - config: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.NullVal(cty.String), - "workspaces": cty.NullVal(cty.String), - }), - vars: map[string]string{ - "TF_CLOUD_ORGANIZATION": "hashicorp", - "TF_WORKSPACE": "my-workspace", - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - s := testServer(t) - b := New(testDisco(s)) - - for k, v := range tc.vars { - os.Setenv(k, v) - } - t.Cleanup(func() { - for k := range tc.vars { - os.Unsetenv(k) - } - }) - - _, valDiags := b.PrepareConfig(tc.config) - if valDiags.Err() != nil && tc.expectedErr != "" { - actualErr := valDiags.Err().Error() - if !strings.Contains(actualErr, tc.expectedErr) { - t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) - } - } - }) - } -} - -func TestCloud_configWithEnvVars(t *testing.T) { - cases := map[string]struct { - setup func(b *Cloud) - config cty.Value - vars map[string]string - expectedOrganization string - expectedHostname string - 
expectedWorkspaceName string - expectedErr string - }{ - "with no organization specified": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_CLOUD_ORGANIZATION": "hashicorp", - }, - expectedOrganization: "hashicorp", - }, - "with both organization and env var specified": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_CLOUD_ORGANIZATION": "we-should-not-see-this", - }, - expectedOrganization: "hashicorp", - }, - "with no hostname specified": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_CLOUD_HOSTNAME": "private.hashicorp.engineering", - }, - expectedHostname: "private.hashicorp.engineering", - }, - "with hostname and env var specified": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("private.hashicorp.engineering"), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_CLOUD_HOSTNAME": "mycool.tfe-host.io", - }, - expectedHostname: "private.hashicorp.engineering", - }, - 
"an invalid workspace env var": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "workspaces": cty.NullVal(cty.Object(map[string]cty.Type{ - "name": cty.String, - "tags": cty.Set(cty.String), - })), - }), - vars: map[string]string{ - "TF_WORKSPACE": "i-dont-exist-in-org", - }, - expectedErr: `Invalid workspace selection: Terraform failed to find workspace "i-dont-exist-in-org" in organization hashicorp`, - }, - "workspaces and env var specified": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("mordor"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("mt-doom"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - vars: map[string]string{ - "TF_WORKSPACE": "shire", - }, - expectedWorkspaceName: "mt-doom", - }, - "env var workspace does not have specified tag": { - setup: func(b *Cloud) { - b.client.Organizations.Create(context.Background(), tfe.OrganizationCreateOptions{ - Name: tfe.String("mordor"), - }) - - b.client.Workspaces.Create(context.Background(), "mordor", tfe.WorkspaceCreateOptions{ - Name: tfe.String("shire"), - }) - }, - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("mordor"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal([]cty.Value{ - cty.StringVal("cloud"), - }), - }), - }), - vars: map[string]string{ - "TF_WORKSPACE": "shire", - }, - expectedErr: "Terraform failed to find workspace \"shire\" with the tags specified in your configuration:\n[cloud]", - }, - "env var workspace has specified tag": { - setup: func(b *Cloud) { - b.client.Organizations.Create(context.Background(), tfe.OrganizationCreateOptions{ - Name: 
tfe.String("mordor"), - }) - - b.client.Workspaces.Create(context.Background(), "mordor", tfe.WorkspaceCreateOptions{ - Name: tfe.String("shire"), - Tags: []*tfe.Tag{ - { - Name: "hobbity", - }, - }, - }) - }, - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.StringVal("mordor"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal([]cty.Value{ - cty.StringVal("hobbity"), - }), - }), - }), - vars: map[string]string{ - "TF_WORKSPACE": "shire", - }, - expectedWorkspaceName: "", // No error is raised, but workspace is not set - }, - "with everything set as env vars": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "token": cty.NullVal(cty.String), - "organization": cty.NullVal(cty.String), - "workspaces": cty.NullVal(cty.String), - }), - vars: map[string]string{ - "TF_CLOUD_ORGANIZATION": "mordor", - "TF_WORKSPACE": "mt-doom", - "TF_CLOUD_HOSTNAME": "mycool.tfe-host.io", - }, - expectedOrganization: "mordor", - expectedWorkspaceName: "mt-doom", - expectedHostname: "mycool.tfe-host.io", - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - b, cleanup := testUnconfiguredBackend(t) - t.Cleanup(cleanup) - - for k, v := range tc.vars { - os.Setenv(k, v) - } - - t.Cleanup(func() { - for k := range tc.vars { - os.Unsetenv(k) - } - }) - - _, valDiags := b.PrepareConfig(tc.config) - if valDiags.Err() != nil { - t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) - } - - if tc.setup != nil { - tc.setup(b) - } - - diags := b.Configure(tc.config) - if (diags.Err() != nil || tc.expectedErr != "") && - (diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.expectedErr)) { - t.Fatalf("%s: unexpected configure result: %v", name, diags.Err()) - } - - if tc.expectedOrganization != "" && tc.expectedOrganization != b.organization { - t.Fatalf("%s: 
organization not valid: %s, expected: %s", name, b.organization, tc.expectedOrganization) - } - - if tc.expectedHostname != "" && tc.expectedHostname != b.hostname { - t.Fatalf("%s: hostname not valid: %s, expected: %s", name, b.hostname, tc.expectedHostname) - } - - if tc.expectedWorkspaceName != "" && tc.expectedWorkspaceName != b.WorkspaceMapping.Name { - t.Fatalf("%s: workspace name not valid: %s, expected: %s", name, b.WorkspaceMapping.Name, tc.expectedWorkspaceName) - } - }) - } -} - -func TestCloud_config(t *testing.T) { - cases := map[string]struct { - config cty.Value - confErr string - valErr string - }{ - "with_a_non_tfe_host": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("nontfe.local"), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - confErr: "Host nontfe.local does not provide a tfe service", - }, - // localhost advertises TFE services, but has no token in the credentials - "without_a_token": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("localhost"), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - confErr: "terraform login localhost", - }, - "with_tags": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }), - }, - "with_a_name": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), 
- "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - }, - "without_a_name_tags": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - valErr: `Missing workspace mapping strategy.`, - }, - "with_both_a_name_and_tags": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }), - valErr: `Only one of workspace "tags" or "name" is allowed.`, - }, - "null config": { - config: cty.NullVal(cty.EmptyObject), - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - b, cleanup := testUnconfiguredBackend(t) - t.Cleanup(cleanup) - - // Validate - _, valDiags := b.PrepareConfig(tc.config) - if (valDiags.Err() != nil || tc.valErr != "") && - (valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) { - t.Fatalf("unexpected validation result: %v", valDiags.Err()) - } - - // Configure - confDiags := b.Configure(tc.config) - if (confDiags.Err() != nil || tc.confErr != "") && - (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { - t.Fatalf("unexpected configure result: %v", confDiags.Err()) - } - }) - } -} - -func TestCloud_configVerifyMinimumTFEVersion(t *testing.T) { - config := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": 
cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }) - - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.4") - }, - } - s := testServerWithHandlers(handlers) - - b := New(testDisco(s)) - - confDiags := b.Configure(config) - if confDiags.Err() == nil { - t.Fatalf("expected configure to error") - } - - expected := `The 'cloud' option is not supported with this version of Terraform Enterprise.` - if !strings.Contains(confDiags.Err().Error(), expected) { - t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) - } -} - -func TestCloud_configVerifyMinimumTFEVersionInAutomation(t *testing.T) { - config := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }) - - handlers := map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.4") - }, - } - s := testServerWithHandlers(handlers) - - b := New(testDisco(s)) - b.runningInAutomation = true - - confDiags := b.Configure(config) - if confDiags.Err() == nil { - t.Fatalf("expected configure to error") - } - - expected := `This version of Terraform Cloud/Enterprise does not support the state mechanism -attempting to be used by the platform. 
This should never happen.` - if !strings.Contains(confDiags.Err().Error(), expected) { - t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) - } -} - -func TestCloud_setUnavailableTerraformVersion(t *testing.T) { - // go-tfe returns an error IRL if you try to set a Terraform version that's - // not available in your TFC instance. To test this, tfe_client_mock errors if - // you try to set any Terraform version for this specific workspace name. - workspaceName := "unavailable-terraform-version" - - config := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("sometag"), - }, - ), - }), - }) - - b, bCleanup := testBackend(t, config, nil) - defer bCleanup() - - // Make sure the workspace doesn't exist yet -- otherwise, we can't test what - // happens when a workspace gets created. This is why we can't use "name" in - // the backend config above, btw: if you do, testBackend() creates the default - // workspace before we get a chance to do anything. 
- _, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) - if err != tfe.ErrResourceNotFound { - t.Fatalf("the workspace we were about to try and create (%s/%s) already exists in the mocks somehow, so this test isn't trustworthy anymore", b.organization, workspaceName) - } - - _, err = b.StateMgr(workspaceName) - if err != nil { - t.Fatalf("expected no error from StateMgr, despite not being able to set remote Terraform version: %#v", err) - } - // Make sure the workspace was created: - workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) - if err != nil { - t.Fatalf("b.StateMgr() didn't actually create the desired workspace") - } - // Make sure our mocks still error as expected, using the same update function b.StateMgr() would call: - _, err = b.client.Workspaces.UpdateByID( - context.Background(), - workspace.ID, - tfe.WorkspaceUpdateOptions{TerraformVersion: tfe.String("1.1.0")}, - ) - if err == nil { - t.Fatalf("the mocks aren't emulating a nonexistent remote Terraform version correctly, so this test isn't trustworthy anymore") - } -} - -func TestCloud_setConfigurationFields(t *testing.T) { - originalForceBackendEnv := os.Getenv("TF_FORCE_LOCAL_BACKEND") - - cases := map[string]struct { - obj cty.Value - expectedHostname string - expectedOrganziation string - expectedWorkspaceName string - expectedWorkspaceTags []string - expectedForceLocal bool - setEnv func() - resetEnv func() - expectedErr string - }{ - "with hostname set": { - obj: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("hashicorp"), - "hostname": cty.StringVal("hashicorp.com"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedHostname: "hashicorp.com", - expectedOrganziation: "hashicorp", - }, - "with hostname not set, set to default hostname": { - obj: cty.ObjectVal(map[string]cty.Value{ - 
"organization": cty.StringVal("hashicorp"), - "hostname": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedHostname: defaultHostname, - expectedOrganziation: "hashicorp", - }, - "with workspace name set": { - obj: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("hashicorp"), - "hostname": cty.StringVal("hashicorp.com"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedHostname: "hashicorp.com", - expectedOrganziation: "hashicorp", - expectedWorkspaceName: "prod", - }, - "with workspace tags set": { - obj: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("hashicorp"), - "hostname": cty.StringVal("hashicorp.com"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }), - expectedHostname: "hashicorp.com", - expectedOrganziation: "hashicorp", - expectedWorkspaceTags: []string{"billing"}, - }, - "with force local set": { - obj: cty.ObjectVal(map[string]cty.Value{ - "organization": cty.StringVal("hashicorp"), - "hostname": cty.StringVal("hashicorp.com"), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }), - expectedHostname: "hashicorp.com", - expectedOrganziation: "hashicorp", - setEnv: func() { - os.Setenv("TF_FORCE_LOCAL_BACKEND", "1") - }, - resetEnv: func() { - os.Setenv("TF_FORCE_LOCAL_BACKEND", originalForceBackendEnv) - }, - expectedForceLocal: true, - }, - } - - for name, tc := range cases { - b := &Cloud{} - - // if `setEnv` is set, then we expect `resetEnv` to also be set - if tc.setEnv != nil { - tc.setEnv() - defer tc.resetEnv() - } - - errDiags := b.setConfigurationFields(tc.obj) - if 
errDiags.HasErrors() || tc.expectedErr != "" { - actualErr := errDiags.Err().Error() - if !strings.Contains(actualErr, tc.expectedErr) { - t.Fatalf("%s: unexpected validation result: %v", name, errDiags.Err()) - } - } - - if tc.expectedHostname != "" && b.hostname != tc.expectedHostname { - t.Fatalf("%s: expected hostname %s to match configured hostname %s", name, b.hostname, tc.expectedHostname) - } - if tc.expectedOrganziation != "" && b.organization != tc.expectedOrganziation { - t.Fatalf("%s: expected organization (%s) to match configured organization (%s)", name, b.organization, tc.expectedOrganziation) - } - if tc.expectedWorkspaceName != "" && b.WorkspaceMapping.Name != tc.expectedWorkspaceName { - t.Fatalf("%s: expected workspace name mapping (%s) to match configured workspace name (%s)", name, b.WorkspaceMapping.Name, tc.expectedWorkspaceName) - } - if len(tc.expectedWorkspaceTags) > 0 { - presentSet := make(map[string]struct{}) - for _, tag := range b.WorkspaceMapping.Tags { - presentSet[tag] = struct{}{} - } - - expectedSet := make(map[string]struct{}) - for _, tag := range tc.expectedWorkspaceTags { - expectedSet[tag] = struct{}{} - } - - var missing []string - var unexpected []string - - for _, expected := range tc.expectedWorkspaceTags { - if _, ok := presentSet[expected]; !ok { - missing = append(missing, expected) - } - } - - for _, actual := range b.WorkspaceMapping.Tags { - if _, ok := expectedSet[actual]; !ok { - unexpected = append(unexpected, actual) - } - } - - if len(missing) > 0 { - t.Fatalf("%s: expected workspace tag mapping (%s) to contain the following tags: %s", name, b.WorkspaceMapping.Tags, missing) - } - - if len(unexpected) > 0 { - t.Fatalf("%s: expected workspace tag mapping (%s) to NOT contain the following tags: %s", name, b.WorkspaceMapping.Tags, unexpected) - } - - } - if tc.expectedForceLocal != false && b.forceLocal != tc.expectedForceLocal { - t.Fatalf("%s: expected force local backend to be set ", name) - } - } -} - -func 
TestCloud_localBackend(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - local, ok := b.local.(*backendLocal.Local) - if !ok { - t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) - } - - cloud, ok := local.Backend.(*Cloud) - if !ok { - t.Fatalf("expected local.Backend to be *cloud.Cloud, got: %T", cloud) - } -} - -func TestCloud_addAndRemoveWorkspacesDefault(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if err := b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { - t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) - } -} - -func TestCloud_StateMgr_versionCheck(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - // Some fixed versions for testing with. This logic is a simple string - // comparison, so we don't need many test cases. 
- v0135 := version.Must(version.NewSemver("0.13.5")) - v0140 := version.Must(version.NewSemver("0.14.0")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // For this test, the local Terraform version is set to 0.14.0 - tfversion.Prerelease = "" - tfversion.Version = v0140.String() - tfversion.SemVer = v0140 - - // Update the mock remote workspace Terraform version to match the local - // Terraform version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(v0140.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should succeed - if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Now change the remote workspace to a different Terraform version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(v0135.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should fail - want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"` - if _, err := b.StateMgr(testBackendSingleWorkspaceName); err.Error() != want { - t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) - } -} - -func TestCloud_StateMgr_versionCheckLatest(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - v0140 := version.Must(version.NewSemver("0.14.0")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer 
= s - }() - - // For this test, the local Terraform version is set to 0.14.0 - tfversion.Prerelease = "" - tfversion.Version = v0140.String() - tfversion.SemVer = v0140 - - // Update the remote workspace to the pseudo-version "latest" - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String("latest"), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - // This should succeed despite not being a string match - if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { - t.Fatalf("expected no error, got %v", err) - } -} - -func TestCloud_VerifyWorkspaceTerraformVersion(t *testing.T) { - testCases := []struct { - local string - remote string - executionMode string - wantErr bool - }{ - {"0.13.5", "0.13.5", "agent", false}, - {"0.14.0", "0.13.5", "remote", true}, - {"0.14.0", "0.13.5", "local", false}, - {"0.14.0", "0.14.1", "remote", false}, - {"0.14.0", "1.0.99", "remote", false}, - {"0.14.0", "1.1.0", "remote", false}, - {"0.14.0", "1.3.0", "remote", true}, - {"1.2.0", "1.2.99", "remote", false}, - {"1.2.0", "1.3.0", "remote", true}, - {"0.15.0", "latest", "remote", false}, - {"1.1.5", "~> 1.1.1", "remote", false}, - {"1.1.5", "> 1.1.0, < 1.3.0", "remote", false}, - {"1.1.5", "~> 1.0.1", "remote", true}, - // pre-release versions are comparable within their pre-release stage (dev, - // alpha, beta), but not comparable to different stages and not comparable - // to final releases. 
- {"1.1.0-beta1", "1.1.0-beta1", "remote", false}, - {"1.1.0-beta1", "~> 1.1.0-beta", "remote", false}, - {"1.1.0", "~> 1.1.0-beta", "remote", true}, - {"1.1.0-beta1", "~> 1.1.0-dev", "remote", true}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - local := version.Must(version.NewSemver(tc.local)) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // Override local version as specified - tfversion.Prerelease = "" - tfversion.Version = local.String() - tfversion.SemVer = local - - // Update the mock remote workspace Terraform version to the - // specified remote version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - ExecutionMode: &tc.executionMode, - TerraformVersion: tfe.String(tc.remote), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - if tc.wantErr { - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Incompatible Terraform version") { - t.Fatalf("unexpected error: %s", got) - } - } else { - if len(diags) != 0 { - t.Fatalf("unexpected diags: %s", diags.Err()) - } - } - }) - } -} - -func TestCloud_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - // Attempting to check the version against a workspace which doesn't exist - // should result in no errors - diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") - if len(diags) != 0 { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - // Use a special 
workspace ID to trigger a 500 error, which should result - // in a failed check - diags = b.VerifyWorkspaceTerraformVersion("network-error") - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { - t.Fatalf("unexpected error: %s", got) - } - - // Update the mock remote workspace Terraform version to an invalid version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String("1.0.cheetarah"), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - if got := diags.Err().Error(); !strings.Contains(got, "Incompatible Terraform version: The remote workspace specified") { - t.Fatalf("unexpected error: %s", got) - } -} - -func TestCloud_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - // If the ignore flag is set, the behaviour changes - b.IgnoreVersionConflict() - - // Different local & remote versions to cause an error - local := version.Must(version.NewSemver("0.14.0")) - remote := version.Must(version.NewSemver("0.13.5")) - - // Save original local version state and restore afterwards - p := tfversion.Prerelease - v := tfversion.Version - s := tfversion.SemVer - defer func() { - tfversion.Prerelease = p - tfversion.Version = v - tfversion.SemVer = s - }() - - // Override local version as specified - tfversion.Prerelease = "" - tfversion.Version = local.String() - tfversion.SemVer = local - - // Update the mock remote workspace Terraform version to the - // specified remote version - if _, err := b.client.Workspaces.Update( - context.Background(), - b.organization, - b.WorkspaceMapping.Name, - 
tfe.WorkspaceUpdateOptions{ - TerraformVersion: tfe.String(remote.String()), - }, - ); err != nil { - t.Fatalf("error: %v", err) - } - - diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) - if len(diags) != 1 { - t.Fatal("expected diag, but none returned") - } - - if got, want := diags[0].Severity(), tfdiags.Warning; got != want { - t.Errorf("wrong severity: got %#v, want %#v", got, want) - } - if got, want := diags[0].Description().Summary, "Incompatible Terraform version"; got != want { - t.Errorf("wrong summary: got %s, want %s", got, want) - } - wantDetail := "The local Terraform version (0.14.0) does not meet the version requirements for remote workspace hashicorp/app-prod (0.13.5)." - if got := diags[0].Description().Detail; got != wantDetail { - t.Errorf("wrong summary: got %s, want %s", got, wantDetail) - } -} - -func TestClodBackend_DeleteWorkspace_SafeAndForce(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - safeDeleteWorkspaceName := "safe-delete-workspace" - forceDeleteWorkspaceName := "force-delete-workspace" - - _, err := b.StateMgr(safeDeleteWorkspaceName) - if err != nil { - t.Fatalf("error: %s", err) - } - - _, err = b.StateMgr(forceDeleteWorkspaceName) - if err != nil { - t.Fatalf("error: %s", err) - } - - // sanity check that the mock now contains two workspaces - wl, err := b.Workspaces() - if err != nil { - t.Fatalf("error fetching workspace names: %v", err) - } - if len(wl) != 2 { - t.Fatalf("expected 2 workspaced but got %d", len(wl)) - } - - c := context.Background() - safeDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, safeDeleteWorkspaceName) - if err != nil { - t.Fatalf("error fetching workspace: %v", err) - } - - // Lock a workspace so that it should fail to be safe deleted - _, err = b.client.Workspaces.Lock(context.Background(), safeDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) - if err != nil { - t.Fatalf("error locking workspace: %v", err) - 
} - err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) - if err == nil { - t.Fatalf("workspace should have failed to safe delete") - } - - // unlock the workspace and confirm that safe-delete now works - _, err = b.client.Workspaces.Unlock(context.Background(), safeDeleteWorkspace.ID) - if err != nil { - t.Fatalf("error unlocking workspace: %v", err) - } - err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) - if err != nil { - t.Fatalf("error safe deleting workspace: %v", err) - } - - // lock a workspace and then confirm that force deleting it works - forceDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, forceDeleteWorkspaceName) - if err != nil { - t.Fatalf("error fetching workspace: %v", err) - } - _, err = b.client.Workspaces.Lock(context.Background(), forceDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) - if err != nil { - t.Fatalf("error locking workspace: %v", err) - } - err = b.DeleteWorkspace(forceDeleteWorkspaceName, true) - if err != nil { - t.Fatalf("error force deleting workspace: %v", err) - } -} - -func TestClodBackend_DeleteWorkspace_DoesNotExist(t *testing.T) { - b, bCleanup := testBackendWithTags(t) - defer bCleanup() - - err := b.DeleteWorkspace("non-existent-workspace", false) - if err != nil { - t.Fatalf("expected deleting a workspace which does not exist to succeed") - } -} diff --git a/internal/cloud/e2e/main_test.go b/internal/cloud/e2e/main_test.go deleted file mode 100644 index 44fc606395b7..000000000000 --- a/internal/cloud/e2e/main_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "strings" - "testing" - - expect "github.com/Netflix/go-expect" - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/e2e" - tfversion "github.com/hashicorp/terraform/version" -) - -var terraformBin string -var cliConfigFileEnv string - -var tfeClient *tfe.Client -var tfeHostname string -var tfeToken string 
-var verboseMode bool - -func TestMain(m *testing.M) { - teardown := setup() - code := m.Run() - teardown() - - os.Exit(code) -} - -func accTest() bool { - // TF_ACC is set when we want to run acceptance tests, meaning it relies on - // network access. - return os.Getenv("TF_ACC") != "" -} - -func hasHostname() bool { - return os.Getenv("TFE_HOSTNAME") != "" -} - -func hasToken() bool { - return os.Getenv("TFE_TOKEN") != "" -} - -func hasRequiredEnvVars() bool { - return accTest() && hasHostname() && hasToken() -} - -func skipIfMissingEnvVar(t *testing.T) { - if !hasRequiredEnvVars() { - t.Skip("Skipping test, required environment variables missing. Use `TF_ACC`, `TFE_HOSTNAME`, `TFE_TOKEN`") - } -} - -func setup() func() { - tfOutput := flag.Bool("tfoutput", false, "This flag produces the terraform output from tests.") - flag.Parse() - verboseMode = *tfOutput - - setTfeClient() - teardown := setupBinary() - - return func() { - teardown() - } -} -func testRunner(t *testing.T, cases testCases, orgCount int, tfEnvFlags ...string) { - for name, tc := range cases { - tc := tc // rebind tc into this lexical scope - t.Run(name, func(subtest *testing.T) { - subtest.Parallel() - - orgNames := []string{} - for i := 0; i < orgCount; i++ { - organization, cleanup := createOrganization(t) - t.Cleanup(cleanup) - orgNames = append(orgNames, organization.Name) - } - - exp, err := expect.NewConsole(defaultOpts()...) 
- if err != nil { - subtest.Fatal(err) - } - defer exp.Close() - - tmpDir := t.TempDir() - - tf := e2e.NewBinary(t, terraformBin, tmpDir) - tfEnvFlags = append(tfEnvFlags, "TF_LOG=INFO") - tfEnvFlags = append(tfEnvFlags, cliConfigFileEnv) - for _, env := range tfEnvFlags { - tf.AddEnv(env) - } - - var orgName string - for index, op := range tc.operations { - switch orgCount { - case 0: - orgName = "" - case 1: - orgName = orgNames[0] - default: - orgName = orgNames[index] - } - - op.prep(t, orgName, tf.WorkDir()) - for _, tfCmd := range op.commands { - cmd := tf.Cmd(tfCmd.command...) - cmd.Stdin = exp.Tty() - cmd.Stdout = exp.Tty() - cmd.Stderr = exp.Tty() - - err = cmd.Start() - if err != nil { - subtest.Fatal(err) - } - - if tfCmd.expectedCmdOutput != "" { - got, err := exp.ExpectString(tfCmd.expectedCmdOutput) - if err != nil { - subtest.Fatalf("error while waiting for output\nwant: %s\nerror: %s\noutput\n%s", tfCmd.expectedCmdOutput, err, got) - } - } - - lenInput := len(tfCmd.userInput) - lenInputOutput := len(tfCmd.postInputOutput) - if lenInput > 0 { - for i := 0; i < lenInput; i++ { - input := tfCmd.userInput[i] - exp.SendLine(input) - // use the index to find the corresponding - // output that matches the input. - if lenInputOutput-1 >= i { - output := tfCmd.postInputOutput[i] - _, err := exp.ExpectString(output) - if err != nil { - subtest.Fatal(err) - } - } - } - } - - err = cmd.Wait() - if err != nil && !tfCmd.expectError { - subtest.Fatal(err) - } - } - } - - if tc.validations != nil { - tc.validations(t, orgName) - } - }) - } -} - -func setTfeClient() { - tfeHostname = os.Getenv("TFE_HOSTNAME") - tfeToken = os.Getenv("TFE_TOKEN") - - cfg := &tfe.Config{ - Address: fmt.Sprintf("https://%s", tfeHostname), - Token: tfeToken, - } - - if tfeHostname != "" && tfeToken != "" { - // Create a new TFE client. 
- client, err := tfe.NewClient(cfg) - if err != nil { - fmt.Printf("Could not create new tfe client: %v\n", err) - os.Exit(1) - } - tfeClient = client - } -} - -func setupBinary() func() { - log.Println("Setting up terraform binary") - tmpTerraformBinaryDir, err := ioutil.TempDir("", "terraform-test") - if err != nil { - fmt.Printf("Could not create temp directory: %v\n", err) - os.Exit(1) - } - log.Println(tmpTerraformBinaryDir) - currentDir, err := os.Getwd() - defer os.Chdir(currentDir) - if err != nil { - fmt.Printf("Could not change directories: %v\n", err) - os.Exit(1) - } - // Getting top level dir - dirPaths := strings.Split(currentDir, "/") - log.Println(currentDir) - topLevel := len(dirPaths) - 3 - topDir := strings.Join(dirPaths[0:topLevel], "/") - - if err := os.Chdir(topDir); err != nil { - fmt.Printf("Could not change directories: %v\n", err) - os.Exit(1) - } - - cmd := exec.Command( - "go", - "build", - "-o", tmpTerraformBinaryDir, - "-ldflags", fmt.Sprintf("-X \"github.com/hashicorp/terraform/version.Prerelease=%s\"", tfversion.Prerelease), - ) - err = cmd.Run() - if err != nil { - fmt.Printf("Could not run exec command: %v\n", err) - os.Exit(1) - } - - credFile := fmt.Sprintf("%s/dev.tfrc", tmpTerraformBinaryDir) - writeCredRC(credFile) - - terraformBin = fmt.Sprintf("%s/terraform", tmpTerraformBinaryDir) - cliConfigFileEnv = fmt.Sprintf("TF_CLI_CONFIG_FILE=%s", credFile) - - return func() { - os.RemoveAll(tmpTerraformBinaryDir) - } -} - -func writeCredRC(file string) { - creds := credentialBlock() - f, err := os.Create(file) - if err != nil { - fmt.Printf("Could not create file: %v\n", err) - os.Exit(1) - } - _, err = f.WriteString(creds) - if err != nil { - fmt.Printf("Could not write credentials: %v\n", err) - os.Exit(1) - } - f.Close() -} - -func credentialBlock() string { - return fmt.Sprintf(` -credentials "%s" { - token = "%s" -}`, tfeHostname, tfeToken) -} diff --git a/internal/cloud/errors.go b/internal/cloud/errors.go deleted file mode 
100644 index cf668516f347..000000000000 --- a/internal/cloud/errors.go +++ /dev/null @@ -1,60 +0,0 @@ -package cloud - -import ( - "errors" - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// String based errors -var ( - errApplyDiscarded = errors.New("Apply discarded.") - errDestroyDiscarded = errors.New("Destroy discarded.") - errRunApproved = errors.New("approved using the UI or API") - errRunDiscarded = errors.New("discarded using the UI or API") - errRunOverridden = errors.New("overridden using the UI or API") - errApplyNeedsUIConfirmation = errors.New("Cannot confirm apply due to -input=false. Please handle run confirmation in the UI.") - errPolicyOverrideNeedsUIConfirmation = errors.New("Cannot override soft failed policy checks when -input=false. Please open the run in the UI to override.") -) - -// Diagnostic error messages -var ( - invalidWorkspaceConfigMissingValues = tfdiags.AttributeValue( - tfdiags.Error, - "Invalid workspaces configuration", - fmt.Sprintf("Missing workspace mapping strategy. Either workspace \"tags\" or \"name\" is required.\n\n%s", workspaceConfigurationHelp), - cty.Path{cty.GetAttrStep{Name: "workspaces"}}, - ) - - invalidWorkspaceConfigMisconfiguration = tfdiags.AttributeValue( - tfdiags.Error, - "Invalid workspaces configuration", - fmt.Sprintf("Only one of workspace \"tags\" or \"name\" is allowed.\n\n%s", workspaceConfigurationHelp), - cty.Path{cty.GetAttrStep{Name: "workspaces"}}, - ) -) - -const ignoreRemoteVersionHelp = "If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace." 
- -func missingConfigAttributeAndEnvVar(attribute string, envVar string) tfdiags.Diagnostic { - detail := strings.TrimSpace(fmt.Sprintf("\"%s\" must be set in the cloud configuration or as an environment variable: %s.\n", attribute, envVar)) - return tfdiags.AttributeValue( - tfdiags.Error, - "Invalid or missing required argument", - detail, - cty.Path{cty.GetAttrStep{Name: attribute}}) -} - -func incompatibleWorkspaceTerraformVersion(message string, ignoreVersionConflict bool) tfdiags.Diagnostic { - severity := tfdiags.Error - suggestion := ignoreRemoteVersionHelp - if ignoreVersionConflict { - severity = tfdiags.Warning - suggestion = "" - } - description := strings.TrimSpace(fmt.Sprintf("%s\n\n%s", message, suggestion)) - return tfdiags.Sourceless(severity, "Incompatible Terraform version", description) -} diff --git a/internal/cloud/remote_test.go b/internal/cloud/remote_test.go deleted file mode 100644 index b0c44d60a830..000000000000 --- a/internal/cloud/remote_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package cloud - -import ( - "flag" - "os" - "testing" - "time" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -func TestMain(m *testing.M) { - flag.Parse() - - // Make sure TF_FORCE_LOCAL_BACKEND is unset - os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - // Reduce delays to make tests run faster - backoffMin = 1.0 - backoffMax = 1.0 - planConfigurationVersionsPollInterval = 1 * time.Millisecond - runPollInterval = 1 * time.Millisecond - - os.Exit(m.Run()) -} diff --git a/internal/cloud/state.go b/internal/cloud/state.go deleted file mode 100644 index a0379285a899..000000000000 --- a/internal/cloud/state.go +++ /dev/null @@ -1,507 +0,0 @@ -package cloud - -import ( - "bytes" - "context" - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "log" - "os" - "strings" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - tfe "github.com/hashicorp/go-tfe" - uuid "github.com/hashicorp/go-uuid" - 
"github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/remote" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" -) - -// State implements the State interfaces in the state package to handle -// reading and writing the remote state to TFC. This State on its own does no -// local caching so every persist will go to the remote storage and local -// writes will go to memory. -type State struct { - mu sync.Mutex - - // We track two pieces of meta data in addition to the state itself: - // - // lineage - the state's unique ID - // serial - the monotonic counter of "versions" of the state - // - // Both of these (along with state) have a sister field - // that represents the values read in from an existing source. - // All three of these values are used to determine if the new - // state has changed from an existing state we read in. - lineage, readLineage string - serial, readSerial uint64 - state, readState *states.State - disableLocks bool - tfeClient *tfe.Client - organization string - workspace *tfe.Workspace - stateUploadErr bool - forcePush bool - lockInfo *statemgr.LockInfo -} - -var ErrStateVersionUnauthorizedUpgradeState = errors.New(strings.TrimSpace(` -You are not authorized to read the full state version containing outputs. -State versions created by terraform v1.3.0 and newer do not require this level -of authorization and therefore this error can usually be fixed by upgrading the -remote state version. -`)) - -var _ statemgr.Full = (*State)(nil) -var _ statemgr.Migrator = (*State)(nil) - -// statemgr.Reader impl. -func (s *State) State() *states.State { - s.mu.Lock() - defer s.mu.Unlock() - - return s.state.DeepCopy() -} - -// StateForMigration is part of our implementation of statemgr.Migrator. 
-func (s *State) StateForMigration() *statefile.File { - s.mu.Lock() - defer s.mu.Unlock() - - return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) -} - -// WriteStateForMigration is part of our implementation of statemgr.Migrator. -func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { - s.mu.Lock() - defer s.mu.Unlock() - - if !force { - checkFile := statefile.New(s.state, s.lineage, s.serial) - if err := statemgr.CheckValidImport(f, checkFile); err != nil { - return err - } - } - - // We create a deep copy of the state here, because the caller also has - // a reference to the given object and can potentially go on to mutate - // it after we return, but we want the snapshot at this point in time. - s.state = f.State.DeepCopy() - s.lineage = f.Lineage - s.serial = f.Serial - s.forcePush = force - - return nil -} - -// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended -// to be called during initialization of a state manager and should not be -// called after any of the statemgr.Full interface methods have been called. -func (s *State) DisableLocks() { - s.disableLocks = true -} - -// StateSnapshotMeta returns the metadata from the most recently persisted -// or refreshed persistent state snapshot. -// -// This is an implementation of statemgr.PersistentMeta. -func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { - return statemgr.SnapshotMeta{ - Lineage: s.lineage, - Serial: s.serial, - } -} - -// statemgr.Writer impl. -func (s *State) WriteState(state *states.State) error { - s.mu.Lock() - defer s.mu.Unlock() - - // We create a deep copy of the state here, because the caller also has - // a reference to the given object and can potentially go on to mutate - // it after we return, but we want the snapshot at this point in time. 
- s.state = state.DeepCopy() - s.forcePush = false - - return nil -} - -// PersistState uploads a snapshot of the latest state as a StateVersion to Terraform Cloud -func (s *State) PersistState(schemas *terraform.Schemas) error { - s.mu.Lock() - defer s.mu.Unlock() - - log.Printf("[DEBUG] cloud/state: state read serial is: %d; serial is: %d", s.readSerial, s.serial) - log.Printf("[DEBUG] cloud/state: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) - - if s.readState != nil { - lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage - serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial - stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) - if stateUnchanged && lineageUnchanged && serialUnchanged { - // If the state, lineage or serial haven't changed at all then we have nothing to do. - return nil - } - s.serial++ - } else { - // We might be writing a new state altogether, but before we do that - // we'll check to make sure there isn't already a snapshot present - // that we ought to be updating. 
- err := s.refreshState() - if err != nil { - return fmt.Errorf("failed checking for existing remote state: %s", err) - } - log.Printf("[DEBUG] cloud/state: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) - log.Printf("[DEBUG] cloud/state: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) - - if s.lineage == "" { // indicates that no state snapshot is present yet - lineage, err := uuid.GenerateUUID() - if err != nil { - return fmt.Errorf("failed to generate initial lineage: %v", err) - } - s.lineage = lineage - s.serial++ - } - } - - f := statefile.New(s.state, s.lineage, s.serial) - - var buf bytes.Buffer - err := statefile.Write(f, &buf) - if err != nil { - return err - } - - var jsonState []byte - if schemas != nil { - jsonState, err = jsonstate.Marshal(f, schemas) - if err != nil { - return err - } - } - - stateFile, err := statefile.Read(bytes.NewReader(buf.Bytes())) - if err != nil { - return fmt.Errorf("failed to read state: %w", err) - } - - ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) - if err != nil { - return fmt.Errorf("failed to translate outputs: %w", err) - } - jsonStateOutputs, err := json.Marshal(ov) - if err != nil { - return fmt.Errorf("failed to marshal outputs to json: %w", err) - } - - err = s.uploadState(s.lineage, s.serial, s.forcePush, buf.Bytes(), jsonState, jsonStateOutputs) - if err != nil { - s.stateUploadErr = true - return fmt.Errorf("error uploading state: %w", err) - } - // After we've successfully persisted, what we just wrote is our new - // reference state until someone calls RefreshState again. - // We've potentially overwritten (via force) the state, lineage - // and / or serial (and serial was incremented) so we copy over all - // three fields so everything matches the new state and a subsequent - // operation would correctly detect no changes to the lineage, serial or state. 
- s.readState = s.state.DeepCopy() - s.readLineage = s.lineage - s.readSerial = s.serial - return nil -} - -func (s *State) uploadState(lineage string, serial uint64, isForcePush bool, state, jsonState, jsonStateOutputs []byte) error { - ctx := context.Background() - - options := tfe.StateVersionCreateOptions{ - Lineage: tfe.String(lineage), - Serial: tfe.Int64(int64(serial)), - MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), - State: tfe.String(base64.StdEncoding.EncodeToString(state)), - Force: tfe.Bool(isForcePush), - JSONState: tfe.String(base64.StdEncoding.EncodeToString(jsonState)), - JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(jsonStateOutputs)), - } - - // If we have a run ID, make sure to add it to the options - // so the state will be properly associated with the run. - runID := os.Getenv("TFE_RUN_ID") - if runID != "" { - options.Run = &tfe.Run{ID: runID} - } - // Create the new state. - _, err := s.tfeClient.StateVersions.Create(ctx, s.workspace.ID, options) - return err -} - -// Lock calls the Client's Lock method if it's implemented. -func (s *State) Lock(info *statemgr.LockInfo) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.disableLocks { - return "", nil - } - ctx := context.Background() - - lockErr := &statemgr.LockError{Info: s.lockInfo} - - // Lock the workspace. - _, err := s.tfeClient.Workspaces.Lock(ctx, s.workspace.ID, tfe.WorkspaceLockOptions{ - Reason: tfe.String("Locked by Terraform"), - }) - if err != nil { - if err == tfe.ErrWorkspaceLocked { - lockErr.Info = info - err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, s.organization, s.workspace.Name) - } - lockErr.Err = err - return "", lockErr - } - - s.lockInfo = info - - return s.lockInfo.ID, nil -} - -// statemgr.Refresher impl. 
-func (s *State) RefreshState() error { - s.mu.Lock() - defer s.mu.Unlock() - return s.refreshState() -} - -// refreshState is the main implementation of RefreshState, but split out so -// that we can make internal calls to it from methods that are already holding -// the s.mu lock. -func (s *State) refreshState() error { - payload, err := s.getStatePayload() - if err != nil { - return err - } - - // no remote state is OK - if payload == nil { - s.readState = nil - s.lineage = "" - s.serial = 0 - return nil - } - - stateFile, err := statefile.Read(bytes.NewReader(payload.Data)) - if err != nil { - return err - } - - s.lineage = stateFile.Lineage - s.serial = stateFile.Serial - s.state = stateFile.State - - // Properties from the remote must be separate so we can - // track changes as lineage, serial and/or state are mutated - s.readLineage = stateFile.Lineage - s.readSerial = stateFile.Serial - s.readState = s.state.DeepCopy() - return nil -} - -func (s *State) getStatePayload() (*remote.Payload, error) { - ctx := context.Background() - - sv, err := s.tfeClient.StateVersions.ReadCurrent(ctx, s.workspace.ID) - if err != nil { - if err == tfe.ErrResourceNotFound { - // If no state exists, then return nil. - return nil, nil - } - return nil, fmt.Errorf("error retrieving state: %v", err) - } - - state, err := s.tfeClient.StateVersions.Download(ctx, sv.DownloadURL) - if err != nil { - return nil, fmt.Errorf("error downloading state: %v", err) - } - - // If the state is empty, then return nil. - if len(state) == 0 { - return nil, nil - } - - // Get the MD5 checksum of the state. - sum := md5.Sum(state) - - return &remote.Payload{ - Data: state, - MD5: sum[:], - }, nil -} - -// Unlock calls the Client's Unlock method if it's implemented. -func (s *State) Unlock(id string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.disableLocks { - return nil - } - - ctx := context.Background() - - // We first check if there was an error while uploading the latest - // state. 
If so, we will not unlock the workspace to prevent any - // changes from being applied until the correct state is uploaded. - if s.stateUploadErr { - return nil - } - - lockErr := &statemgr.LockError{Info: s.lockInfo} - - // With lock info this should be treated as a normal unlock. - if s.lockInfo != nil { - // Verify the expected lock ID. - if s.lockInfo.ID != id { - lockErr.Err = fmt.Errorf("lock ID does not match existing lock") - return lockErr - } - - // Unlock the workspace. - _, err := s.tfeClient.Workspaces.Unlock(ctx, s.workspace.ID) - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil - } - - // Verify the optional force-unlock lock ID. - if s.organization+"/"+s.workspace.Name != id { - lockErr.Err = fmt.Errorf( - "lock ID %q does not match existing lock ID \"%s/%s\"", - id, - s.organization, - s.workspace.Name, - ) - return lockErr - } - - // Force unlock the workspace. - _, err := s.tfeClient.Workspaces.ForceUnlock(ctx, s.workspace.ID) - if err != nil { - lockErr.Err = err - return lockErr - } - - return nil -} - -// Delete the remote state. 
-func (s *State) Delete(force bool) error { - - var err error - - isSafeDeleteSupported := s.workspace.Permissions.CanForceDelete != nil - if force || !isSafeDeleteSupported { - err = s.tfeClient.Workspaces.Delete(context.Background(), s.organization, s.workspace.Name) - } else { - err = s.tfeClient.Workspaces.SafeDelete(context.Background(), s.organization, s.workspace.Name) - } - - if err != nil && err != tfe.ErrResourceNotFound { - return fmt.Errorf("error deleting workspace %s: %v", s.workspace.Name, err) - } - - return nil -} - -// GetRootOutputValues fetches output values from Terraform Cloud -func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { - ctx := context.Background() - - so, err := s.tfeClient.StateVersionOutputs.ReadCurrent(ctx, s.workspace.ID) - - if err != nil { - return nil, fmt.Errorf("could not read state version outputs: %w", err) - } - - result := make(map[string]*states.OutputValue) - - for _, output := range so.Items { - if output.DetailedType == nil { - // If there is no detailed type information available, this state was probably created - // with a version of terraform < 1.3.0. In this case, we'll eject completely from this - // function and fall back to the old behavior of reading the entire state file, which - // requires a higher level of authorization. - log.Printf("[DEBUG] falling back to reading full state") - - if err := s.RefreshState(); err != nil { - return nil, fmt.Errorf("failed to load state: %w", err) - } - - state := s.State() - if state == nil { - // We know that there is supposed to be state (and this is not simply a new workspace - // without state) because the fallback is only invoked when outputs are present but - // detailed types are not available. 
- return nil, ErrStateVersionUnauthorizedUpgradeState - } - - return state.RootModule().OutputValues, nil - } - - if output.Sensitive { - // Since this is a sensitive value, the output must be requested explicitly in order to - // read its value, which is assumed to be present by callers - sensitiveOutput, err := s.tfeClient.StateVersionOutputs.Read(ctx, output.ID) - if err != nil { - return nil, fmt.Errorf("could not read state version output %s: %w", output.ID, err) - } - output.Value = sensitiveOutput.Value - } - - cval, err := tfeOutputToCtyValue(*output) - if err != nil { - return nil, fmt.Errorf("could not decode output %s (ID %s)", output.Name, output.ID) - } - - result[output.Name] = &states.OutputValue{ - Value: cval, - Sensitive: output.Sensitive, - } - } - - return result, nil -} - -// tfeOutputToCtyValue decodes a combination of TFE output value and detailed-type to create a -// cty value that is suitable for use in terraform. -func tfeOutputToCtyValue(output tfe.StateVersionOutput) (cty.Value, error) { - var result cty.Value - bufType, err := json.Marshal(output.DetailedType) - if err != nil { - return result, fmt.Errorf("could not marshal output %s type: %w", output.ID, err) - } - - var ctype cty.Type - err = ctype.UnmarshalJSON(bufType) - if err != nil { - return result, fmt.Errorf("could not interpret output %s type: %w", output.ID, err) - } - - result, err = gocty.ToCtyValue(output.Value, ctype) - if err != nil { - return result, fmt.Errorf("could not interpret value %v as type %s for output %s: %w", result, ctype.FriendlyName(), output.ID, err) - } - - return result, nil -} diff --git a/internal/cloud/state_test.go b/internal/cloud/state_test.go deleted file mode 100644 index f03bd15c53e5..000000000000 --- a/internal/cloud/state_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package cloud - -import ( - "bytes" - "context" - "io/ioutil" - "testing" - - tfe "github.com/hashicorp/go-tfe" - "github.com/hashicorp/terraform/internal/states/statefile" - 
"github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestState_impl(t *testing.T) { - var _ statemgr.Reader = new(State) - var _ statemgr.Writer = new(State) - var _ statemgr.Persister = new(State) - var _ statemgr.Refresher = new(State) - var _ statemgr.OutputReader = new(State) - var _ statemgr.Locker = new(State) -} - -type ExpectedOutput struct { - Name string - Sensitive bool - IsNull bool -} - -func TestState_GetRootOutputValues(t *testing.T) { - b, bCleanup := testBackendWithOutputs(t) - defer bCleanup() - - state := &State{tfeClient: b.client, organization: b.organization, workspace: &tfe.Workspace{ - ID: "ws-abcd", - }} - outputs, err := state.GetRootOutputValues() - - if err != nil { - t.Fatalf("error returned from GetRootOutputValues: %s", err) - } - - cases := []ExpectedOutput{ - { - Name: "sensitive_output", - Sensitive: true, - IsNull: false, - }, - { - Name: "nonsensitive_output", - Sensitive: false, - IsNull: false, - }, - { - Name: "object_output", - Sensitive: false, - IsNull: false, - }, - { - Name: "list_output", - Sensitive: false, - IsNull: false, - }, - } - - if len(outputs) != len(cases) { - t.Errorf("Expected %d item but %d were returned", len(cases), len(outputs)) - } - - for _, testCase := range cases { - so, ok := outputs[testCase.Name] - if !ok { - t.Fatalf("Expected key %s but it was not found", testCase.Name) - } - if so.Value.IsNull() != testCase.IsNull { - t.Errorf("Key %s does not match null expectation %v", testCase.Name, testCase.IsNull) - } - if so.Sensitive != testCase.Sensitive { - t.Errorf("Key %s does not match sensitive expectation %v", testCase.Name, testCase.Sensitive) - } - } -} - -func TestState(t *testing.T) { - var buf bytes.Buffer - s := statemgr.TestFullInitialState() - sf := statefile.New(s, "stub-lineage", 2) - err := statefile.Write(sf, &buf) - if err != nil { - t.Fatalf("err: %s", err) - } - data := buf.Bytes() - - state := testCloudState(t) - - jsonState, err := 
ioutil.ReadFile("../command/testdata/show-json-state/sensitive-variables/output.json") - if err != nil { - t.Fatal(err) - } - - jsonStateOutputs := []byte(` -{ - "outputs": { - "foo": { - "type": "string", - "value": "bar" - } - } -}`) - - if err := state.uploadState(state.lineage, state.serial, state.forcePush, data, jsonState, jsonStateOutputs); err != nil { - t.Fatalf("put: %s", err) - } - - payload, err := state.getStatePayload() - if err != nil { - t.Fatalf("get: %s", err) - } - if !bytes.Equal(payload.Data, data) { - t.Fatalf("expected full state %q\n\ngot: %q", string(payload.Data), string(data)) - } - - if err := state.Delete(true); err != nil { - t.Fatalf("delete: %s", err) - } - - p, err := state.getStatePayload() - if err != nil { - t.Fatalf("get: %s", err) - } - if p != nil { - t.Fatalf("expected empty state, got: %q", string(p.Data)) - } -} - -func TestCloudLocks(t *testing.T) { - back, bCleanup := testBackendWithName(t) - defer bCleanup() - - a, err := back.StateMgr(testBackendSingleWorkspaceName) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - b, err := back.StateMgr(testBackendSingleWorkspaceName) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - lockerA, ok := a.(statemgr.Locker) - if !ok { - t.Fatal("client A not a statemgr.Locker") - } - - lockerB, ok := b.(statemgr.Locker) - if !ok { - t.Fatal("client B not a statemgr.Locker") - } - - infoA := statemgr.NewLockInfo() - infoA.Operation = "test" - infoA.Who = "clientA" - - infoB := statemgr.NewLockInfo() - infoB.Operation = "test" - infoB.Who = "clientB" - - lockIDA, err := lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - _, err = lockerB.Lock(infoB) - if err == nil { - lockerA.Unlock(lockIDA) - t.Fatal("client B obtained lock while held by client A") - } - if _, ok := err.(*statemgr.LockError); !ok { - t.Errorf("expected a LockError, but was %t: %s", err, err) - } - - if err := lockerA.Unlock(lockIDA); err != nil 
{ - t.Fatal("error unlocking client A", err) - } - - lockIDB, err := lockerB.Lock(infoB) - if err != nil { - t.Fatal("unable to obtain lock from client B") - } - - if lockIDB == lockIDA { - t.Fatalf("duplicate lock IDs: %q", lockIDB) - } - - if err = lockerB.Unlock(lockIDB); err != nil { - t.Fatal("error unlocking client B:", err) - } -} - -func TestDelete_SafeDeleteNotSupported(t *testing.T) { - state := testCloudState(t) - workspaceId := state.workspace.ID - state.workspace.Permissions.CanForceDelete = nil - state.workspace.ResourceCount = 5 - - // Typically delete(false) should safe-delete a cloud workspace, which should fail on this workspace with resources - // However, since we have set the workspace canForceDelete permission to nil, we should fall back to force delete - if err := state.Delete(false); err != nil { - t.Fatalf("delete: %s", err) - } - workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) - if workspace != nil || err != tfe.ErrResourceNotFound { - t.Fatalf("workspace %s not deleted", workspaceId) - } -} - -func TestDelete_ForceDelete(t *testing.T) { - state := testCloudState(t) - workspaceId := state.workspace.ID - state.workspace.Permissions.CanForceDelete = tfe.Bool(true) - state.workspace.ResourceCount = 5 - - if err := state.Delete(true); err != nil { - t.Fatalf("delete: %s", err) - } - workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) - if workspace != nil || err != tfe.ErrResourceNotFound { - t.Fatalf("workspace %s not deleted", workspaceId) - } -} - -func TestDelete_SafeDelete(t *testing.T) { - state := testCloudState(t) - workspaceId := state.workspace.ID - state.workspace.Permissions.CanForceDelete = tfe.Bool(false) - state.workspace.ResourceCount = 5 - - // safe-deleting a workspace with resources should fail - err := state.Delete(false) - if err == nil { - t.Fatalf("workspace should have failed to safe delete") - } - - // safe-deleting a workspace with resources 
should succeed once it has no resources - state.workspace.ResourceCount = 0 - if err = state.Delete(false); err != nil { - t.Fatalf("workspace safe-delete err: %s", err) - } - - workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) - if workspace != nil || err != tfe.ErrResourceNotFound { - t.Fatalf("workspace %s not deleted", workspaceId) - } -} - -func TestState_PersistState(t *testing.T) { - cloudState := testCloudState(t) - - t.Run("Initial PersistState", func(t *testing.T) { - if cloudState.readState != nil { - t.Fatal("expected nil initial readState") - } - - err := cloudState.PersistState(nil) - if err != nil { - t.Fatalf("expected no error, got %q", err) - } - - var expectedSerial uint64 = 1 - if cloudState.readSerial != expectedSerial { - t.Fatalf("expected initial state readSerial to be %d, got %d", expectedSerial, cloudState.readSerial) - } - }) -} diff --git a/internal/cloud/testing.go b/internal/cloud/testing.go deleted file mode 100644 index 4c5302ce4aad..000000000000 --- a/internal/cloud/testing.go +++ /dev/null @@ -1,520 +0,0 @@ -package cloud - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "path" - "testing" - "time" - - tfe "github.com/hashicorp/go-tfe" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/mitchellh/cli" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - 
"github.com/hashicorp/terraform/version" - - backendLocal "github.com/hashicorp/terraform/internal/backend/local" -) - -const ( - testCred = "test-auth-token" -) - -var ( - tfeHost = svchost.Hostname(defaultHostname) - credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ - tfeHost: {"token": testCred}, - }) - testBackendSingleWorkspaceName = "app-prod" - defaultTFCPing = map[string]func(http.ResponseWriter, *http.Request){ - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - w.Header().Set("TFP-AppName", "Terraform Cloud") - }, - } -) - -// mockInput is a mock implementation of terraform.UIInput. -type mockInput struct { - answers map[string]string -} - -func (m *mockInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - v, ok := m.answers[opts.Id] - if !ok { - return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) - } - if v == "wait-for-external-update" { - select { - case <-ctx.Done(): - case <-time.After(time.Minute): - } - } - delete(m.answers, opts.Id) - return v, nil -} - -func testInput(t *testing.T, answers map[string]string) *mockInput { - return &mockInput{answers: answers} -} - -func testBackendWithName(t *testing.T) (*Cloud, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal(testBackendSingleWorkspaceName), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }) - return testBackend(t, obj, defaultTFCPing) -} - -func testBackendWithTags(t *testing.T) (*Cloud, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": 
cty.ObjectVal(map[string]cty.Value{ - "name": cty.NullVal(cty.String), - "tags": cty.SetVal( - []cty.Value{ - cty.StringVal("billing"), - }, - ), - }), - }) - return testBackend(t, obj, nil) -} - -func testBackendNoOperations(t *testing.T) (*Cloud, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("no-operations"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal(testBackendSingleWorkspaceName), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }) - return testBackend(t, obj, nil) -} - -func testBackendWithHandlers(t *testing.T, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, func()) { - obj := cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.NullVal(cty.String), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal(testBackendSingleWorkspaceName), - "tags": cty.NullVal(cty.Set(cty.String)), - }), - }) - return testBackend(t, obj, handlers) -} - -func testCloudState(t *testing.T) *State { - b, bCleanup := testBackendWithName(t) - defer bCleanup() - - raw, err := b.StateMgr(testBackendSingleWorkspaceName) - if err != nil { - t.Fatalf("error: %v", err) - } - - return raw.(*State) -} - -func testBackendWithOutputs(t *testing.T) (*Cloud, func()) { - b, cleanup := testBackendWithName(t) - - // Get a new mock client to use for adding outputs - mc := NewMockClient() - - mc.StateVersionOutputs.create("svo-abcd", &tfe.StateVersionOutput{ - ID: "svo-abcd", - Value: "foobar", - Sensitive: true, - Type: "string", - Name: "sensitive_output", - DetailedType: "string", - }) - - mc.StateVersionOutputs.create("svo-zyxw", &tfe.StateVersionOutput{ - ID: "svo-zyxw", - Value: "bazqux", - Type: "string", - Name: "nonsensitive_output", - DetailedType: "string", - }) - - var dt interface{} - var val interface{} - err := 
json.Unmarshal([]byte(`["object", {"foo":"string"}]`), &dt) - if err != nil { - t.Fatalf("could not unmarshal detailed type: %s", err) - } - err = json.Unmarshal([]byte(`{"foo":"bar"}`), &val) - if err != nil { - t.Fatalf("could not unmarshal value: %s", err) - } - mc.StateVersionOutputs.create("svo-efgh", &tfe.StateVersionOutput{ - ID: "svo-efgh", - Value: val, - Type: "object", - Name: "object_output", - DetailedType: dt, - }) - - err = json.Unmarshal([]byte(`["list", "bool"]`), &dt) - if err != nil { - t.Fatalf("could not unmarshal detailed type: %s", err) - } - err = json.Unmarshal([]byte(`[true, false, true, true]`), &val) - if err != nil { - t.Fatalf("could not unmarshal value: %s", err) - } - mc.StateVersionOutputs.create("svo-ijkl", &tfe.StateVersionOutput{ - ID: "svo-ijkl", - Value: val, - Type: "array", - Name: "list_output", - DetailedType: dt, - }) - - b.client.StateVersionOutputs = mc.StateVersionOutputs - - return b, cleanup -} - -func testBackend(t *testing.T, obj cty.Value, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, func()) { - var s *httptest.Server - if handlers != nil { - s = testServerWithHandlers(handlers) - } else { - s = testServer(t) - } - b := New(testDisco(s)) - - // Configure the backend so the client is created. - newObj, valDiags := b.PrepareConfig(obj) - if len(valDiags) != 0 { - t.Fatalf("testBackend: backend.PrepareConfig() failed: %s", valDiags.ErrWithWarnings()) - } - obj = newObj - - confDiags := b.Configure(obj) - if len(confDiags) != 0 { - t.Fatalf("testBackend: backend.Configure() failed: %s", confDiags.ErrWithWarnings()) - } - - // Get a new mock client. - mc := NewMockClient() - - // Replace the services we use with our mock services. 
- b.CLI = cli.NewMockUi() - b.client.Applies = mc.Applies - b.client.ConfigurationVersions = mc.ConfigurationVersions - b.client.CostEstimates = mc.CostEstimates - b.client.Organizations = mc.Organizations - b.client.Plans = mc.Plans - b.client.TaskStages = mc.TaskStages - b.client.PolicySetOutcomes = mc.PolicySetOutcomes - b.client.PolicyChecks = mc.PolicyChecks - b.client.Runs = mc.Runs - b.client.StateVersions = mc.StateVersions - b.client.StateVersionOutputs = mc.StateVersionOutputs - b.client.Variables = mc.Variables - b.client.Workspaces = mc.Workspaces - - // Set local to a local test backend. - b.local = testLocalBackend(t, b) - b.input = true - - baseURL, err := url.Parse("https://app.terraform.io") - if err != nil { - t.Fatalf("testBackend: failed to parse base URL for client") - } - baseURL.Path = "/api/v2/" - - readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) (*jsonformat.Plan, error) { - return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) - } - - ctx := context.Background() - - // Create the organization. - _, err = b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ - Name: tfe.String(b.organization), - }) - if err != nil { - t.Fatalf("error: %v", err) - } - - // Create the default workspace if required. - if b.WorkspaceMapping.Name != "" { - _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ - Name: tfe.String(b.WorkspaceMapping.Name), - }) - if err != nil { - t.Fatalf("error: %v", err) - } - } - - return b, s.Close -} - -// testUnconfiguredBackend is used for testing the configuration of the backend -// with the mock client -func testUnconfiguredBackend(t *testing.T) (*Cloud, func()) { - s := testServer(t) - b := New(testDisco(s)) - - // Normally, the client is created during configuration, but the configuration uses the - // client to read entitlements. 
- var err error - b.client, err = tfe.NewClient(&tfe.Config{ - Token: "fake-token", - }) - if err != nil { - t.Fatal(err) - } - - // Get a new mock client. - mc := NewMockClient() - - // Replace the services we use with our mock services. - b.CLI = cli.NewMockUi() - b.client.Applies = mc.Applies - b.client.ConfigurationVersions = mc.ConfigurationVersions - b.client.CostEstimates = mc.CostEstimates - b.client.Organizations = mc.Organizations - b.client.Plans = mc.Plans - b.client.PolicySetOutcomes = mc.PolicySetOutcomes - b.client.PolicyChecks = mc.PolicyChecks - b.client.Runs = mc.Runs - b.client.StateVersions = mc.StateVersions - b.client.Variables = mc.Variables - b.client.Workspaces = mc.Workspaces - - baseURL, err := url.Parse("https://app.terraform.io") - if err != nil { - t.Fatalf("testBackend: failed to parse base URL for client") - } - baseURL.Path = "/api/v2/" - - readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) (*jsonformat.Plan, error) { - return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) - } - - // Set local to a local test backend. - b.local = testLocalBackend(t, b) - - return b, s.Close -} - -func testLocalBackend(t *testing.T, cloud *Cloud) backend.Enhanced { - b := backendLocal.NewWithBackend(cloud) - - // Add a test provider to the local backend. - p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "null_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - })} - - return b -} - -// testServer returns a started *httptest.Server used for local testing with the default set of -// request handlers. 
-func testServer(t *testing.T) *httptest.Server { - return testServerWithHandlers(testDefaultRequestHandlers) -} - -// testServerWithHandlers returns a started *httptest.Server with the given set of request handlers -// overriding any default request handlers (testDefaultRequestHandlers). -func testServerWithHandlers(handlers map[string]func(http.ResponseWriter, *http.Request)) *httptest.Server { - mux := http.NewServeMux() - for route, handler := range handlers { - mux.HandleFunc(route, handler) - } - for route, handler := range testDefaultRequestHandlers { - if handlers[route] == nil { - mux.HandleFunc(route, handler) - } - } - - return httptest.NewServer(mux) -} - -// testDefaultRequestHandlers is a map of request handlers intended to be used in a request -// multiplexer for a test server. A caller may use testServerWithHandlers to start a server with -// this base set of routes, and override a particular route for whatever edge case is being tested. -var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Request){ - // Respond to service discovery calls. - "/well-known/terraform.json": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - io.WriteString(w, `{ - "tfe.v2": "/api/v2/", -}`) - }, - - // Respond to service version constraints calls. - "/v1/versions/": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - io.WriteString(w, fmt.Sprintf(`{ - "service": "%s", - "product": "terraform", - "minimum": "0.1.0", - "maximum": "10.0.0" -}`, path.Base(r.URL.Path))) - }, - - // Respond to pings to get the API version header. - "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("TFP-API-Version", "2.5") - }, - - // Respond to the initial query to read the hashicorp org entitlements. 
- "/api/v2/organizations/hashicorp/entitlement-set": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.api+json") - io.WriteString(w, `{ - "data": { - "id": "org-GExadygjSbKP8hsY", - "type": "entitlement-sets", - "attributes": { - "operations": true, - "private-module-registry": true, - "sentinel": true, - "state-storage": true, - "teams": true, - "vcs-integrations": true - } - } -}`) - }, - - // Respond to the initial query to read the no-operations org entitlements. - "/api/v2/organizations/no-operations/entitlement-set": func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.api+json") - io.WriteString(w, `{ - "data": { - "id": "org-ufxa3y8jSbKP8hsT", - "type": "entitlement-sets", - "attributes": { - "operations": false, - "private-module-registry": true, - "sentinel": true, - "state-storage": true, - "teams": true, - "vcs-integrations": true - } - } -}`) - }, - - // All tests that are assumed to pass will use the hashicorp organization, - // so for all other organization requests we will return a 404. 
- "/api/v2/organizations/": func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - io.WriteString(w, `{ - "errors": [ - { - "status": "404", - "title": "not found" - } - ] -}`) - }, -} - -func mockColorize() *colorstring.Colorize { - colors := make(map[string]string) - for k, v := range colorstring.DefaultColors { - colors[k] = v - } - colors["purple"] = "38;5;57" - - return &colorstring.Colorize{ - Colors: colors, - Disable: false, - Reset: true, - } -} - -func mockSROWorkspace(t *testing.T, b *Cloud, workspaceName string) { - _, err := b.client.Workspaces.Update(context.Background(), "hashicorp", workspaceName, tfe.WorkspaceUpdateOptions{ - StructuredRunOutputEnabled: tfe.Bool(true), - TerraformVersion: tfe.String("1.4.0"), - }) - if err != nil { - t.Fatalf("Error enabling SRO on workspace %s: %v", workspaceName, err) - } -} - -// testDisco returns a *disco.Disco mapping app.terraform.io and -// localhost to a local test server. -func testDisco(s *httptest.Server) *disco.Disco { - services := map[string]interface{}{ - "tfe.v2": fmt.Sprintf("%s/api/v2/", s.URL), - } - d := disco.NewWithCredentialsSource(credsSrc) - d.SetUserAgent(httpclient.TerraformUserAgent(version.String())) - - d.ForceHostServices(svchost.Hostname(defaultHostname), services) - d.ForceHostServices(svchost.Hostname("localhost"), services) - d.ForceHostServices(svchost.Hostname("nontfe.local"), nil) - return d -} - -type unparsedVariableValue struct { - value string - source terraform.ValueSourceType -} - -func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { - return &terraform.InputValue{ - Value: cty.StringVal(v.value), - SourceType: v.source, - }, tfdiags.Diagnostics{} -} - -// testVariable returns a backend.UnparsedVariableValue used for testing. 
-func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { - vars := make(map[string]backend.UnparsedVariableValue, len(vs)) - for _, v := range vs { - vars[v] = &unparsedVariableValue{ - value: v, - source: s, - } - } - return vars -} diff --git a/internal/command/apply.go b/internal/command/apply.go deleted file mode 100644 index 072709472afe..000000000000 --- a/internal/command/apply.go +++ /dev/null @@ -1,393 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ApplyCommand is a Command implementation that applies a Terraform -// configuration and actually builds or changes infrastructure. -type ApplyCommand struct { - Meta - - // If true, then this apply command will become the "destroy" - // command. It is just like apply but only processes a destroy. - Destroy bool -} - -func (c *ApplyCommand) Run(rawArgs []string) int { - var diags tfdiags.Diagnostics - - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Propagate -no-color for legacy use of Ui. The remote backend and - // cloud package use this; it should be removed when/if they are - // migrated to views. 
- c.Meta.color = !common.NoColor - c.Meta.Color = c.Meta.color - - // Parse and validate flags - var args *arguments.Apply - switch { - case c.Destroy: - args, diags = arguments.ParseApplyDestroy(rawArgs) - default: - args, diags = arguments.ParseApply(rawArgs) - } - - // Instantiate the view, even if there are flag errors, so that we render - // diagnostics according to the desired view - view := views.NewApply(args.ViewType, c.Destroy, c.View) - - if diags.HasErrors() { - view.Diagnostics(diags) - view.HelpPrompt() - return 1 - } - - // Check for user-supplied plugin path - var err error - if c.pluginPath, err = c.loadPluginPath(); err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - // Attempt to load the plan file, if specified - planFile, diags := c.LoadPlanFile(args.PlanPath) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Check for invalid combination of plan file and variable overrides - if planFile != nil && !args.Vars.Empty() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Can't set variables when applying a saved plan", - "The -var and -var-file options cannot be used when applying a saved plan file, because a saved plan includes the variable values that were set when it was created.", - )) - view.Diagnostics(diags) - return 1 - } - - // FIXME: the -input flag value is needed to initialize the backend and the - // operation, but there is no clear path to pass this value down, so we - // continue to mutate the Meta object state for now. - c.Meta.input = args.InputEnabled - - // FIXME: the -parallelism flag is used to control the concurrency of - // Terraform operations. At the moment, this value is used both to - // initialize the backend via the ContextOpts field inside CLIOpts, and to - // set a largely unused field on the Operation request. Again, there is no - // clear path to pass this value down, so we continue to mutate the Meta - // object state for now. 
- c.Meta.parallelism = args.Operation.Parallelism - - // Prepare the backend, passing the plan file if present, and the - // backend-specific arguments - be, beDiags := c.PrepareBackend(planFile, args.State, args.ViewType) - diags = diags.Append(beDiags) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Build the operation request - opReq, opDiags := c.OperationRequest(be, view, args.ViewType, planFile, args.Operation, args.AutoApprove) - diags = diags.Append(opDiags) - - // Collect variable value and add them to the operation request - diags = diags.Append(c.GatherVariables(opReq, args.Vars)) - - // Before we delegate to the backend, we'll print any warning diagnostics - // we've accumulated here, since the backend will start fresh with its own - // diagnostics. - view.Diagnostics(diags) - if diags.HasErrors() { - return 1 - } - diags = nil - - // Run the operation - op, err := c.RunOperation(be, opReq) - if err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - if op.Result != backend.OperationSuccess { - return op.Result.ExitStatus() - } - - // Render the resource count and outputs, unless those counts are being - // rendered already in a remote Terraform process. 
- if rb, isRemoteBackend := be.(BackendWithRemoteTerraformVersion); !isRemoteBackend || rb.IsLocalOperations() { - view.ResourceCount(args.State.StateOutPath) - if !c.Destroy && op.State != nil { - view.Outputs(op.State.RootModule().OutputValues) - } - } - - view.Diagnostics(diags) - - if diags.HasErrors() { - return 1 - } - - return 0 -} - -func (c *ApplyCommand) LoadPlanFile(path string) (*planfile.Reader, tfdiags.Diagnostics) { - var planFile *planfile.Reader - var diags tfdiags.Diagnostics - - // Try to load plan if path is specified - if path != "" { - var err error - planFile, err = c.PlanFile(path) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Failed to load %q as a plan file", path), - fmt.Sprintf("Error: %s", err), - )) - return nil, diags - } - - // If the path doesn't look like a plan, both planFile and err will be - // nil. In that case, the user is probably trying to use the positional - // argument to specify a configuration path. Point them at -chdir. - if planFile == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Failed to load %q as a plan file", path), - "The specified path is a directory, not a plan file. You can use the global -chdir flag to use this directory as the configuration root.", - )) - return nil, diags - } - - // If we successfully loaded a plan but this is a destroy operation, - // explain that this is not supported. 
- if c.Destroy { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Destroy can't be called with a plan file", - fmt.Sprintf("If this plan was created using plan -destroy, apply it using:\n terraform apply %q", path), - )) - return nil, diags - } - } - - return planFile, diags -} - -func (c *ApplyCommand) PrepareBackend(planFile *planfile.Reader, args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // FIXME: we need to apply the state arguments to the meta object here - // because they are later used when initializing the backend. Carving a - // path to pass these arguments to the functions that need them is - // difficult but would make their use easier to understand. - c.Meta.applyStateArguments(args) - - // Load the backend - var be backend.Enhanced - var beDiags tfdiags.Diagnostics - if planFile == nil { - backendConfig, configDiags := c.loadBackendConfig(".") - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags - } - - be, beDiags = c.Backend(&BackendOpts{ - Config: backendConfig, - ViewType: viewType, - }) - } else { - plan, err := planFile.ReadPlan() - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read plan from plan file", - fmt.Sprintf("Cannot read the plan from the given plan file: %s.", err), - )) - return nil, diags - } - if plan.Backend.Config == nil { - // Should never happen; always indicates a bug in the creation of the plan file - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read plan from plan file", - "The given plan file does not have a valid backend configuration. 
This is a bug in the Terraform command that generated this plan file.", - )) - return nil, diags - } - be, beDiags = c.BackendForPlan(plan.Backend) - } - - diags = diags.Append(beDiags) - if beDiags.HasErrors() { - return nil, diags - } - return be, diags -} - -func (c *ApplyCommand) OperationRequest( - be backend.Enhanced, - view views.Apply, - viewType arguments.ViewType, - planFile *planfile.Reader, - args *arguments.Operation, - autoApprove bool, -) (*backend.Operation, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Applying changes with dev overrides in effect could make it impossible - // to switch back to a release version if the schema isn't compatible, - // so we'll warn about it. - diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) - - // Build the operation - opReq := c.Operation(be, viewType) - opReq.AutoApprove = autoApprove - opReq.ConfigDir = "." - opReq.PlanMode = args.PlanMode - opReq.Hooks = view.Hooks() - opReq.PlanFile = planFile - opReq.PlanRefresh = args.Refresh - opReq.Targets = args.Targets - opReq.ForceReplace = args.ForceReplace - opReq.Type = backend.OperationTypeApply - opReq.View = view.Operation() - - var err error - opReq.ConfigLoader, err = c.initConfigLoader() - if err != nil { - diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) - return nil, diags - } - - return opReq, diags -} - -func (c *ApplyCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // FIXME the arguments package currently trivially gathers variable related - // arguments in a heterogenous slice, in order to minimize the number of - // code paths gathering variables during the transition to this structure. - // Once all commands that gather variables have been converted to this - // structure, we could move the variable gathering code to the arguments - // package directly, removing this shim layer. 
- - varArgs := args.All() - items := make([]rawFlag, len(varArgs)) - for i := range varArgs { - items[i].Name = varArgs[i].Name - items[i].Value = varArgs[i].Value - } - c.Meta.variableArgs = rawFlags{items: &items} - opReq.Variables, diags = c.collectVariableValues() - - return diags -} - -func (c *ApplyCommand) Help() string { - if c.Destroy { - return c.helpDestroy() - } - - return c.helpApply() -} - -func (c *ApplyCommand) Synopsis() string { - if c.Destroy { - return "Destroy previously-created infrastructure" - } - - return "Create or update infrastructure" -} - -func (c *ApplyCommand) helpApply() string { - helpText := ` -Usage: terraform [global options] apply [options] [PLAN] - - Creates or updates infrastructure according to Terraform configuration - files in the current directory. - - By default, Terraform will generate a new plan and present it for your - approval before taking any action. You can optionally provide a plan - file created by a previous call to "terraform plan", in which case - Terraform will take the actions described in that plan without any - confirmation prompt. - -Options: - - -auto-approve Skip interactive approval of plan before applying. - - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. - - -compact-warnings If Terraform produces any warnings that are not - accompanied by errors, show them in a more compact - form that includes only the summary messages. - - -destroy Destroy Terraform-managed infrastructure. - The command "terraform destroy" is a convenience alias - for this option. - - -lock=false Don't hold a state lock during the operation. This is - dangerous if others might concurrently run commands - against the same workspace. - - -lock-timeout=0s Duration to retry a state lock. - - -input=true Ask for input for variables if not directly set. - - -no-color If specified, output won't contain any color. 
- - -parallelism=n Limit the number of parallel resource operations. - Defaults to 10. - - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". - - -state-out=path Path to write state to that is different than - "-state". This can be used to preserve the old - state. - - If you don't provide a saved plan file then this command will also accept - all of the plan-customization options accepted by the terraform plan command. - For more information on those options, run: - terraform plan -help -` - return strings.TrimSpace(helpText) -} - -func (c *ApplyCommand) helpDestroy() string { - helpText := ` -Usage: terraform [global options] destroy [options] - - Destroy Terraform-managed infrastructure. - - This command is a convenience alias for: - terraform apply -destroy - - This command also accepts many of the plan-customization options accepted by - the terraform plan command. For more information on those options, run: - terraform plan -help -` - return strings.TrimSpace(helpText) -} diff --git a/internal/command/apply_test.go b/internal/command/apply_test.go deleted file mode 100644 index ba75c5e84a5f..000000000000 --- a/internal/command/apply_test.go +++ /dev/null @@ -1,2232 +0,0 @@ -package command - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func 
TestApply(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := applyFixtureProvider() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_path(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - p := applyFixtureProvider() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-auto-approve", - testFixturePath("apply"), - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - if !strings.Contains(output.Stderr(), "-chdir") { - t.Fatal("expected command output to refer to -chdir flag, but got:", output.Stderr()) - } -} - -func TestApply_approveNo(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - defer testInputMap(t, map[string]string{ - "approve": "no", - })() - - // Do not use the NewMockUi initializer here, as we want to delay - // the call to init until after setting up the input mocks - ui := new(cli.MockUi) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - 
testingOverrides: metaOverridesForProvider(p), - Ui: ui, - View: view, - }, - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - if got, want := output.Stdout(), "Apply cancelled"; !strings.Contains(got, want) { - t.Fatalf("expected output to include %q, but was:\n%s", want, got) - } - - if _, err := os.Stat(statePath); err == nil || !os.IsNotExist(err) { - t.Fatalf("state file should not exist") - } -} - -func TestApply_approveYes(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := applyFixtureProvider() - - defer testInputMap(t, map[string]string{ - "approve": "yes", - })() - - // Do not use the NewMockUi initializer here, as we want to delay - // the call to init until after setting up the input mocks - ui := new(cli.MockUi) - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: ui, - View: view, - }, - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -// test apply with locked state -func TestApply_lockedState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - unlock, err := testLockState(t, testDataDir, statePath) - if err != nil { - t.Fatal(err) - } - defer unlock() - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: 
metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code == 0 { - t.Fatal("expected error") - } - - if !strings.Contains(output.Stderr(), "lock") { - t.Fatal("command output does not look like a lock error:", output.Stderr()) - } -} - -// test apply with locked state, waiting for unlock -func TestApply_lockedStateWait(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - unlock, err := testLockState(t, testDataDir, statePath) - if err != nil { - t.Fatal(err) - } - - // unlock during apply - go func() { - time.Sleep(500 * time.Millisecond) - unlock() - }() - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - // wait 4s just in case the lock process doesn't release in under a second, - // and we want our context to be alive for a second retry at the 3s mark. - args := []string{ - "-state", statePath, - "-lock-timeout", "4s", - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("lock should have succeeded in less than 3s: %s", output.Stderr()) - } -} - -// Verify that the parallelism flag allows no more than the desired number of -// concurrent calls to ApplyResourceChange. 
-func TestApply_parallelism(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("parallelism"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - par := 4 - - // started is a semaphore that we use to ensure that we never have more - // than "par" apply operations happening concurrently - started := make(chan struct{}, par) - - // beginCtx is used as a starting gate to hold back ApplyResourceChange - // calls until we reach the desired concurrency. The cancel func "begin" is - // called once we reach the desired concurrency, allowing all apply calls - // to proceed in unison. - beginCtx, begin := context.WithCancel(context.Background()) - - // Since our mock provider has its own mutex preventing concurrent calls - // to ApplyResourceChange, we need to use a number of separate providers - // here. They will all have the same mock implementation function assigned - // but crucially they will each have their own mutex. - providerFactories := map[addrs.Provider]providers.Factory{} - for i := 0; i < 10; i++ { - name := fmt.Sprintf("test%d", i) - provider := &terraform.MockProvider{} - provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - name + "_instance": {Block: &configschema.Block{}}, - }, - } - provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - provider.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - - // If we ever have more than our intended parallelism number of - // apply operations running concurrently, the semaphore will fail. 
- select { - case started <- struct{}{}: - defer func() { - <-started - }() - default: - t.Fatal("too many concurrent apply operations") - } - - // If we never reach our intended parallelism, the context will - // never be canceled and the test will time out. - if len(started) >= par { - begin() - } - <-beginCtx.Done() - - // do some "work" - // Not required for correctness, but makes it easier to spot a - // failure when there is more overlap. - time.Sleep(10 * time.Millisecond) - - return providers.ApplyResourceChangeResponse{ - NewState: cty.EmptyObjectVal, - } - } - providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider) - } - testingOverrides := &testingOverrides{ - Providers: providerFactories, - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: testingOverrides, - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - fmt.Sprintf("-parallelism=%d", par), - } - - res := c.Run(args) - output := done(t) - if res != 0 { - t.Fatal(output.Stdout()) - } -} - -func TestApply_configInvalid(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-config-invalid"), td) - defer testChdir(t, td)() - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", testTempFile(t), - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: \n%s", output.Stdout()) - } -} - -func TestApply_defaultState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := filepath.Join(td, DefaultStateFilename) - - // Change to the temporary directory - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if 
err := os.Chdir(filepath.Dir(statePath)); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - // create an existing state file - localState := statemgr.NewFilesystem(statePath) - if err := localState.WriteState(states.NewState()); err != nil { - t.Fatal(err) - } - - args := []string{ - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_error(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-error"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - var lock sync.Mutex - errored := false - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - lock.Lock() - defer lock.Unlock() - - if !errored { - errored = true - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) - } - - s := req.PlannedState.AsValueMap() - s["id"] = cty.StringVal("foo") - - resp.NewState = cty.ObjectVal(s) - return - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - s := req.ProposedNewState.AsValueMap() - s["id"] = cty.UnknownVal(cty.String) - resp.PlannedState = cty.ObjectVal(s) - return - } - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { 
- Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "error": {Type: cty.Bool, Optional: true}, - }, - }, - }, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("wrong exit code %d; want 1\n%s", code, output.Stdout()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - if len(state.RootModule().Resources) == 0 { - t.Fatal("no resources in state") - } -} - -func TestApply_input(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-input"), td) - defer testChdir(t, td)() - - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - // The configuration for this test includes a declaration of variable - // "foo" with no default, and we don't set it on the command line below, - // so the apply command will produce an interactive prompt for the - // value of var.foo. We'll answer "foo" here, and we expect the output - // value "result" to echo that back to us below. 
- defaultInputReader = bytes.NewBufferString("foo\n") - defaultInputWriter = new(bytes.Buffer) - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - expected := strings.TrimSpace(` - -Outputs: - -result = foo - `) - testStateOutput(t, statePath, expected) -} - -// When only a partial set of the variables are set, Terraform -// should still ask for the unset ones by default (with -input=true) -func TestApply_inputPartial(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-input-partial"), td) - defer testChdir(t, td)() - - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - // Set some default reader/writers for the inputs - defaultInputReader = bytes.NewBufferString("one\ntwo\n") - defaultInputWriter = new(bytes.Buffer) - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - "-var", "foo=foovalue", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - expected := strings.TrimSpace(` - -Outputs: - -bar = one -foo = foovalue - `) - testStateOutput(t, statePath, expected) -} - -func TestApply_noArgs(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := applyFixtureProvider() - view, done := testView(t) - c := 
&ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_plan(t *testing.T) { - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - // Set some default reader/writers for the inputs - defaultInputReader = new(bytes.Buffer) - defaultInputWriter = new(bytes.Buffer) - - planPath := applyFixturePlanFile(t) - statePath := testTempFile(t) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state-out", statePath, - planPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_plan_backup(t *testing.T) { - statePath := testTempFile(t) - backupPath := testTempFile(t) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - // create a state file that needs to be backed up - fs := statemgr.NewFilesystem(statePath) - fs.StateSnapshotMeta() - err := fs.WriteState(states.NewState()) - if err != nil { - t.Fatal(err) - } - - // the plan file must contain the metadata from the prior state to be - // backed up - planPath := applyFixturePlanFileMatchState(t, fs.StateSnapshotMeta()) - - args := 
[]string{ - "-state", statePath, - "-backup", backupPath, - planPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Should have a backup file - testStateRead(t, backupPath) -} - -func TestApply_plan_noBackup(t *testing.T) { - planPath := applyFixturePlanFile(t) - statePath := testTempFile(t) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state-out", statePath, - "-backup", "-", - planPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Ensure there is no backup - _, err := os.Stat(statePath + DefaultBackupExtension) - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } - - // Ensure there is no literal "-" - _, err = os.Stat("-") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } -} - -func TestApply_plan_remoteState(t *testing.T) { - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - tmp := testCwd(t) - remoteStatePath := filepath.Join(tmp, DefaultDataDir, DefaultStateFilename) - if err := os.MkdirAll(filepath.Dir(remoteStatePath), 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // Set some default reader/writers for the inputs - defaultInputReader = new(bytes.Buffer) - defaultInputWriter = new(bytes.Buffer) - - // Create a remote state - state := testState() - _, srv := testRemoteState(t, state, 200) - defer srv.Close() - - _, snap := testModuleWithSnapshot(t, "apply") - backendConfig := cty.ObjectVal(map[string]cty.Value{ - "address": cty.StringVal(srv.URL), - "update_method": cty.NullVal(cty.String), - "lock_address": cty.NullVal(cty.String), - "unlock_address": cty.NullVal(cty.String), - "lock_method": cty.NullVal(cty.String), - "unlock_method": 
cty.NullVal(cty.String), - "username": cty.NullVal(cty.String), - "password": cty.NullVal(cty.String), - "skip_cert_verification": cty.NullVal(cty.Bool), - "retry_max": cty.NullVal(cty.String), - "retry_wait_min": cty.NullVal(cty.String), - "retry_wait_max": cty.NullVal(cty.String), - "client_ca_certificate_pem": cty.NullVal(cty.String), - "client_certificate_pem": cty.NullVal(cty.String), - "client_private_key_pem": cty.NullVal(cty.String), - }) - backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type()) - if err != nil { - t.Fatal(err) - } - planPath := testPlanFile(t, snap, state, &plans.Plan{ - Backend: plans.Backend{ - Type: "http", - Config: backendConfigRaw, - }, - Changes: plans.NewChanges(), - }) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - planPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // State file should be not be installed - if _, err := os.Stat(filepath.Join(tmp, DefaultStateFilename)); err == nil { - data, _ := ioutil.ReadFile(DefaultStateFilename) - t.Fatalf("State path should not exist: %s", string(data)) - } - - // Check that there is no remote state config - if src, err := ioutil.ReadFile(remoteStatePath); err == nil { - t.Fatalf("has %s file; should not\n%s", remoteStatePath, src) - } -} - -func TestApply_planWithVarFile(t *testing.T) { - varFileDir := testTempDir(t) - varFilePath := filepath.Join(varFileDir, "terraform.tfvars") - if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - planPath := applyFixturePlanFile(t) - statePath := testTempFile(t) - - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(varFileDir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - p := 
applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state-out", statePath, - planPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_planVars(t *testing.T) { - planPath := applyFixturePlanFile(t) - statePath := testTempFile(t) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-var", "foo=bar", - planPath, - } - code := c.Run(args) - output := done(t) - if code == 0 { - t.Fatal("should've failed: ", output.Stdout()) - } -} - -// we should be able to apply a plan file with no other file dependencies -func TestApply_planNoModuleFiles(t *testing.T) { - // temporary data directory which we can remove between commands - td := testTempDir(t) - defer os.RemoveAll(td) - - defer testChdir(t, td)() - - p := applyFixtureProvider() - planPath := applyFixturePlanFile(t) - view, done := testView(t) - apply := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: new(cli.MockUi), - View: view, - }, - } - args := []string{ - planPath, - } - apply.Run(args) - done(t) -} - -func TestApply_refresh(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"ami":"bar"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ReadResourceCalled { - t.Fatal("should call ReadResource") - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - - // Should have a backup file - backupState := testStateRead(t, statePath+DefaultBackupExtension) - - actualStr := strings.TrimSpace(backupState.String()) - expectedStr := strings.TrimSpace(originalState.String()) - if actualStr != expectedStr { - t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) - } -} - -func TestApply_refreshFalse(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"ami":"bar"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - - p := 
applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - "-refresh=false", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if p.ReadResourceCalled { - t.Fatal("should not call ReadResource when refresh=false") - } -} -func TestApply_shutdown(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-shutdown"), td) - defer testChdir(t, td)() - - cancelled := make(chan struct{}) - shutdownCh := make(chan struct{}) - - statePath := testTempFile(t) - p := testProvider() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - ShutdownCh: shutdownCh, - }, - } - - p.StopFn = func() error { - close(cancelled) - return nil - } - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - return - } - - var once sync.Once - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - // only cancel once - once.Do(func() { - shutdownCh <- struct{}{} - }) - - // Because of the internal lock in the MockProvider, we can't - // coordiante directly with the calling of Stop, and making the - // MockProvider concurrent is disruptive to a lot of existing tests. - // Wait here a moment to help make sure the main goroutine gets to the - // Stop call before we exit, or the plan may finish before it can be - // canceled. 
- time.Sleep(200 * time.Millisecond) - - resp.NewState = req.PlannedState - return - } - - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - select { - case <-cancelled: - default: - t.Fatal("command not cancelled") - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } -} - -func TestApply_state(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"ami":"foo"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - - p := applyFixtureProvider() - p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "ami": cty.StringVal("bar"), - }), - } - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "ami": cty.StringVal("bar"), - }), - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: 
metaOverridesForProvider(p), - View: view, - }, - } - - // Run the apply command pointing to our existing state - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify that the provider was called with the existing state - actual := p.PlanResourceChangeRequest.PriorState - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.StringVal("foo"), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected) - } - - actual = p.ApplyResourceChangeRequest.PriorState - expected = cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.StringVal("foo"), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected) - } - - // Verify a new state exists - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - - backupState := testStateRead(t, statePath+DefaultBackupExtension) - - actualStr := strings.TrimSpace(backupState.String()) - expectedStr := strings.TrimSpace(originalState.String()) - if actualStr != expectedStr { - t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) - } -} - -func TestApply_stateNoExist(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - p := applyFixtureProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "idontexist.tfstate", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: \n%s", output.Stdout()) - } -} - -func TestApply_sensitiveOutput(t 
*testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-sensitive-output"), td) - defer testChdir(t, td)() - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - statePath := testTempFile(t) - - args := []string{ - "-state", statePath, - "-auto-approve", - } - - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stdout()) - } - - stdout := output.Stdout() - if !strings.Contains(stdout, "notsensitive = \"Hello world\"") { - t.Fatalf("bad: output should contain 'notsensitive' output\n%s", stdout) - } - if !strings.Contains(stdout, "sensitive = ") { - t.Fatalf("bad: output should contain 'sensitive' output\n%s", stdout) - } -} - -func TestApply_vars(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-vars"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - actual = req.ProposedNewState.GetAttr("value").AsString() - return providers.PlanResourceChangeResponse{ - PlannedState: 
req.ProposedNewState, - } - } - - args := []string{ - "-auto-approve", - "-var", "foo=bar", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestApply_varFile(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-vars"), td) - defer testChdir(t, td)() - - varFilePath := testTempFile(t) - if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - actual = req.ProposedNewState.GetAttr("value").AsString() - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - args := []string{ - "-auto-approve", - "-var-file", varFilePath, - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestApply_varFileDefault(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - 
testCopyDir(t, testFixturePath("apply-vars"), td) - defer testChdir(t, td)() - - varFilePath := filepath.Join(td, "terraform.tfvars") - if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - actual = req.ProposedNewState.GetAttr("value").AsString() - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - args := []string{ - "-auto-approve", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestApply_varFileDefaultJSON(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-vars"), td) - defer testChdir(t, td)() - - varFilePath := filepath.Join(td, "terraform.tfvars.json") - if err := ioutil.WriteFile(varFilePath, []byte(applyVarFileJSON), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: 
metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - actual = req.ProposedNewState.GetAttr("value").AsString() - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - args := []string{ - "-auto-approve", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestApply_backup(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - backupPath := testTempFile(t) - - p := applyFixtureProvider() - p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "ami": 
cty.StringVal("bar"), - }), - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - // Run the apply command pointing to our existing state - args := []string{ - "-auto-approve", - "-state", statePath, - "-backup", backupPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify a new state exists - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - - backupState := testStateRead(t, backupPath) - - actual := backupState.RootModule().Resources["test_instance.foo"] - expected := originalState.RootModule().Resources["test_instance.foo"] - if !cmp.Equal(actual, expected, cmpopts.EquateEmpty()) { - t.Fatalf( - "wrong aws_instance.foo state\n%s", - cmp.Diff(expected, actual, cmp.Transformer("bytesAsString", func(b []byte) string { - return string(b) - })), - ) - } -} - -func TestApply_disableBackup(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - originalState := testState() - statePath := testStateFile(t, originalState) - - p := applyFixtureProvider() - p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "ami": cty.StringVal("bar"), - }), - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - // Run the apply command pointing to our existing state - args := []string{ - "-auto-approve", - "-state", statePath, - "-backup", "-", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify that the provider was called with the 
existing state - actual := p.PlanResourceChangeRequest.PriorState - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.NullVal(cty.String), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected) - } - - actual = p.ApplyResourceChangeRequest.PriorState - expected = cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.NullVal(cty.String), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected) - } - - // Verify a new state exists - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - - // Ensure there is no backup - _, err := os.Stat(statePath + DefaultBackupExtension) - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } - - // Ensure there is no literal "-" - _, err = os.Stat("-") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } -} - -// Test that the Terraform env is passed through -func TestApply_terraformEnv(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-terraform-env"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-auto-approve", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - expected := strings.TrimSpace(` - -Outputs: - -output = default - `) - testStateOutput(t, statePath, expected) -} - -// Test that the Terraform env is passed through -func TestApply_terraformEnvNonDefault(t *testing.T) { - 
// Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-terraform-env"), td) - defer testChdir(t, td)() - - // Create new env - { - ui := new(cli.MockUi) - newCmd := &WorkspaceNewCommand{ - Meta: Meta{ - Ui: ui, - }, - } - if code := newCmd.Run([]string{"test"}); code != 0 { - t.Fatal("error creating workspace") - } - } - - // Switch to it - { - args := []string{"test"} - ui := new(cli.MockUi) - selCmd := &WorkspaceSelectCommand{ - Meta: Meta{ - Ui: ui, - }, - } - if code := selCmd.Run(args); code != 0 { - t.Fatal("error switching workspace") - } - } - - p := testProvider() - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - statePath := filepath.Join("terraform.tfstate.d", "test", "terraform.tfstate") - expected := strings.TrimSpace(` - -Outputs: - -output = test - `) - testStateOutput(t, statePath, expected) -} - -// Config with multiple resources, targeting apply of a subset -func TestApply_targeted(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-targeted"), td) - defer testChdir(t, td)() - - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - 
args := []string{ - "-auto-approve", - "-target", "test_instance.foo", - "-target", "test_instance.baz", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if got, want := output.Stdout(), "3 added, 0 changed, 0 destroyed"; !strings.Contains(got, want) { - t.Fatalf("bad change summary, want %q, got:\n%s", want, got) - } -} - -// Diagnostics for invalid -target flags -func TestApply_targetFlagsDiags(t *testing.T) { - testCases := map[string]string{ - "test_instance.": "Dot must be followed by attribute name.", - "test_instance": "Resource specification must include a resource type and name.", - } - - for target, wantDiag := range testCases { - t.Run(target, func(t *testing.T) { - td := testTempDir(t) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - View: view, - }, - } - - args := []string{ - "-auto-approve", - "-target", target, - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - got := output.Stderr() - if !strings.Contains(got, target) { - t.Fatalf("bad error output, want %q, got:\n%s", target, got) - } - if !strings.Contains(got, wantDiag) { - t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) - } - }) - } -} - -func TestApply_replace(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-replace"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "a", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"hello"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := 
testStateFile(t, originalState) - - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - createCount := 0 - deleteCount := 0 - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - if req.PriorState.IsNull() { - createCount++ - } - if req.PlannedState.IsNull() { - deleteCount++ - } - return providers.ApplyResourceChangeResponse{ - NewState: req.PlannedState, - } - } - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-auto-approve", - "-state", statePath, - "-replace", "test_instance.a", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) - } - - if got, want := output.Stdout(), "1 added, 0 changed, 1 destroyed"; !strings.Contains(got, want) { - t.Errorf("wrong change summary\ngot output:\n%s\n\nwant substring: %s", got, want) - } - - if got, want := createCount, 1; got != want { - t.Errorf("wrong create count %d; want %d", got, want) - } - if got, want := deleteCount, 1; got != want { - t.Errorf("wrong create count %d; want %d", got, want) - } -} - -func TestApply_pluginPath(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := applyFixtureProvider() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - 
testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - pluginPath := []string{"a", "b", "c"} - - if err := c.Meta.storePluginPath(pluginPath); err != nil { - t.Fatal(err) - } - c.Meta.pluginPath = nil - - args := []string{ - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !reflect.DeepEqual(pluginPath, c.Meta.pluginPath) { - t.Fatalf("expected plugin path %#v, got %#v", pluginPath, c.Meta.pluginPath) - } -} - -func TestApply_jsonGoldenReference(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - statePath := testTempFile(t) - - p := applyFixtureProvider() - - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-json", - "-state", statePath, - "-auto-approve", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if _, err := os.Stat(statePath); err != nil { - t.Fatalf("err: %s", err) - } - - state := testStateRead(t, statePath) - if state == nil { - t.Fatal("state should not be nil") - } - - checkGoldenReference(t, output, "apply") -} - -func TestApply_warnings(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - p := testProvider() - p.GetProviderSchemaResponse = applyFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - Diagnostics: tfdiags.Diagnostics{ - tfdiags.SimpleWarning("warning 1"), - tfdiags.SimpleWarning("warning 2"), - }, - } - } - p.ApplyResourceChangeFn = 
func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: cty.UnknownAsNull(req.PlannedState), - } - } - - t.Run("full warnings", func(t *testing.T) { - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{"-auto-approve"} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - wantWarnings := []string{ - "warning 1", - "warning 2", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) - - t.Run("compact warnings", func(t *testing.T) { - view, done := testView(t) - c := &ApplyCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - code := c.Run([]string{"-auto-approve", "-compact-warnings"}) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - // the output should contain 2 warnings and a message about -compact-warnings - wantWarnings := []string{ - "warning 1", - "warning 2", - "To see the full warning notes, run Terraform without -compact-warnings.", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) -} - -// applyFixtureSchema returns a schema suitable for processing the -// configuration in testdata/apply . This schema should be -// assigned to a mock provider named "test". 
-func applyFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } -} - -// applyFixtureProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/apply. This mock has -// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, -// with the plan/apply steps just passing through the data determined by -// Terraform Core. -func applyFixtureProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = applyFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: cty.UnknownAsNull(req.PlannedState), - } - } - return p -} - -// applyFixturePlanFile creates a plan file at a temporary location containing -// a single change to create the test_instance.foo that is included in the -// "apply" test fixture, returning the location of that plan file. -func applyFixturePlanFile(t *testing.T) string { - return applyFixturePlanFileMatchState(t, statemgr.SnapshotMeta{}) -} - -// applyFixturePlanFileMatchState creates a planfile like applyFixturePlanFile, -// but inserts the state meta information if that plan must match a preexisting -// state. 
-func applyFixturePlanFileMatchState(t *testing.T, stateMeta statemgr.SnapshotMeta) string { - _, snap := testModuleWithSnapshot(t, "apply") - plannedVal := cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("bar"), - }) - priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plan := testPlan(t) - plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - Before: priorValRaw, - After: plannedValRaw, - }, - }) - return testPlanFileMatchState( - t, - snap, - states.NewState(), - plan, - stateMeta, - ) -} - -const applyVarFile = ` -foo = "bar" -` - -const applyVarFileJSON = ` -{ "foo": "bar" } -` diff --git a/internal/command/arguments/apply.go b/internal/command/arguments/apply.go deleted file mode 100644 index 4d2e6760555e..000000000000 --- a/internal/command/arguments/apply.go +++ /dev/null @@ -1,147 +0,0 @@ -package arguments - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Apply represents the command-line arguments for the apply command. -type Apply struct { - // State, Operation, and Vars are the common extended flags - State *State - Operation *Operation - Vars *Vars - - // AutoApprove skips the manual verification step for the apply operation. - AutoApprove bool - - // InputEnabled is used to disable interactive input for unspecified - // variable and backend config values. 
Default is true. - InputEnabled bool - - // PlanPath contains an optional path to a stored plan file - PlanPath string - - // ViewType specifies which output format to use - ViewType ViewType -} - -// ParseApply processes CLI arguments, returning an Apply value and errors. -// If errors are encountered, an Apply value is still returned representing -// the best effort interpretation of the arguments. -func ParseApply(args []string) (*Apply, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - apply := &Apply{ - State: &State{}, - Operation: &Operation{}, - Vars: &Vars{}, - } - - cmdFlags := extendedFlagSet("apply", apply.State, apply.Operation, apply.Vars) - cmdFlags.BoolVar(&apply.AutoApprove, "auto-approve", false, "auto-approve") - cmdFlags.BoolVar(&apply.InputEnabled, "input", true, "input") - - var json bool - cmdFlags.BoolVar(&json, "json", false, "json") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - if len(args) > 0 { - apply.PlanPath = args[0] - args = args[1:] - } - - if len(args) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - )) - } - - // JSON view currently does not support input, so we disable it here. - if json { - apply.InputEnabled = false - } - - // JSON view cannot confirm apply, so we require either a plan file or - // auto-approve to be specified. We intentionally fail here rather than - // override auto-approve, which would be dangerous. - if json && apply.PlanPath == "" && !apply.AutoApprove { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Plan file or auto-approve required", - "Terraform cannot ask for interactive approval when -json is set. 
You can either apply a saved plan file, or enable the -auto-approve option.", - )) - } - - diags = diags.Append(apply.Operation.Parse()) - - switch { - case json: - apply.ViewType = ViewJSON - default: - apply.ViewType = ViewHuman - } - - return apply, diags -} - -// ParseApplyDestroy is a special case of ParseApply that deals with the -// "terraform destroy" command, which is effectively an alias for -// "terraform apply -destroy". -func ParseApplyDestroy(args []string) (*Apply, tfdiags.Diagnostics) { - apply, diags := ParseApply(args) - - // So far ParseApply was using the command line options like -destroy - // and -refresh-only to determine the plan mode. For "terraform destroy" - // we expect neither of those arguments to be set, and so the plan mode - // should currently be set to NormalMode, which we'll replace with - // DestroyMode here. If it's already set to something else then that - // suggests incorrect usage. - switch apply.Operation.PlanMode { - case plans.NormalMode: - // This indicates that the user didn't specify any mode options at - // all, which is correct, although we know from the command that - // they actually intended to use DestroyMode here. - apply.Operation.PlanMode = plans.DestroyMode - case plans.DestroyMode: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid mode option", - "The -destroy option is not valid for \"terraform destroy\", because this command always runs in destroy mode.", - )) - case plans.RefreshOnlyMode: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid mode option", - "The -refresh-only option is not valid for \"terraform destroy\".", - )) - default: - // This is a non-ideal error message for if we forget to handle a - // newly-handled plan mode in Operation.Parse. Ideally they should all - // have cases above so we can produce better error messages. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid mode option", - fmt.Sprintf("The \"terraform destroy\" command doesn't support %s.", apply.Operation.PlanMode), - )) - } - - // NOTE: It's also invalid to have apply.PlanPath set in this codepath, - // but we don't check that in here because we'll return a different error - // message depending on whether the given path seems to refer to a saved - // plan file or to a configuration directory. The apply command - // implementation itself therefore handles this situation. - - return apply, diags -} diff --git a/internal/command/arguments/apply_test.go b/internal/command/arguments/apply_test.go deleted file mode 100644 index 80388338004d..000000000000 --- a/internal/command/arguments/apply_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package arguments - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -func TestParseApply_basicValid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Apply - }{ - "defaults": { - nil, - &Apply{ - AutoApprove: false, - InputEnabled: true, - PlanPath: "", - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.NormalMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "auto-approve, disabled input, and plan path": { - []string{"-auto-approve", "-input=false", "saved.tfplan"}, - &Apply{ - AutoApprove: true, - InputEnabled: false, - PlanPath: "saved.tfplan", - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.NormalMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "destroy mode": { - []string{"-destroy"}, - &Apply{ - AutoApprove: false, - InputEnabled: true, - PlanPath: "", - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: 
&Operation{ - PlanMode: plans.DestroyMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "JSON view disables input": { - []string{"-json", "-auto-approve"}, - &Apply{ - AutoApprove: true, - InputEnabled: false, - PlanPath: "", - ViewType: ViewJSON, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.NormalMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - } - - cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseApply(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { - t.Errorf("unexpected result\n%s", diff) - } - }) - } -} - -func TestParseApply_json(t *testing.T) { - testCases := map[string]struct { - args []string - wantSuccess bool - }{ - "-json": { - []string{"-json"}, - false, - }, - "-json -auto-approve": { - []string{"-json", "-auto-approve"}, - true, - }, - "-json saved.tfplan": { - []string{"-json", "saved.tfplan"}, - true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseApply(tc.args) - - if tc.wantSuccess { - if len(diags) > 0 { - t.Errorf("unexpected diags: %v", diags) - } - } else { - if got, want := diags.Err().Error(), "Plan file or auto-approve required"; !strings.Contains(got, want) { - t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) - } - } - - if got.ViewType != ViewJSON { - t.Errorf("unexpected view type. 
got: %#v, want: %#v", got.ViewType, ViewJSON) - } - }) - } -} - -func TestParseApply_invalid(t *testing.T) { - got, diags := ParseApply([]string{"-frob"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } -} - -func TestParseApply_tooManyArguments(t *testing.T) { - got, diags := ParseApply([]string{"saved.tfplan", "please"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } -} - -func TestParseApply_targets(t *testing.T) { - foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") - boop, _ := addrs.ParseTargetStr("module.boop") - testCases := map[string]struct { - args []string - want []addrs.Targetable - wantErr string - }{ - "no targets by default": { - args: nil, - want: nil, - }, - "one target": { - args: []string{"-target=foo_bar.baz"}, - want: []addrs.Targetable{foobarbaz.Subject}, - }, - "two targets": { - args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, - want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, - }, - "invalid traversal": { - args: []string{"-target=foo."}, - want: nil, - wantErr: "Dot must be followed by attribute name", - }, - "invalid target": { - args: []string{"-target=data[0].foo"}, - want: nil, - wantErr: "A data source name is required", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseApply(tc.args) - if len(diags) > 0 { - if tc.wantErr == "" { - t.Fatalf("unexpected diags: %v", diags) - 
} else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) - } - } - if !cmp.Equal(got.Operation.Targets, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) - } - }) - } -} - -func TestParseApply_replace(t *testing.T) { - foobarbaz, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.baz") - foobarbeep, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.beep") - testCases := map[string]struct { - args []string - want []addrs.AbsResourceInstance - wantErr string - }{ - "no addresses by default": { - args: nil, - want: nil, - }, - "one address": { - args: []string{"-replace=foo_bar.baz"}, - want: []addrs.AbsResourceInstance{foobarbaz}, - }, - "two addresses": { - args: []string{"-replace=foo_bar.baz", "-replace", "foo_bar.beep"}, - want: []addrs.AbsResourceInstance{foobarbaz, foobarbeep}, - }, - "non-resource-instance address": { - args: []string{"-replace=module.boop"}, - want: nil, - wantErr: "A resource instance address is required here.", - }, - "data resource address": { - args: []string{"-replace=data.foo.bar"}, - want: nil, - wantErr: "Only managed resources can be used", - }, - "invalid traversal": { - args: []string{"-replace=foo."}, - want: nil, - wantErr: "Dot must be followed by attribute name", - }, - "invalid address": { - args: []string{"-replace=data[0].foo"}, - want: nil, - wantErr: "A data source name is required", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseApply(tc.args) - if len(diags) > 0 { - if tc.wantErr == "" { - t.Fatalf("unexpected diags: %v", diags) - } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) - } - } - if !cmp.Equal(got.Operation.ForceReplace, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) - } - }) - } -} - -func TestParseApply_vars(t 
*testing.T) { - testCases := map[string]struct { - args []string - want []FlagNameValue - }{ - "no var flags by default": { - args: nil, - want: nil, - }, - "one var": { - args: []string{"-var", "foo=bar"}, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - }, - }, - "one var-file": { - args: []string{"-var-file", "cool.tfvars"}, - want: []FlagNameValue{ - {Name: "-var-file", Value: "cool.tfvars"}, - }, - }, - "ordering preserved": { - args: []string{ - "-var", "foo=bar", - "-var-file", "cool.tfvars", - "-var", "boop=beep", - }, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - {Name: "-var-file", Value: "cool.tfvars"}, - {Name: "-var", Value: "boop=beep"}, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseApply(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) - } - if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { - t.Fatalf("expected Empty() to return %t, but was %t", want, got) - } - }) - } -} - -func TestParseApplyDestroy_basicValid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Apply - }{ - "defaults": { - nil, - &Apply{ - AutoApprove: false, - InputEnabled: true, - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.DestroyMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "auto-approve and disabled input": { - []string{"-auto-approve", "-input=false"}, - &Apply{ - AutoApprove: true, - InputEnabled: false, - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.DestroyMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - } - - cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - 
got, diags := ParseApplyDestroy(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { - t.Errorf("unexpected result\n%s", diff) - } - }) - } -} - -func TestParseApplyDestroy_invalid(t *testing.T) { - t.Run("explicit destroy mode", func(t *testing.T) { - got, diags := ParseApplyDestroy([]string{"-destroy"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "Invalid mode option:"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } - }) -} diff --git a/internal/command/arguments/output.go b/internal/command/arguments/output.go deleted file mode 100644 index debf05dd83b4..000000000000 --- a/internal/command/arguments/output.go +++ /dev/null @@ -1,88 +0,0 @@ -package arguments - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Output represents the command-line arguments for the output command. -type Output struct { - // Name identifies which root module output to show. If empty, show all - // outputs. - Name string - - // StatePath is an optional path to a state file, from which outputs will - // be loaded. - StatePath string - - // ViewType specifies which output format to use: human, JSON, or "raw". - ViewType ViewType -} - -// ParseOutput processes CLI arguments, returning an Output value and errors. -// If errors are encountered, an Output value is still returned representing -// the best effort interpretation of the arguments. 
-func ParseOutput(args []string) (*Output, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - output := &Output{} - - var jsonOutput, rawOutput bool - var statePath string - cmdFlags := defaultFlagSet("output") - cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - cmdFlags.BoolVar(&rawOutput, "raw", false, "raw") - cmdFlags.StringVar(&statePath, "state", "", "path") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - if len(args) > 1 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unexpected argument", - "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", - )) - } - - if jsonOutput && rawOutput { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output format", - "The -raw and -json options are mutually-exclusive.", - )) - - // Since the desired output format is unknowable, fall back to default - jsonOutput = false - rawOutput = false - } - - output.StatePath = statePath - - if len(args) > 0 { - output.Name = args[0] - } - - if rawOutput && output.Name == "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Output name required", - "You must give the name of a single output value when using the -raw option.", - )) - } - - switch { - case jsonOutput: - output.ViewType = ViewJSON - case rawOutput: - output.ViewType = ViewRaw - default: - output.ViewType = ViewHuman - } - - return output, diags -} diff --git a/internal/command/arguments/output_test.go b/internal/command/arguments/output_test.go deleted file mode 100644 index cb258081378d..000000000000 --- a/internal/command/arguments/output_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package arguments - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func 
TestParseOutput_valid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Output - }{ - "defaults": { - nil, - &Output{ - Name: "", - ViewType: ViewHuman, - StatePath: "", - }, - }, - "json": { - []string{"-json"}, - &Output{ - Name: "", - ViewType: ViewJSON, - StatePath: "", - }, - }, - "raw": { - []string{"-raw", "foo"}, - &Output{ - Name: "foo", - ViewType: ViewRaw, - StatePath: "", - }, - }, - "state": { - []string{"-state=foobar.tfstate", "-raw", "foo"}, - &Output{ - Name: "foo", - ViewType: ViewRaw, - StatePath: "foobar.tfstate", - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseOutput(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - }) - } -} - -func TestParseOutput_invalid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Output - wantDiags tfdiags.Diagnostics - }{ - "unknown flag": { - []string{"-boop"}, - &Output{ - Name: "", - ViewType: ViewHuman, - StatePath: "", - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - "flag provided but not defined: -boop", - ), - }, - }, - "json and raw specified": { - []string{"-json", "-raw"}, - &Output{ - Name: "", - ViewType: ViewHuman, - StatePath: "", - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Invalid output format", - "The -raw and -json options are mutually-exclusive.", - ), - }, - }, - "raw with no name": { - []string{"-raw"}, - &Output{ - Name: "", - ViewType: ViewRaw, - StatePath: "", - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Output name required", - "You must give the name of a single output value when using the -raw option.", - ), - }, - }, - "too many arguments": { - []string{"-raw", "-state=foo.tfstate", "bar", "baz"}, - &Output{ - Name: "bar", - ViewType: ViewRaw, - 
StatePath: "foo.tfstate", - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Unexpected argument", - "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", - ), - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, gotDiags := ParseOutput(tc.args) - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - if !reflect.DeepEqual(gotDiags, tc.wantDiags) { - t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) - } - }) - } -} diff --git a/internal/command/arguments/plan.go b/internal/command/arguments/plan.go deleted file mode 100644 index 2300dc7a5ded..000000000000 --- a/internal/command/arguments/plan.go +++ /dev/null @@ -1,81 +0,0 @@ -package arguments - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Plan represents the command-line arguments for the plan command. -type Plan struct { - // State, Operation, and Vars are the common extended flags - State *State - Operation *Operation - Vars *Vars - - // DetailedExitCode enables different exit codes for error, success with - // changes, and success with no changes. - DetailedExitCode bool - - // InputEnabled is used to disable interactive input for unspecified - // variable and backend config values. Default is true. - InputEnabled bool - - // OutPath contains an optional path to store the plan file - OutPath string - - // ViewType specifies which output format to use - ViewType ViewType -} - -// ParsePlan processes CLI arguments, returning a Plan value and errors. -// If errors are encountered, a Plan value is still returned representing -// the best effort interpretation of the arguments. 
-func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - plan := &Plan{ - State: &State{}, - Operation: &Operation{}, - Vars: &Vars{}, - } - - cmdFlags := extendedFlagSet("plan", plan.State, plan.Operation, plan.Vars) - cmdFlags.BoolVar(&plan.DetailedExitCode, "detailed-exitcode", false, "detailed-exitcode") - cmdFlags.BoolVar(&plan.InputEnabled, "input", true, "input") - cmdFlags.StringVar(&plan.OutPath, "out", "", "out") - - var json bool - cmdFlags.BoolVar(&json, "json", false, "json") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - - if len(args) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "To specify a working directory for the plan, use the global -chdir flag.", - )) - } - - diags = diags.Append(plan.Operation.Parse()) - - // JSON view currently does not support input, so we disable it here - if json { - plan.InputEnabled = false - } - - switch { - case json: - plan.ViewType = ViewJSON - default: - plan.ViewType = ViewHuman - } - - return plan, diags -} diff --git a/internal/command/arguments/plan_test.go b/internal/command/arguments/plan_test.go deleted file mode 100644 index b547d3f7ab2f..000000000000 --- a/internal/command/arguments/plan_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package arguments - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -func TestParsePlan_basicValid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Plan - }{ - "defaults": { - nil, - &Plan{ - DetailedExitCode: false, - InputEnabled: true, - OutPath: "", - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: 
&Operation{ - PlanMode: plans.NormalMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "setting all options": { - []string{"-destroy", "-detailed-exitcode", "-input=false", "-out=saved.tfplan"}, - &Plan{ - DetailedExitCode: true, - InputEnabled: false, - OutPath: "saved.tfplan", - ViewType: ViewHuman, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.DestroyMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - "JSON view disables input": { - []string{"-json"}, - &Plan{ - DetailedExitCode: false, - InputEnabled: false, - OutPath: "", - ViewType: ViewJSON, - State: &State{Lock: true}, - Vars: &Vars{}, - Operation: &Operation{ - PlanMode: plans.NormalMode, - Parallelism: 10, - Refresh: true, - }, - }, - }, - } - - cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParsePlan(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { - t.Errorf("unexpected result\n%s", diff) - } - }) - } -} - -func TestParsePlan_invalid(t *testing.T) { - got, diags := ParsePlan([]string{"-frob"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } -} - -func TestParsePlan_tooManyArguments(t *testing.T) { - got, diags := ParsePlan([]string{"saved.tfplan"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", 
got.ViewType, ViewHuman) - } -} - -func TestParsePlan_targets(t *testing.T) { - foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") - boop, _ := addrs.ParseTargetStr("module.boop") - testCases := map[string]struct { - args []string - want []addrs.Targetable - wantErr string - }{ - "no targets by default": { - args: nil, - want: nil, - }, - "one target": { - args: []string{"-target=foo_bar.baz"}, - want: []addrs.Targetable{foobarbaz.Subject}, - }, - "two targets": { - args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, - want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, - }, - "invalid traversal": { - args: []string{"-target=foo."}, - want: nil, - wantErr: "Dot must be followed by attribute name", - }, - "invalid target": { - args: []string{"-target=data[0].foo"}, - want: nil, - wantErr: "A data source name is required", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParsePlan(tc.args) - if len(diags) > 0 { - if tc.wantErr == "" { - t.Fatalf("unexpected diags: %v", diags) - } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) - } - } - if !cmp.Equal(got.Operation.Targets, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) - } - }) - } -} - -func TestParsePlan_vars(t *testing.T) { - testCases := map[string]struct { - args []string - want []FlagNameValue - }{ - "no var flags by default": { - args: nil, - want: nil, - }, - "one var": { - args: []string{"-var", "foo=bar"}, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - }, - }, - "one var-file": { - args: []string{"-var-file", "cool.tfvars"}, - want: []FlagNameValue{ - {Name: "-var-file", Value: "cool.tfvars"}, - }, - }, - "ordering preserved": { - args: []string{ - "-var", "foo=bar", - "-var-file", "cool.tfvars", - "-var", "boop=beep", - }, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - {Name: 
"-var-file", Value: "cool.tfvars"}, - {Name: "-var", Value: "boop=beep"}, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParsePlan(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) - } - if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { - t.Fatalf("expected Empty() to return %t, but was %t", want, got) - } - }) - } -} diff --git a/internal/command/arguments/refresh.go b/internal/command/arguments/refresh.go deleted file mode 100644 index bc08d9df4d90..000000000000 --- a/internal/command/arguments/refresh.go +++ /dev/null @@ -1,71 +0,0 @@ -package arguments - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Refresh represents the command-line arguments for the apply command. -type Refresh struct { - // State, Operation, and Vars are the common extended flags - State *State - Operation *Operation - Vars *Vars - - // InputEnabled is used to disable interactive input for unspecified - // variable and backend config values. Default is true. - InputEnabled bool - - // ViewType specifies which output format to use - ViewType ViewType -} - -// ParseRefresh processes CLI arguments, returning a Refresh value and errors. -// If errors are encountered, a Refresh value is still returned representing -// the best effort interpretation of the arguments. 
-func ParseRefresh(args []string) (*Refresh, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - refresh := &Refresh{ - State: &State{}, - Operation: &Operation{}, - Vars: &Vars{}, - } - - cmdFlags := extendedFlagSet("refresh", refresh.State, refresh.Operation, refresh.Vars) - cmdFlags.BoolVar(&refresh.InputEnabled, "input", true, "input") - - var json bool - cmdFlags.BoolVar(&json, "json", false, "json") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - if len(args) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - )) - } - - diags = diags.Append(refresh.Operation.Parse()) - - // JSON view currently does not support input, so we disable it here - if json { - refresh.InputEnabled = false - } - - switch { - case json: - refresh.ViewType = ViewJSON - default: - refresh.ViewType = ViewHuman - } - - return refresh, diags -} diff --git a/internal/command/arguments/refresh_test.go b/internal/command/arguments/refresh_test.go deleted file mode 100644 index 3f35053f7bed..000000000000 --- a/internal/command/arguments/refresh_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package arguments - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestParseRefresh_basicValid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Refresh - }{ - "defaults": { - nil, - &Refresh{ - InputEnabled: true, - ViewType: ViewHuman, - }, - }, - "input=false": { - []string{"-input=false"}, - &Refresh{ - InputEnabled: false, - ViewType: ViewHuman, - }, - }, - "JSON view disables input": { - []string{"-json"}, - &Refresh{ - InputEnabled: false, - ViewType: ViewJSON, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { 
- got, diags := ParseRefresh(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - // Ignore the extended arguments for simplicity - got.State = nil - got.Operation = nil - got.Vars = nil - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - }) - } -} - -func TestParseRefresh_invalid(t *testing.T) { - got, diags := ParseRefresh([]string{"-frob"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } -} - -func TestParseRefresh_tooManyArguments(t *testing.T) { - got, diags := ParseRefresh([]string{"saved.tfplan"}) - if len(diags) == 0 { - t.Fatal("expected diags but got none") - } - if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) - } - if got.ViewType != ViewHuman { - t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) - } -} - -func TestParseRefresh_targets(t *testing.T) { - foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") - boop, _ := addrs.ParseTargetStr("module.boop") - testCases := map[string]struct { - args []string - want []addrs.Targetable - wantErr string - }{ - "no targets by default": { - args: nil, - want: nil, - }, - "one target": { - args: []string{"-target=foo_bar.baz"}, - want: []addrs.Targetable{foobarbaz.Subject}, - }, - "two targets": { - args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, - want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, - }, - "invalid traversal": { - args: []string{"-target=foo."}, - want: nil, - wantErr: "Dot must be followed by attribute name", - }, - "invalid target": { - args: []string{"-target=data[0].foo"}, - want: 
nil, - wantErr: "A data source name is required", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseRefresh(tc.args) - if len(diags) > 0 { - if tc.wantErr == "" { - t.Fatalf("unexpected diags: %v", diags) - } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { - t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) - } - } - if !cmp.Equal(got.Operation.Targets, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) - } - }) - } -} - -func TestParseRefresh_vars(t *testing.T) { - testCases := map[string]struct { - args []string - want []FlagNameValue - }{ - "no var flags by default": { - args: nil, - want: nil, - }, - "one var": { - args: []string{"-var", "foo=bar"}, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - }, - }, - "one var-file": { - args: []string{"-var-file", "cool.tfvars"}, - want: []FlagNameValue{ - {Name: "-var-file", Value: "cool.tfvars"}, - }, - }, - "ordering preserved": { - args: []string{ - "-var", "foo=bar", - "-var-file", "cool.tfvars", - "-var", "boop=beep", - }, - want: []FlagNameValue{ - {Name: "-var", Value: "foo=bar"}, - {Name: "-var-file", Value: "cool.tfvars"}, - {Name: "-var", Value: "boop=beep"}, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseRefresh(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { - t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) - } - if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { - t.Fatalf("expected Empty() to return %t, but was %t", want, got) - } - }) - } -} diff --git a/internal/command/arguments/show.go b/internal/command/arguments/show.go deleted file mode 100644 index 4d95fc1daa7b..000000000000 --- a/internal/command/arguments/show.go +++ /dev/null @@ -1,59 +0,0 @@ -package arguments - -import ( - 
"github.com/hashicorp/terraform/internal/tfdiags" -) - -// Show represents the command-line arguments for the show command. -type Show struct { - // Path is the path to the state file or plan file to be displayed. If - // unspecified, show will display the latest state snapshot. - Path string - - // ViewType specifies which output format to use: human, JSON, or "raw". - ViewType ViewType -} - -// ParseShow processes CLI arguments, returning a Show value and errors. -// If errors are encountered, a Show value is still returned representing -// the best effort interpretation of the arguments. -func ParseShow(args []string) (*Show, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - show := &Show{ - Path: "", - } - - var jsonOutput bool - cmdFlags := defaultFlagSet("show") - cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - if len(args) > 1 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - )) - } - - if len(args) > 0 { - show.Path = args[0] - } - - switch { - case jsonOutput: - show.ViewType = ViewJSON - default: - show.ViewType = ViewHuman - } - - return show, diags -} diff --git a/internal/command/arguments/show_test.go b/internal/command/arguments/show_test.go deleted file mode 100644 index 5088e1a94e5b..000000000000 --- a/internal/command/arguments/show_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package arguments - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestParseShow_valid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Show - }{ - "defaults": { - nil, - &Show{ - Path: "", - ViewType: ViewHuman, - }, - }, - "json": { - []string{"-json"}, - 
&Show{ - Path: "", - ViewType: ViewJSON, - }, - }, - "path": { - []string{"-json", "foo"}, - &Show{ - Path: "foo", - ViewType: ViewJSON, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseShow(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - }) - } -} - -func TestParseShow_invalid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Show - wantDiags tfdiags.Diagnostics - }{ - "unknown flag": { - []string{"-boop"}, - &Show{ - Path: "", - ViewType: ViewHuman, - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - "flag provided but not defined: -boop", - ), - }, - }, - "too many arguments": { - []string{"-json", "bar", "baz"}, - &Show{ - Path: "bar", - ViewType: ViewJSON, - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - ), - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, gotDiags := ParseShow(tc.args) - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - if !reflect.DeepEqual(gotDiags, tc.wantDiags) { - t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) - } - }) - } -} diff --git a/internal/command/arguments/test.go b/internal/command/arguments/test.go deleted file mode 100644 index 8ffbd4914d54..000000000000 --- a/internal/command/arguments/test.go +++ /dev/null @@ -1,63 +0,0 @@ -package arguments - -import ( - "flag" - "io/ioutil" - - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Test represents the command line arguments for the "terraform test" command. 
-type Test struct { - Output TestOutput -} - -// TestOutput represents a subset of the arguments for "terraform test" -// related to how it presents its results. That is, it's the arguments that -// are relevant to the command's view rather than its controller. -type TestOutput struct { - // If not an empty string, JUnitXMLFile gives a filename where JUnit-style - // XML test result output should be written, in addition to the normal - // output printed to the standard output and error streams. - // (The typical usage pattern for tools that can consume this file format - // is to configure them to look for a separate test result file on disk - // after running the tests.) - JUnitXMLFile string -} - -// ParseTest interprets a slice of raw command line arguments into a -// Test value. -func ParseTest(args []string) (Test, tfdiags.Diagnostics) { - var ret Test - var diags tfdiags.Diagnostics - - // NOTE: ParseTest should still return at least a partial - // Test even on error, containing enough information for the - // command to report error diagnostics in a suitable way. - - f := flag.NewFlagSet("test", flag.ContinueOnError) - f.SetOutput(ioutil.Discard) - f.Usage = func() {} - f.StringVar(&ret.Output.JUnitXMLFile, "junit-xml", "", "Write a JUnit XML file describing the results") - - err := f.Parse(args) - if err != nil { - diags = diags.Append(err) - return ret, diags - } - - // We'll now discard all of the arguments that the flag package handled, - // and focus only on the positional arguments for the rest of the function. 
- args = f.Args() - - if len(args) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid command arguments", - "The test command doesn't expect any positional command-line arguments.", - )) - return ret, diags - } - - return ret, diags -} diff --git a/internal/command/arguments/test_test.go b/internal/command/arguments/test_test.go deleted file mode 100644 index 9a1c7fed013a..000000000000 --- a/internal/command/arguments/test_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package arguments - -import ( - "testing" - - "github.com/apparentlymart/go-shquot/shquot" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestParseTest(t *testing.T) { - tests := []struct { - Input []string - Want Test - WantError string - }{ - { - nil, - Test{ - Output: TestOutput{ - JUnitXMLFile: "", - }, - }, - ``, - }, - { - []string{"-invalid"}, - Test{ - Output: TestOutput{ - JUnitXMLFile: "", - }, - }, - `flag provided but not defined: -invalid`, - }, - { - []string{"-junit-xml=result.xml"}, - Test{ - Output: TestOutput{ - JUnitXMLFile: "result.xml", - }, - }, - ``, - }, - { - []string{"baz"}, - Test{ - Output: TestOutput{ - JUnitXMLFile: "", - }, - }, - `Invalid command arguments`, - }, - } - - baseCmdline := []string{"terraform", "test"} - for _, test := range tests { - name := shquot.POSIXShell(append(baseCmdline, test.Input...)) - t.Run(name, func(t *testing.T) { - t.Log(name) - got, diags := ParseTest(test.Input) - - if test.WantError != "" { - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want exactly 1\n%s", len(diags), diags.Err().Error()) - } - if diags[0].Severity() != tfdiags.Error { - t.Fatalf("got a warning; want an error\n%s", diags.Err().Error()) - } - if desc := diags[0].Description(); desc.Summary != test.WantError { - t.Fatalf("wrong error\ngot: %s\nwant: %s", desc.Summary, test.WantError) - } - } else { - if len(diags) != 0 { - t.Fatalf("got %d diagnostics; want none\n%s", len(diags), 
diags.Err().Error()) - } - } - - if diff := cmp.Diff(test.Want, got); diff != "" { - t.Errorf("wrong result\n%s", diff) - } - }) - } -} diff --git a/internal/command/arguments/validate.go b/internal/command/arguments/validate.go deleted file mode 100644 index daadd7ed537c..000000000000 --- a/internal/command/arguments/validate.go +++ /dev/null @@ -1,59 +0,0 @@ -package arguments - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Validate represents the command-line arguments for the validate command. -type Validate struct { - // Path is the directory containing the configuration to be validated. If - // unspecified, validate will use the current directory. - Path string - - // ViewType specifies which output format to use: human, JSON, or "raw". - ViewType ViewType -} - -// ParseValidate processes CLI arguments, returning a Validate value and errors. -// If errors are encountered, a Validate value is still returned representing -// the best effort interpretation of the arguments. 
-func ParseValidate(args []string) (*Validate, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - validate := &Validate{ - Path: ".", - } - - var jsonOutput bool - cmdFlags := defaultFlagSet("validate") - cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - - if err := cmdFlags.Parse(args); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - err.Error(), - )) - } - - args = cmdFlags.Args() - if len(args) > 1 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - )) - } - - if len(args) > 0 { - validate.Path = args[0] - } - - switch { - case jsonOutput: - validate.ViewType = ViewJSON - default: - validate.ViewType = ViewHuman - } - - return validate, diags -} diff --git a/internal/command/arguments/validate_test.go b/internal/command/arguments/validate_test.go deleted file mode 100644 index e7440555073b..000000000000 --- a/internal/command/arguments/validate_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package arguments - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestParseValidate_valid(t *testing.T) { - testCases := map[string]struct { - args []string - want *Validate - }{ - "defaults": { - nil, - &Validate{ - Path: ".", - ViewType: ViewHuman, - }, - }, - "json": { - []string{"-json"}, - &Validate{ - Path: ".", - ViewType: ViewJSON, - }, - }, - "path": { - []string{"-json", "foo"}, - &Validate{ - Path: "foo", - ViewType: ViewJSON, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, diags := ParseValidate(tc.args) - if len(diags) > 0 { - t.Fatalf("unexpected diags: %v", diags) - } - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - }) - } -} - -func TestParseValidate_invalid(t *testing.T) { - testCases := map[string]struct { - args 
[]string - want *Validate - wantDiags tfdiags.Diagnostics - }{ - "unknown flag": { - []string{"-boop"}, - &Validate{ - Path: ".", - ViewType: ViewHuman, - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Failed to parse command-line flags", - "flag provided but not defined: -boop", - ), - }, - }, - "too many arguments": { - []string{"-json", "bar", "baz"}, - &Validate{ - Path: "bar", - ViewType: ViewJSON, - }, - tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Error, - "Too many command line arguments", - "Expected at most one positional argument.", - ), - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got, gotDiags := ParseValidate(tc.args) - if *got != *tc.want { - t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) - } - if !reflect.DeepEqual(gotDiags, tc.wantDiags) { - t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) - } - }) - } -} diff --git a/internal/command/clistate/state.go b/internal/command/clistate/state.go deleted file mode 100644 index a9946b6e6a96..000000000000 --- a/internal/command/clistate/state.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package state exposes common helpers for working with state from the CLI. -// -// This is a separate package so that backends can use this for consistent -// messaging without creating a circular reference to the command package. -package clistate - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/helper/slowmessage" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -const ( - LockThreshold = 400 * time.Millisecond - LockErrorMessage = `Error message: %s - -Terraform acquires a state lock to protect the state from being written -by multiple users at the same time. Please resolve the issue above and try -again. 
For most commands, you can disable locking with the "-lock=false" -flag, but this is not recommended.` - - UnlockErrorMessage = `Error message: %s - -Terraform acquires a lock when accessing your state to prevent others -running Terraform to potentially modify the state at the same time. An -error occurred while releasing this lock. This could mean that the lock -did or did not release properly. If the lock didn't release properly, -Terraform may not be able to run future commands since it'll appear as if -the lock is held. - -In this scenario, please call the "force-unlock" command to unlock the -state manually. This is a very dangerous operation since if it is done -erroneously it could result in two people modifying state at the same time. -Only call this command if you're certain that the unlock above failed and -that no one else is holding a lock.` -) - -// Locker allows for more convenient usage of the lower-level statemgr.Locker -// implementations. -// The statemgr.Locker API requires passing in a statemgr.LockInfo struct. Locker -// implementations are expected to create the required LockInfo struct when -// Lock is called, populate the Operation field with the "reason" string -// provided, and pass that on to the underlying statemgr.Locker. -// Locker implementations are also expected to store any state required to call -// Unlock, which is at a minimum the LockID string returned by the -// statemgr.Locker. -type Locker interface { - // Returns a shallow copy of the locker with its context changed to ctx. - WithContext(ctx context.Context) Locker - - // Lock the provided state manager, storing the reason string in the LockInfo. - Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics - - // Unlock the previously locked state. 
- Unlock() tfdiags.Diagnostics - - // Timeout returns the configured timeout duration - Timeout() time.Duration -} - -type locker struct { - mu sync.Mutex - ctx context.Context - timeout time.Duration - state statemgr.Locker - view views.StateLocker - lockID string -} - -var _ Locker = (*locker)(nil) - -// Create a new Locker. -// This Locker uses state.LockWithContext to retry the lock until the provided -// timeout is reached, or the context is canceled. Lock progress will be be -// reported to the user through the provided UI. -func NewLocker(timeout time.Duration, view views.StateLocker) Locker { - return &locker{ - ctx: context.Background(), - timeout: timeout, - view: view, - } -} - -// WithContext returns a new Locker with the specified context, copying the -// timeout and view parameters from the original Locker. -func (l *locker) WithContext(ctx context.Context) Locker { - if ctx == nil { - panic("nil context") - } - return &locker{ - ctx: ctx, - timeout: l.timeout, - view: l.view, - } -} - -// Locker locks the given state and outputs to the user if locking is taking -// longer than the threshold. The lock is retried until the context is -// cancelled. 
-func (l *locker) Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - l.mu.Lock() - defer l.mu.Unlock() - - l.state = s - - ctx, cancel := context.WithTimeout(l.ctx, l.timeout) - defer cancel() - - lockInfo := statemgr.NewLockInfo() - lockInfo.Operation = reason - - err := slowmessage.Do(LockThreshold, func() error { - id, err := statemgr.LockWithContext(ctx, s, lockInfo) - l.lockID = id - return err - }, l.view.Locking) - - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error acquiring the state lock", - fmt.Sprintf(LockErrorMessage, err), - )) - } - - return diags -} - -func (l *locker) Unlock() tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - l.mu.Lock() - defer l.mu.Unlock() - - if l.lockID == "" { - return diags - } - - err := slowmessage.Do(LockThreshold, func() error { - return l.state.Unlock(l.lockID) - }, l.view.Unlocking) - - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error releasing the state lock", - fmt.Sprintf(UnlockErrorMessage, err), - )) - } - - return diags - -} - -func (l *locker) Timeout() time.Duration { - return l.timeout -} - -type noopLocker struct{} - -// NewNoopLocker returns a valid Locker that does nothing. 
-func NewNoopLocker() Locker { - return noopLocker{} -} - -var _ Locker = noopLocker{} - -func (l noopLocker) WithContext(ctx context.Context) Locker { - return l -} - -func (l noopLocker) Lock(statemgr.Locker, string) tfdiags.Diagnostics { - return nil -} - -func (l noopLocker) Unlock() tfdiags.Diagnostics { - return nil -} - -func (l noopLocker) Timeout() time.Duration { - return 0 -} diff --git a/internal/command/clistate/state_test.go b/internal/command/clistate/state_test.go deleted file mode 100644 index e0daecdd545c..000000000000 --- a/internal/command/clistate/state_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package clistate - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terminal" -) - -func TestUnlock(t *testing.T) { - streams, _ := terminal.StreamsForTesting(t) - view := views.NewView(streams) - - l := NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) - l.Lock(statemgr.NewUnlockErrorFull(nil, nil), "test-lock") - - diags := l.Unlock() - if diags.HasErrors() { - t.Log(diags.Err().Error()) - } else { - t.Error("expected error") - } -} diff --git a/internal/command/e2etest/init_test.go b/internal/command/e2etest/init_test.go deleted file mode 100644 index b365267d026e..000000000000 --- a/internal/command/e2etest/init_test.go +++ /dev/null @@ -1,408 +0,0 @@ -package e2etest - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/hashicorp/terraform/internal/e2e" -) - -func TestInitProviders(t *testing.T) { - t.Parallel() - - // This test reaches out to releases.hashicorp.com to download the - // template provider, so it can only run if network access is allowed. 
- // We intentionally don't try to stub this here, because there's already - // a stubbed version of this in the "command" package and so the goal here - // is to test the interaction with the real repository. - skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "template-provider") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - stdout, stderr, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - if !strings.Contains(stdout, "Terraform has been successfully initialized!") { - t.Errorf("success message is missing from output:\n%s", stdout) - } - - if !strings.Contains(stdout, "- Installing hashicorp/template v") { - t.Errorf("provider download message is missing from output:\n%s", stdout) - t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") - } - - if !strings.Contains(stdout, "Terraform has created a lock file") { - t.Errorf("lock file notification is missing from output:\n%s", stdout) - } - -} - -func TestInitProvidersInternal(t *testing.T) { - t.Parallel() - - // This test should _not_ reach out anywhere because the "terraform" - // provider is internal to the core terraform binary. - - fixturePath := filepath.Join("testdata", "terraform-provider") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - stdout, stderr, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - if !strings.Contains(stdout, "Terraform has been successfully initialized!") { - t.Errorf("success message is missing from output:\n%s", stdout) - } - - if strings.Contains(stdout, "Installing hashicorp/terraform") { - // Shouldn't have downloaded anything with this config, because the - // provider is built in. 
- t.Errorf("provider download message appeared in output:\n%s", stdout) - } - - if strings.Contains(stdout, "Installing terraform.io/builtin/terraform") { - // Shouldn't have downloaded anything with this config, because the - // provider is built in. - t.Errorf("provider download message appeared in output:\n%s", stdout) - } -} - -func TestInitProvidersVendored(t *testing.T) { - t.Parallel() - - // This test will try to reach out to registry.terraform.io as one of the - // possible installation locations for - // hashicorp/null, where it will find that - // versions do exist but will ultimately select the version that is - // vendored due to the version constraint. - skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "vendored-provider") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - // Our fixture dir has a generic os_arch dir, which we need to customize - // to the actual OS/arch where this test is running in order to get the - // desired result. - fixtMachineDir := tf.Path("terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/os_arch") - wantMachineDir := tf.Path("terraform.d/plugins/registry.terraform.io/hashicorp/null/1.0.0+local/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) - err := os.Rename(fixtMachineDir, wantMachineDir) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - stdout, stderr, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - if !strings.Contains(stdout, "Terraform has been successfully initialized!") { - t.Errorf("success message is missing from output:\n%s", stdout) - } - - if !strings.Contains(stdout, "- Installing hashicorp/null v1.0.0+local") { - t.Errorf("provider download message is missing from output:\n%s", stdout) - t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") - } - -} - -func 
TestInitProvidersLocalOnly(t *testing.T) { - t.Parallel() - - // This test should not reach out to the network if it is behaving as - // intended. If it _does_ try to access an upstream registry and encounter - // an error doing so then that's a legitimate test failure that should be - // fixed. (If it incorrectly reaches out anywhere then it's likely to be - // to the host "example.com", which is the placeholder domain we use in - // the test fixture.) - - fixturePath := filepath.Join("testdata", "local-only-provider") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - // If you run this test on a workstation with a plugin-cache directory - // configured, it will leave a bad directory behind and terraform init will - // not work until you remove it. - // - // To avoid this, we will "zero out" any existing cli config file. - tf.AddEnv("TF_CLI_CONFIG_FILE=") - - // Our fixture dir has a generic os_arch dir, which we need to customize - // to the actual OS/arch where this test is running in order to get the - // desired result. 
- fixtMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch") - wantMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) - err := os.Rename(fixtMachineDir, wantMachineDir) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - stdout, stderr, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - if !strings.Contains(stdout, "Terraform has been successfully initialized!") { - t.Errorf("success message is missing from output:\n%s", stdout) - } - - if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { - t.Errorf("provider download message is missing from output:\n%s", stdout) - t.Logf("(this can happen if you have a conflicting copy of the plugin in one of the global plugin search dirs)") - } -} - -func TestInitProvidersCustomMethod(t *testing.T) { - t.Parallel() - - // This test should not reach out to the network if it is behaving as - // intended. If it _does_ try to access an upstream registry and encounter - // an error doing so then that's a legitimate test failure that should be - // fixed. (If it incorrectly reaches out anywhere then it's likely to be - // to the host "example.com", which is the placeholder domain we use in - // the test fixture.) - - for _, configFile := range []string{"cliconfig.tfrc", "cliconfig.tfrc.json"} { - t.Run(configFile, func(t *testing.T) { - fixturePath := filepath.Join("testdata", "custom-provider-install-method") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - // Our fixture dir has a generic os_arch dir, which we need to customize - // to the actual OS/arch where this test is running in order to get the - // desired result. 
- fixtMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch") - wantMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) - err := os.Rename(fixtMachineDir, wantMachineDir) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // We'll use a local CLI configuration file taken from our fixture - // directory so we can force a custom installation method config. - tf.AddEnv("TF_CLI_CONFIG_FILE=" + tf.Path(configFile)) - - stdout, stderr, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - if !strings.Contains(stdout, "Terraform has been successfully initialized!") { - t.Errorf("success message is missing from output:\n%s", stdout) - } - - if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { - t.Errorf("provider download message is missing from output:\n%s", stdout) - } - }) - } -} - -func TestInitProviders_pluginCache(t *testing.T) { - t.Parallel() - - // This test reaches out to releases.hashicorp.com to access plugin - // metadata, and download the null plugin, though the template plugin - // should come from local cache. - skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "plugin-cache") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - // Our fixture dir has a generic os_arch dir, which we need to customize - // to the actual OS/arch where this test is running in order to get the - // desired result. 
- fixtMachineDir := tf.Path("cache/registry.terraform.io/hashicorp/template/2.1.0/os_arch") - wantMachineDir := tf.Path("cache/registry.terraform.io/hashicorp/template/2.1.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) - err := os.Rename(fixtMachineDir, wantMachineDir) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - cmd := tf.Cmd("init") - - // convert the slashes if building for windows. - p := filepath.FromSlash("./cache") - cmd.Env = append(cmd.Env, "TF_PLUGIN_CACHE_DIR="+p) - err = cmd.Run() - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - path := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/template/2.1.0/%s_%s/terraform-provider-template_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) - content, err := tf.ReadFile(path) - if err != nil { - t.Fatalf("failed to read installed plugin from %s: %s", path, err) - } - if strings.TrimSpace(string(content)) != "this is not a real plugin" { - t.Errorf("template plugin was not installed from local cache") - } - - nullLinkPath := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/null/2.1.0/%s_%s/terraform-provider-null_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) - if runtime.GOOS == "windows" { - nullLinkPath = nullLinkPath + ".exe" - } - if !tf.FileExists(nullLinkPath) { - t.Errorf("null plugin was not installed into %s", nullLinkPath) - } - - nullCachePath := filepath.FromSlash(fmt.Sprintf("cache/registry.terraform.io/hashicorp/null/2.1.0/%s_%s/terraform-provider-null_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) - if runtime.GOOS == "windows" { - nullCachePath = nullCachePath + ".exe" - } - if !tf.FileExists(nullCachePath) { - t.Errorf("null plugin is not in cache after install. expected in: %s", nullCachePath) - } -} - -func TestInit_fromModule(t *testing.T) { - t.Parallel() - - // This test reaches out to registry.terraform.io and github.com to lookup - // and fetch a module. 
- skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "empty") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - cmd := tf.Cmd("init", "-from-module=hashicorp/vault/aws") - cmd.Stdin = nil - cmd.Stderr = &bytes.Buffer{} - - err := cmd.Run() - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - stderr := cmd.Stderr.(*bytes.Buffer).String() - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - content, err := tf.ReadFile("main.tf") - if err != nil { - t.Fatalf("failed to read main.tf: %s", err) - } - if !bytes.Contains(content, []byte("vault")) { - t.Fatalf("main.tf doesn't appear to be a vault configuration: \n%s", content) - } -} - -func TestInitProviderNotFound(t *testing.T) { - t.Parallel() - - // This test will reach out to registry.terraform.io as one of the possible - // installation locations for hashicorp/nonexist, which should not exist. - skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "provider-not-found") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - t.Run("registry provider not found", func(t *testing.T) { - _, stderr, err := tf.Run("init", "-no-color") - if err == nil { - t.Fatal("expected error, got success") - } - - oneLineStderr := strings.ReplaceAll(stderr, "\n", " ") - if !strings.Contains(oneLineStderr, "provider registry registry.terraform.io does not have a provider named registry.terraform.io/hashicorp/nonexist") { - t.Errorf("expected error message is missing from output:\n%s", stderr) - } - - if !strings.Contains(oneLineStderr, "All modules should specify their required_providers") { - t.Errorf("expected error message is missing from output:\n%s", stderr) - } - }) - - t.Run("local provider not found", func(t *testing.T) { - // The -plugin-dir directory must exist for the provider installer to search it. 
- pluginDir := tf.Path("empty") - if err := os.Mkdir(pluginDir, os.ModePerm); err != nil { - t.Fatal(err) - } - - _, stderr, err := tf.Run("init", "-no-color", "-plugin-dir="+pluginDir) - if err == nil { - t.Fatal("expected error, got success") - } - - if !strings.Contains(stderr, "provider registry.terraform.io/hashicorp/nonexist was not\nfound in any of the search locations\n\n - "+pluginDir) { - t.Errorf("expected error message is missing from output:\n%s", stderr) - } - }) - - t.Run("special characters enabled", func(t *testing.T) { - _, stderr, err := tf.Run("init") - if err == nil { - t.Fatal("expected error, got success") - } - - expectedErr := `╷ -│ Error: Failed to query available provider packages -│` + ` ` + ` -│ Could not retrieve the list of available versions for provider -│ hashicorp/nonexist: provider registry registry.terraform.io does not have a -│ provider named registry.terraform.io/hashicorp/nonexist -│ -│ All modules should specify their required_providers so that external -│ consumers will get the correct providers when using a module. To see which -│ modules are currently depending on hashicorp/nonexist, run the following -│ command: -│ terraform providers -╵ - -` - if stripAnsi(stderr) != expectedErr { - t.Errorf("wrong output:\n%s", cmp.Diff(stripAnsi(stderr), expectedErr)) - } - }) -} - -func TestInitProviderWarnings(t *testing.T) { - t.Parallel() - - // This test will reach out to registry.terraform.io as one of the possible - // installation locations for hashicorp/nonexist, which should not exist. 
- skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "provider-warnings") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - stdout, _, err := tf.Run("init") - if err == nil { - t.Fatal("expected error, got success") - } - - if !strings.Contains(stdout, "This provider is archived and no longer needed.") { - t.Errorf("expected warning message is missing from output:\n%s", stdout) - } - -} diff --git a/internal/command/e2etest/main_test.go b/internal/command/e2etest/main_test.go deleted file mode 100644 index 3c9ba5a5e1b6..000000000000 --- a/internal/command/e2etest/main_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package e2etest - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/hashicorp/terraform/internal/e2e" -) - -var terraformBin string - -// canRunGoBuild is a short-term compromise to account for the fact that we -// have a small number of tests that work by building helper programs using -// "go build" at runtime, but we can't do that in our isolated test mode -// driven by the make-archive.sh script. -// -// FIXME: Rework this a bit so that we build the necessary helper programs -// (test plugins, etc) as part of the initial suite setup, and in the -// make-archive.sh script, so that we can run all of the tests in both -// situations with the tests just using the executable already built for -// them, as we do for terraformBin. -var canRunGoBuild bool - -func TestMain(m *testing.M) { - teardown := setup() - code := m.Run() - teardown() - os.Exit(code) -} - -func setup() func() { - if terraformBin != "" { - // this is pre-set when we're running in a binary produced from - // the make-archive.sh script, since that is for testing an - // executable obtained from a real release package. However, we do - // need to turn it into an absolute path so that we can find it - // when we change the working directory during tests. 
- var err error - terraformBin, err = filepath.Abs(terraformBin) - if err != nil { - panic(fmt.Sprintf("failed to find absolute path of terraform executable: %s", err)) - } - return func() {} - } - - tmpFilename := e2e.GoBuild("github.com/hashicorp/terraform", "terraform") - - // Make the executable available for use in tests - terraformBin = tmpFilename - - // Tests running in the ad-hoc testing mode are allowed to use "go build" - // and similar to produce other test executables. - // (See the comment on this variable's declaration for more information.) - canRunGoBuild = true - - return func() { - os.Remove(tmpFilename) - } -} - -func canAccessNetwork() bool { - // We re-use the flag normally used for acceptance tests since that's - // established as a way to opt-in to reaching out to real systems that - // may suffer transient errors. - return os.Getenv("TF_ACC") != "" -} - -func skipIfCannotAccessNetwork(t *testing.T) { - t.Helper() - - if !canAccessNetwork() { - t.Skip("network access not allowed; use TF_ACC=1 to enable") - } -} diff --git a/internal/command/e2etest/provisioner_test.go b/internal/command/e2etest/provisioner_test.go deleted file mode 100644 index 63d53576eb66..000000000000 --- a/internal/command/e2etest/provisioner_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package e2etest - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/e2e" -) - -// TestProviderDevOverrides is a test that terraform can execute a 3rd party -// provisioner plugin. -func TestProvisioner(t *testing.T) { - t.Parallel() - - // This test reaches out to releases.hashicorp.com to download the - // template and null providers, so it can only run if network access is - // allowed. 
- skipIfCannotAccessNetwork(t) - - tf := e2e.NewBinary(t, terraformBin, "testdata/provisioner") - - //// INIT - _, stderr, err := tf.Run("init") - if err != nil { - t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) - } - - //// PLAN - _, stderr, err = tf.Run("plan", "-out=tfplan") - if err != nil { - t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) - } - - //// APPLY - stdout, stderr, err := tf.Run("apply", "tfplan") - if err != nil { - t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) - } - - if !strings.Contains(stdout, "HelloProvisioner") { - t.Fatalf("missing provisioner output:\n%s", stdout) - } -} diff --git a/internal/command/e2etest/terraform_test.go b/internal/command/e2etest/terraform_test.go deleted file mode 100644 index a6e706c3bb01..000000000000 --- a/internal/command/e2etest/terraform_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package e2etest - -import ( - "path/filepath" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/e2e" -) - -func TestTerraformProviderData(t *testing.T) { - - fixturePath := filepath.Join("testdata", "terraform-managed-data") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - _, stderr, err := tf.Run("init", "-input=false") - if err != nil { - t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) - } - - stdout, stderr, err := tf.Run("plan", "-out=tfplan", "-input=false") - if err != nil { - t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) - } - - if !strings.Contains(stdout, "4 to add, 0 to change, 0 to destroy") { - t.Errorf("incorrect plan tally; want 4 to add:\n%s", stdout) - } - - stdout, stderr, err = tf.Run("apply", "-input=false", "tfplan") - if err != nil { - t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) - } - - if !strings.Contains(stdout, "Resources: 4 added, 0 changed, 0 destroyed") { - t.Errorf("incorrect apply tally; want 4 added:\n%s", stdout) - } - - state, 
err := tf.LocalState() - if err != nil { - t.Fatalf("failed to read state file: %s", err) - } - - // we'll check the final output to validate the resources - d := state.Module(addrs.RootModuleInstance).OutputValues["d"].Value - input := d.GetAttr("input") - output := d.GetAttr("output") - if input.IsNull() { - t.Fatal("missing input from resource d") - } - if !input.RawEquals(output) { - t.Fatalf("input %#v does not equal output %#v\n", input, output) - } -} diff --git a/internal/command/e2etest/version_test.go b/internal/command/e2etest/version_test.go deleted file mode 100644 index 90c716aff4ce..000000000000 --- a/internal/command/e2etest/version_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package e2etest - -import ( - "fmt" - "path/filepath" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/e2e" - "github.com/hashicorp/terraform/version" -) - -func TestVersion(t *testing.T) { - // Along with testing the "version" command in particular, this serves - // as a good smoke test for whether the Terraform binary can even be - // compiled and run, since it doesn't require any external network access - // to do its job. - - t.Parallel() - - fixturePath := filepath.Join("testdata", "empty") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - stdout, stderr, err := tf.Run("version") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - wantVersion := fmt.Sprintf("Terraform v%s", version.String()) - if !strings.Contains(stdout, wantVersion) { - t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) - } -} - -func TestVersionWithProvider(t *testing.T) { - // This is a more elaborate use of "version" that shows the selected - // versions of plugins too. - t.Parallel() - - // This test reaches out to releases.hashicorp.com to download the - // template and null providers, so it can only run if network access is - // allowed. 
- skipIfCannotAccessNetwork(t) - - fixturePath := filepath.Join("testdata", "template-provider") - tf := e2e.NewBinary(t, terraformBin, fixturePath) - - // Initial run (before "init") should work without error but will not - // include the provider version, since we've not "locked" one yet. - { - stdout, stderr, err := tf.Run("version") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - wantVersion := fmt.Sprintf("Terraform v%s", version.String()) - if !strings.Contains(stdout, wantVersion) { - t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) - } - } - - { - _, _, err := tf.Run("init") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - } - - // After running init, we additionally include information about the - // selected version of the "template" provider. - { - stdout, stderr, err := tf.Run("version") - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - if stderr != "" { - t.Errorf("unexpected stderr output:\n%s", stderr) - } - - wantMsg := "+ provider registry.terraform.io/hashicorp/template v" // we don't know which version we'll get here - if !strings.Contains(stdout, wantMsg) { - t.Errorf("output does not contain provider information %q:\n%s", wantMsg, stdout) - } - } -} diff --git a/internal/command/format/diagnostic.go b/internal/command/format/diagnostic.go deleted file mode 100644 index 780592a08c51..000000000000 --- a/internal/command/format/diagnostic.go +++ /dev/null @@ -1,319 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/mitchellh/colorstring" - wordwrap "github.com/mitchellh/go-wordwrap" -) - -var disabledColorize = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, -} - -// Diagnostic formats 
a single diagnostic message. -// -// The width argument specifies at what column the diagnostic messages will -// be wrapped. If set to zero, messages will not be wrapped by this function -// at all. Although the long-form text parts of the message are wrapped, -// not all aspects of the message are guaranteed to fit within the specified -// terminal width. -func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { - return DiagnosticFromJSON(viewsjson.NewDiagnostic(diag, sources), color, width) -} - -func DiagnosticFromJSON(diag *viewsjson.Diagnostic, color *colorstring.Colorize, width int) string { - if diag == nil { - // No good reason to pass a nil diagnostic in here... - return "" - } - - var buf bytes.Buffer - - // these leftRule* variables are markers for the beginning of the lines - // containing the diagnostic that are intended to help sighted users - // better understand the information hierarchy when diagnostics appear - // alongside other information or alongside other diagnostics. - // - // Without this, it seems (based on folks sharing incomplete messages when - // asking questions, or including extra content that's not part of the - // diagnostic) that some readers have trouble easily identifying which - // text belongs to the diagnostic and which does not. 
- var leftRuleLine, leftRuleStart, leftRuleEnd string - var leftRuleWidth int // in visual character cells - - switch diag.Severity { - case viewsjson.DiagnosticSeverityError: - buf.WriteString(color.Color("[bold][red]Error: [reset]")) - leftRuleLine = color.Color("[red]│[reset] ") - leftRuleStart = color.Color("[red]╷[reset]") - leftRuleEnd = color.Color("[red]╵[reset]") - leftRuleWidth = 2 - case viewsjson.DiagnosticSeverityWarning: - buf.WriteString(color.Color("[bold][yellow]Warning: [reset]")) - leftRuleLine = color.Color("[yellow]│[reset] ") - leftRuleStart = color.Color("[yellow]╷[reset]") - leftRuleEnd = color.Color("[yellow]╵[reset]") - leftRuleWidth = 2 - default: - // Clear out any coloring that might be applied by Terraform's UI helper, - // so our result is not context-sensitive. - buf.WriteString(color.Color("\n[reset]")) - } - - // We don't wrap the summary, since we expect it to be terse, and since - // this is where we put the text of a native Go error it may not always - // be pure text that lends itself well to word-wrapping. - fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), diag.Summary) - - appendSourceSnippets(&buf, diag, color) - - if diag.Detail != "" { - paraWidth := width - leftRuleWidth - 1 // leave room for the left rule - if paraWidth > 0 { - lines := strings.Split(diag.Detail, "\n") - for _, line := range lines { - if !strings.HasPrefix(line, " ") { - line = wordwrap.WrapString(line, uint(paraWidth)) - } - fmt.Fprintf(&buf, "%s\n", line) - } - } else { - fmt.Fprintf(&buf, "%s\n", diag.Detail) - } - } - - // Before we return, we'll finally add the left rule prefixes to each - // line so that the overall message is visually delimited from what's - // around it. We'll do that by scanning over what we already generated - // and adding the prefix for each line. 
- var ruleBuf strings.Builder - sc := bufio.NewScanner(&buf) - ruleBuf.WriteString(leftRuleStart) - ruleBuf.WriteByte('\n') - for sc.Scan() { - line := sc.Text() - prefix := leftRuleLine - if line == "" { - // Don't print the space after the line if there would be nothing - // after it anyway. - prefix = strings.TrimSpace(prefix) - } - ruleBuf.WriteString(prefix) - ruleBuf.WriteString(line) - ruleBuf.WriteByte('\n') - } - ruleBuf.WriteString(leftRuleEnd) - ruleBuf.WriteByte('\n') - - return ruleBuf.String() -} - -// DiagnosticPlain is an alternative to Diagnostic which minimises the use of -// virtual terminal formatting sequences. -// -// It is intended for use in automation and other contexts in which diagnostic -// messages are parsed from the Terraform output. -func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width int) string { - return DiagnosticPlainFromJSON(viewsjson.NewDiagnostic(diag, sources), width) -} - -func DiagnosticPlainFromJSON(diag *viewsjson.Diagnostic, width int) string { - if diag == nil { - // No good reason to pass a nil diagnostic in here... - return "" - } - - var buf bytes.Buffer - - switch diag.Severity { - case viewsjson.DiagnosticSeverityError: - buf.WriteString("\nError: ") - case viewsjson.DiagnosticSeverityWarning: - buf.WriteString("\nWarning: ") - default: - buf.WriteString("\n") - } - - // We don't wrap the summary, since we expect it to be terse, and since - // this is where we put the text of a native Go error it may not always - // be pure text that lends itself well to word-wrapping. 
- fmt.Fprintf(&buf, "%s\n\n", diag.Summary) - - appendSourceSnippets(&buf, diag, disabledColorize) - - if diag.Detail != "" { - if width > 1 { - lines := strings.Split(diag.Detail, "\n") - for _, line := range lines { - if !strings.HasPrefix(line, " ") { - line = wordwrap.WrapString(line, uint(width-1)) - } - fmt.Fprintf(&buf, "%s\n", line) - } - } else { - fmt.Fprintf(&buf, "%s\n", diag.Detail) - } - } - - return buf.String() -} - -// DiagnosticWarningsCompact is an alternative to Diagnostic for when all of -// the given diagnostics are warnings and we want to show them compactly, -// with only two lines per warning and excluding all of the detail information. -// -// The caller may optionally pre-process the given diagnostics with -// ConsolidateWarnings, in which case this function will recognize consolidated -// messages and include an indication that they are consolidated. -// -// Do not pass non-warning diagnostics to this function, or the result will -// be nonsense. -func DiagnosticWarningsCompact(diags tfdiags.Diagnostics, color *colorstring.Colorize) string { - var b strings.Builder - b.WriteString(color.Color("[bold][yellow]Warnings:[reset]\n\n")) - for _, diag := range diags { - sources := tfdiags.WarningGroupSourceRanges(diag) - b.WriteString(fmt.Sprintf("- %s\n", diag.Description().Summary)) - if len(sources) > 0 { - mainSource := sources[0] - if mainSource.Subject != nil { - if len(sources) > 1 { - b.WriteString(fmt.Sprintf( - " on %s line %d (and %d more)\n", - mainSource.Subject.Filename, - mainSource.Subject.Start.Line, - len(sources)-1, - )) - } else { - b.WriteString(fmt.Sprintf( - " on %s line %d\n", - mainSource.Subject.Filename, - mainSource.Subject.Start.Line, - )) - } - } else if len(sources) > 1 { - b.WriteString(fmt.Sprintf( - " (%d occurences of this warning)\n", - len(sources), - )) - } - } - } - - return b.String() -} - -func appendSourceSnippets(buf *bytes.Buffer, diag *viewsjson.Diagnostic, color *colorstring.Colorize) { - if 
diag.Address != "" { - fmt.Fprintf(buf, " with %s,\n", diag.Address) - } - - if diag.Range == nil { - return - } - - if diag.Snippet == nil { - // This should generally not happen, as long as sources are always - // loaded through the main loader. We may load things in other - // ways in weird cases, so we'll tolerate it at the expense of - // a not-so-helpful error message. - fmt.Fprintf(buf, " on %s line %d:\n (source code not available)\n", diag.Range.Filename, diag.Range.Start.Line) - } else { - snippet := diag.Snippet - code := snippet.Code - - var contextStr string - if snippet.Context != nil { - contextStr = fmt.Sprintf(", in %s", *snippet.Context) - } - fmt.Fprintf(buf, " on %s line %d%s:\n", diag.Range.Filename, diag.Range.Start.Line, contextStr) - - // Split the snippet and render the highlighted section with underlines - start := snippet.HighlightStartOffset - end := snippet.HighlightEndOffset - - // Only buggy diagnostics can have an end range before the start, but - // we need to ensure we don't crash here if that happens. - if end < start { - end = start + 1 - if end > len(code) { - end = len(code) - } - } - - // If either start or end is out of range for the code buffer then - // we'll cap them at the bounds just to avoid a panic, although - // this would happen only if there's a bug in the code generating - // the snippet objects. 
- if start < 0 { - start = 0 - } else if start > len(code) { - start = len(code) - } - if end < 0 { - end = 0 - } else if end > len(code) { - end = len(code) - } - - before, highlight, after := code[0:start], code[start:end], code[end:] - code = fmt.Sprintf(color.Color("%s[underline]%s[reset]%s"), before, highlight, after) - - // Split the snippet into lines and render one at a time - lines := strings.Split(code, "\n") - for i, line := range lines { - fmt.Fprintf( - buf, "%4d: %s\n", - snippet.StartLine+i, - line, - ) - } - - if len(snippet.Values) > 0 || (snippet.FunctionCall != nil && snippet.FunctionCall.Signature != nil) { - // The diagnostic may also have information about the dynamic - // values of relevant variables at the point of evaluation. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. - values := make([]viewsjson.DiagnosticExpressionValue, len(snippet.Values)) - copy(values, snippet.Values) - sort.Slice(values, func(i, j int) bool { - return values[i].Traversal < values[j].Traversal - }) - - fmt.Fprint(buf, color.Color(" [dark_gray]├────────────────[reset]\n")) - if callInfo := snippet.FunctionCall; callInfo != nil && callInfo.Signature != nil { - - fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] while calling [bold]%s[reset]("), callInfo.CalledAs) - for i, param := range callInfo.Signature.Params { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(param.Name) - } - if param := callInfo.Signature.VariadicParam; param != nil { - if len(callInfo.Signature.Params) > 0 { - buf.WriteString(", ") - } - buf.WriteString(param.Name) - buf.WriteString("...") - } - buf.WriteString(")\n") - } - for _, value := range values { - fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] [bold]%s[reset] %s\n"), value.Traversal, value.Statement) - } - } - } - - buf.WriteByte('\n') -} diff --git 
a/internal/command/format/diagnostic_test.go b/internal/command/format/diagnostic_test.go deleted file mode 100644 index 95f2ed6aa1ba..000000000000 --- a/internal/command/format/diagnostic_test.go +++ /dev/null @@ -1,945 +0,0 @@ -package format - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/lang/marks" - - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestDiagnostic(t *testing.T) { - - tests := map[string]struct { - Diag interface{} - Want string - }{ - "sourceless error": { - tfdiags.Sourceless( - tfdiags.Error, - "A sourceless error", - "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", - ), - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]A sourceless error[reset] -[red]│[reset] -[red]│[reset] It has no source references but it -[red]│[reset] does have a pretty long detail that -[red]│[reset] should wrap over multiple lines. -[red]╵[reset] -`, - }, - "sourceless warning": { - tfdiags.Sourceless( - tfdiags.Warning, - "A sourceless warning", - "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", - ), - `[yellow]╷[reset] -[yellow]│[reset] [bold][yellow]Warning: [reset][bold]A sourceless warning[reset] -[yellow]│[reset] -[yellow]│[reset] It has no source references but it -[yellow]│[reset] does have a pretty long detail that -[yellow]│[reset] should wrap over multiple lines. 
-[yellow]╵[reset] -`, - }, - "error with source code subject": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] -[red]│[reset] Whatever shall we do? -[red]╵[reset] -`, - }, - "error with source code subject and known expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.StringVal("blah"), - }), - }, - }, - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] [dark_gray]├────────────────[reset] -[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is "blah" -[red]│[reset] -[red]│[reset] Whatever shall we do? 
-[red]╵[reset] -`, - }, - "error with source code subject and expression referring to sensitive value": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.StringVal("blah").Mark(marks.Sensitive), - }), - }, - }, - Extra: diagnosticCausedBySensitive(true), - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] [dark_gray]├────────────────[reset] -[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] has a sensitive value -[red]│[reset] -[red]│[reset] Whatever shall we do? 
-[red]╵[reset] -`, - }, - "error with source code subject and unknown string expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.String), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] [dark_gray]├────────────────[reset] -[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is a string, known only after apply -[red]│[reset] -[red]│[reset] Whatever shall we do? 
-[red]╵[reset] -`, - }, - "error with source code subject and unknown expression of unknown type": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.DynamicPseudoType), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] [dark_gray]├────────────────[reset] -[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] will be known only after apply -[red]│[reset] -[red]│[reset] Whatever shall we do? -[red]╵[reset] -`, - }, - "error with source code subject and function call annotation": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprLiteral(cty.True), - EvalContext: &hcl.EvalContext{ - Functions: map[string]function.Function{ - "beep": function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "pos_param_0", - Type: cty.String, - }, - { - Name: "pos_param_1", - Type: cty.Number, - }, - }, - VarParam: &function.Parameter{ - Name: "var_param", - Type: cty.Bool, - }, - }), - }, - }, - // This is simulating what the HCL function call expression - // type would generate on evaluation, by implementing the - // same interface it uses. 
- Extra: fakeDiagFunctionCallExtra("beep"), - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] -[red]│[reset] -[red]│[reset] on test.tf line 1: -[red]│[reset] 1: test [underline]source[reset] code -[red]│[reset] [dark_gray]├────────────────[reset] -[red]│[reset] [dark_gray]│[reset] while calling [bold]beep[reset](pos_param_0, pos_param_1, var_param...) -[red]│[reset] -[red]│[reset] Whatever shall we do? -[red]╵[reset] -`, - }, - } - - sources := map[string][]byte{ - "test.tf": []byte(`test source code`), - } - - // This empty Colorize just passes through all of the formatting codes - // untouched, because it doesn't define any formatting keywords. - colorize := &colorstring.Colorize{} - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var diags tfdiags.Diagnostics - diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic - diag := diags[0] - got := strings.TrimSpace(Diagnostic(diag, sources, colorize, 40)) - want := strings.TrimSpace(test.Want) - if got != want { - t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) - } - }) - } -} - -func TestDiagnosticPlain(t *testing.T) { - - tests := map[string]struct { - Diag interface{} - Want string - }{ - "sourceless error": { - tfdiags.Sourceless( - tfdiags.Error, - "A sourceless error", - "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", - ), - ` -Error: A sourceless error - -It has no source references but it does -have a pretty long detail that should -wrap over multiple lines. -`, - }, - "sourceless warning": { - tfdiags.Sourceless( - tfdiags.Warning, - "A sourceless warning", - "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", - ), - ` -Warning: A sourceless warning - -It has no source references but it does -have a pretty long detail that should -wrap over multiple lines. 
-`, - }, - "error with source code subject": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - -Whatever shall we do? -`, - }, - "error with source code subject and known expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.StringVal("blah"), - }), - }, - }, - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - ├──────────────── - │ boop.beep is "blah" - -Whatever shall we do? 
-`, - }, - "error with source code subject and expression referring to sensitive value": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.StringVal("blah").Mark(marks.Sensitive), - }), - }, - }, - Extra: diagnosticCausedBySensitive(true), - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - ├──────────────── - │ boop.beep has a sensitive value - -Whatever shall we do? -`, - }, - "error with source code subject and expression referring to sensitive value when not related to sensitivity": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.StringVal("blah").Mark(marks.Sensitive), - }), - }, - }, - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - -Whatever shall we do? 
-`, - }, - "error with source code subject and unknown string expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.String), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - ├──────────────── - │ boop.beep is a string, known only after apply - -Whatever shall we do? -`, - }, - "error with source code subject and unknown string expression when problem isn't unknown-related": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.String), - }), - }, - }, - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - ├──────────────── - │ boop.beep is a string - -Whatever shall we do? 
-`, - }, - "error with source code subject and unknown expression of unknown type": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.DynamicPseudoType), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - ├──────────────── - │ boop.beep will be known only after apply - -Whatever shall we do? -`, - }, - "error with source code subject and unknown expression of unknown type when problem isn't unknown-related": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad bad bad", - Detail: "Whatever shall we do?", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "boop"}, - hcl.TraverseAttr{Name: "beep"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "beep": cty.UnknownVal(cty.DynamicPseudoType), - }), - }, - }, - }, - ` -Error: Bad bad bad - - on test.tf line 1: - 1: test source code - -Whatever shall we do? 
-`, - }, - } - - sources := map[string][]byte{ - "test.tf": []byte(`test source code`), - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var diags tfdiags.Diagnostics - diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic - diag := diags[0] - got := strings.TrimSpace(DiagnosticPlain(diag, sources, 40)) - want := strings.TrimSpace(test.Want) - if got != want { - t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) - } - }) - } -} - -func TestDiagnosticWarningsCompact(t *testing.T) { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.SimpleWarning("foo")) - diags = diags.Append(tfdiags.SimpleWarning("foo")) - diags = diags.Append(tfdiags.SimpleWarning("bar")) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "source foo", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 2, Column: 1, Byte: 5}, - End: hcl.Pos{Line: 2, Column: 1, Byte: 5}, - }, - }) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "source foo", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 3, Column: 1, Byte: 7}, - End: hcl.Pos{Line: 3, Column: 1, Byte: 7}, - }, - }) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "source bar", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source2.tf", - Start: hcl.Pos{Line: 1, Column: 1, Byte: 1}, - End: hcl.Pos{Line: 1, Column: 1, Byte: 1}, - }, - }) - - // ConsolidateWarnings groups together the ones - // that have source location information and that - // have the same summary text. - diags = diags.ConsolidateWarnings(1) - - // A zero-value Colorize just passes all the formatting - // codes back to us, so we can test them literally. 
- got := DiagnosticWarningsCompact(diags, &colorstring.Colorize{}) - want := `[bold][yellow]Warnings:[reset] - -- foo -- foo -- bar -- source foo - on source.tf line 2 (and 1 more) -- source bar - on source2.tf line 1 -` - if got != want { - t.Errorf( - "wrong result\ngot:\n%s\n\nwant:\n%s\n\ndiff:\n%s", - got, want, cmp.Diff(want, got), - ) - } -} - -// Test case via https://github.com/hashicorp/terraform/issues/21359 -func TestDiagnostic_nonOverlappingHighlightContext(t *testing.T) { - var diags tfdiags.Diagnostics - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Some error", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, - End: hcl.Pos{Line: 1, Column: 5, Byte: 5}, - }, - Context: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, - End: hcl.Pos{Line: 4, Column: 2, Byte: 60}, - }, - }) - sources := map[string][]byte{ - "source.tf": []byte(`x = somefunc("testing", { - alpha = "foo" - beta = "bar" -}) -`), - } - color := &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: true, - Disable: true, - } - expected := `╷ -│ Error: Some error -│ -│ on source.tf line 1: -│ 1: x = somefunc("testing", { -│ 2: alpha = "foo" -│ 3: beta = "bar" -│ 4: }) -│ -│ ... 
-╵ -` - output := Diagnostic(diags[0], sources, color, 80) - - if output != expected { - t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) - } -} - -func TestDiagnostic_emptyOverlapHighlightContext(t *testing.T) { - var diags tfdiags.Diagnostics - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Some error", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, - End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, - }, - Context: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, - End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, - }, - }) - sources := map[string][]byte{ - "source.tf": []byte(`variable "x" { - default = { - "foo" - } -`), - } - color := &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: true, - Disable: true, - } - expected := `╷ -│ Error: Some error -│ -│ on source.tf line 3, in variable "x": -│ 2: default = { -│ 3: "foo" -│ 4: } -│ -│ ... -╵ -` - output := Diagnostic(diags[0], sources, color, 80) - - if output != expected { - t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) - } -} - -func TestDiagnosticPlain_emptyOverlapHighlightContext(t *testing.T) { - var diags tfdiags.Diagnostics - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Some error", - Detail: "...", - Subject: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, - End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, - }, - Context: &hcl.Range{ - Filename: "source.tf", - Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, - End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, - }, - }) - sources := map[string][]byte{ - "source.tf": []byte(`variable "x" { - default = { - "foo" - } -`), - } - - expected := ` -Error: Some error - - on source.tf line 3, in variable "x": - 2: default = { - 3: "foo" - 4: } - -... 
-` - output := DiagnosticPlain(diags[0], sources, 80) - - if output != expected { - t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) - } -} - -func TestDiagnostic_wrapDetailIncludingCommand(t *testing.T) { - var diags tfdiags.Diagnostics - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Everything went wrong", - Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", - }) - color := &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: true, - Disable: true, - } - expected := `╷ -│ Error: Everything went wrong -│ -│ This is a very long sentence about whatever went wrong which is supposed -│ to wrap onto multiple lines. Thank-you very much for listening. -│ -│ To fix this, run this very long command: -│ terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces -│ -│ Here is a coda which is also long enough to wrap and so it should -│ eventually make it onto multiple lines. THE END -╵ -` - output := Diagnostic(diags[0], nil, color, 76) - - if output != expected { - t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) - } -} - -func TestDiagnosticPlain_wrapDetailIncludingCommand(t *testing.T) { - var diags tfdiags.Diagnostics - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Everything went wrong", - Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. 
Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", - }) - - expected := ` -Error: Everything went wrong - -This is a very long sentence about whatever went wrong which is supposed to -wrap onto multiple lines. Thank-you very much for listening. - -To fix this, run this very long command: - terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces - -Here is a coda which is also long enough to wrap and so it should -eventually make it onto multiple lines. THE END -` - output := DiagnosticPlain(diags[0], nil, 76) - - if output != expected { - t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) - } -} - -// Test cases covering invalid JSON diagnostics which should still render -// correctly. These JSON diagnostic values cannot be generated from the -// json.NewDiagnostic code path, but we may read and display JSON diagnostics -// in future from other sources. 
-func TestDiagnosticFromJSON_invalid(t *testing.T) { - tests := map[string]struct { - Diag *viewsjson.Diagnostic - Want string - }{ - "zero-value end range and highlight end byte": { - &viewsjson.Diagnostic{ - Severity: viewsjson.DiagnosticSeverityError, - Summary: "Bad end", - Detail: "It all went wrong.", - Range: &viewsjson.DiagnosticRange{ - Filename: "ohno.tf", - Start: viewsjson.Pos{Line: 1, Column: 23, Byte: 22}, - End: viewsjson.Pos{Line: 0, Column: 0, Byte: 0}, - }, - Snippet: &viewsjson.DiagnosticSnippet{ - Code: `resource "foo_bar "baz" {`, - StartLine: 1, - HighlightStartOffset: 22, - HighlightEndOffset: 0, - }, - }, - `[red]╷[reset] -[red]│[reset] [bold][red]Error: [reset][bold]Bad end[reset] -[red]│[reset] -[red]│[reset] on ohno.tf line 1: -[red]│[reset] 1: resource "foo_bar "baz[underline]"[reset] { -[red]│[reset] -[red]│[reset] It all went wrong. -[red]╵[reset] -`, - }, - } - - // This empty Colorize just passes through all of the formatting codes - // untouched, because it doesn't define any formatting keywords. - colorize := &colorstring.Colorize{} - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - got := strings.TrimSpace(DiagnosticFromJSON(test.Diag, colorize, 40)) - want := strings.TrimSpace(test.Want) - if got != want { - t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) - } - }) - } -} - -// fakeDiagFunctionCallExtra is a fake implementation of the interface that -// HCL uses to provide "extra information" associated with diagnostics that -// describe errors during a function call. 
-type fakeDiagFunctionCallExtra string - -var _ hclsyntax.FunctionCallDiagExtra = fakeDiagFunctionCallExtra("") - -func (e fakeDiagFunctionCallExtra) CalledFunctionName() string { - return string(e) -} - -func (e fakeDiagFunctionCallExtra) FunctionCallError() error { - return nil -} - -// diagnosticCausedByUnknown is a testing helper for exercising our logic -// for selectively showing unknown values alongside our source snippets for -// diagnostics that are explicitly marked as being caused by unknown values. -type diagnosticCausedByUnknown bool - -var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) - -func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { - return bool(e) -} - -// diagnosticCausedBySensitive is a testing helper for exercising our logic -// for selectively showing sensitive values alongside our source snippets for -// diagnostics that are explicitly marked as being caused by sensitive values. -type diagnosticCausedBySensitive bool - -var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) - -func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { - return bool(e) -} diff --git a/internal/command/format/diff.go b/internal/command/format/diff.go deleted file mode 100644 index ed30e3214aff..000000000000 --- a/internal/command/format/diff.go +++ /dev/null @@ -1,2061 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/objchange" - "github.com/hashicorp/terraform/internal/states" -) - -// DiffLanguage 
controls the description of the resource change reasons. -type DiffLanguage rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type=DiffLanguage diff.go - -const ( - // DiffLanguageProposedChange indicates that the change is one which is - // planned to be applied. - DiffLanguageProposedChange DiffLanguage = 'P' - - // DiffLanguageDetectedDrift indicates that the change is detected drift - // from the configuration. - DiffLanguageDetectedDrift DiffLanguage = 'D' -) - -// ResourceChange returns a string representation of a change to a particular -// resource, for inclusion in user-facing plan output. -// -// The resource schema must be provided along with the change so that the -// formatted change can reflect the configuration structure for the associated -// resource. -// -// If "color" is non-nil, it will be used to color the result. Otherwise, -// no color codes will be included. -func ResourceChange( - change *plans.ResourceInstanceChange, - schema *configschema.Block, - color *colorstring.Colorize, - language DiffLanguage, -) string { - addr := change.Addr - var buf bytes.Buffer - - if color == nil { - color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - Reset: false, - } - } - - dispAddr := addr.String() - if change.DeposedKey != states.NotDeposed { - dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) - } - - switch change.Action { - case plans.Create: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be created"), dispAddr)) - case plans.Read: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be read during apply"), dispAddr)) - switch change.ActionReason { - case plans.ResourceInstanceReadBecauseConfigUnknown: - buf.WriteString("\n # (config refers to values not yet known)") - case plans.ResourceInstanceReadBecauseDependencyPending: - buf.WriteString("\n # (depends on a resource or a module with changes pending)") - } - case plans.Update: - switch 
language { - case DiffLanguageProposedChange: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be updated in-place"), dispAddr)) - case DiffLanguageDetectedDrift: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has changed"), dispAddr)) - default: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] update (unknown reason %s)"), dispAddr, language)) - } - case plans.CreateThenDelete, plans.DeleteThenCreate: - switch change.ActionReason { - case plans.ResourceInstanceReplaceBecauseTainted: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] is tainted, so must be [bold][red]replaced"), dispAddr)) - case plans.ResourceInstanceReplaceByRequest: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]replaced[reset], as requested"), dispAddr)) - case plans.ResourceInstanceReplaceByTriggers: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]replaced[reset] due to changes in replace_triggered_by"), dispAddr)) - default: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] must be [bold][red]replaced"), dispAddr)) - } - case plans.Delete: - switch language { - case DiffLanguageProposedChange: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] will be [bold][red]destroyed"), dispAddr)) - case DiffLanguageDetectedDrift: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has been deleted"), dispAddr)) - default: - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] delete (unknown reason %s)"), dispAddr, language)) - } - // We can sometimes give some additional detail about why we're - // proposing to delete. We show this as additional notes, rather than - // as additional wording in the main action statement, in an attempt - // to make the "will be destroyed" message prominent and consistent - // in all cases, for easier scanning of this often-risky action. 
- switch change.ActionReason { - case plans.ResourceInstanceDeleteBecauseNoResourceConfig: - buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", addr.Resource.Resource)) - case plans.ResourceInstanceDeleteBecauseNoMoveTarget: - buf.WriteString(fmt.Sprintf("\n # (because %s was moved to %s, which is not in configuration)", change.PrevRunAddr, addr.Resource.Resource)) - case plans.ResourceInstanceDeleteBecauseNoModule: - // FIXME: Ideally we'd truncate addr.Module to reflect the earliest - // step that doesn't exist, so it's clearer which call this refers - // to, but we don't have enough information out here in the UI layer - // to decide that; only the "expander" in Terraform Core knows - // which module instance keys are actually declared. - buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", addr.Module)) - case plans.ResourceInstanceDeleteBecauseWrongRepetition: - // We have some different variations of this one - switch addr.Resource.Key.(type) { - case nil: - buf.WriteString("\n # (because resource uses count or for_each)") - case addrs.IntKey: - buf.WriteString("\n # (because resource does not use count)") - case addrs.StringKey: - buf.WriteString("\n # (because resource does not use for_each)") - } - case plans.ResourceInstanceDeleteBecauseCountIndex: - buf.WriteString(fmt.Sprintf("\n # (because index %s is out of range for count)", addr.Resource.Key)) - case plans.ResourceInstanceDeleteBecauseEachKey: - buf.WriteString(fmt.Sprintf("\n # (because key %s is not in for_each map)", addr.Resource.Key)) - } - if change.DeposedKey != states.NotDeposed { - // Some extra context about this unusual situation. 
- buf.WriteString(color.Color("\n # (left over from a partially-failed replacement of this instance)")) - } - case plans.NoOp: - if change.Moved() { - buf.WriteString(fmt.Sprintf(color.Color("[bold] # %s[reset] has moved to [bold]%s[reset]"), change.PrevRunAddr.String(), dispAddr)) - break - } - fallthrough - default: - // should never happen, since the above is exhaustive - buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) - } - buf.WriteString(color.Color("[reset]\n")) - - if change.Moved() && change.Action != plans.NoOp { - buf.WriteString(fmt.Sprintf(color.Color(" # [reset](moved from %s)\n"), change.PrevRunAddr.String())) - } - - if change.Moved() && change.Action == plans.NoOp { - buf.WriteString(" ") - } else { - buf.WriteString(color.Color(DiffActionSymbol(change.Action)) + " ") - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - buf.WriteString(fmt.Sprintf( - "resource %q %q", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - case addrs.DataResourceMode: - buf.WriteString(fmt.Sprintf( - "data %q %q", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - default: - // should never happen, since the above is exhaustive - buf.WriteString(addr.String()) - } - - buf.WriteString(" {") - - p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: change.Action, - requiredReplace: change.RequiredReplace, - } - - // Most commonly-used resources have nested blocks that result in us - // going at least three traversals deep while we recurse here, so we'll - // start with that much capacity and then grow as needed for deeper - // structures. 
- path := make(cty.Path, 0, 3) - - result := p.writeBlockBodyDiff(schema, change.Before, change.After, 6, path) - if result.bodyWritten { - buf.WriteString("\n") - buf.WriteString(strings.Repeat(" ", 4)) - } - buf.WriteString("}\n") - - return buf.String() -} - -// OutputChanges returns a string representation of a set of changes to output -// values for inclusion in user-facing plan output. -// -// If "color" is non-nil, it will be used to color the result. Otherwise, -// no color codes will be included. -func OutputChanges( - changes []*plans.OutputChangeSrc, - color *colorstring.Colorize, -) string { - var buf bytes.Buffer - p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: plans.Update, // not actually used in this case, because we're not printing a containing block - } - - // We're going to reuse the codepath we used for printing resource block - // diffs, by pretending that the set of defined outputs are the attributes - // of some resource. It's a little forced to do this, but it gives us all - // the same formatting heuristics as we normally use for resource - // attributes. - oldVals := make(map[string]cty.Value, len(changes)) - newVals := make(map[string]cty.Value, len(changes)) - synthSchema := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(changes)), - } - for _, changeSrc := range changes { - name := changeSrc.Addr.OutputValue.Name - change, err := changeSrc.Decode() - if err != nil { - // It'd be weird to get a decoding error here because that would - // suggest that Terraform itself just produced an invalid plan, and - // we don't have any good way to ignore it in this codepath, so - // we'll just log it and ignore it. 
- log.Printf("[ERROR] format.OutputChanges: Failed to decode planned change for output %q: %s", name, err) - continue - } - synthSchema.Attributes[name] = &configschema.Attribute{ - Type: cty.DynamicPseudoType, // output types are decided dynamically based on the given value - Optional: true, - Sensitive: change.Sensitive, - } - oldVals[name] = change.Before - newVals[name] = change.After - } - - p.writeBlockBodyDiff(synthSchema, cty.ObjectVal(oldVals), cty.ObjectVal(newVals), 2, nil) - - return buf.String() -} - -type blockBodyDiffPrinter struct { - buf *bytes.Buffer - color *colorstring.Colorize - action plans.Action - requiredReplace cty.PathSet - // verbose is set to true when using the "diff" printer to format state - verbose bool -} - -type blockBodyDiffResult struct { - bodyWritten bool - skippedAttributes int - skippedBlocks int -} - -const ( - forcesNewResourceCaption = " [red]# forces replacement[reset]" - sensitiveCaption = "(sensitive value)" -) - -// writeBlockBodyDiff writes attribute or block differences -// and returns true if any differences were found and written -func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) blockBodyDiffResult { - path = ctyEnsurePathCapacity(path, 1) - result := blockBodyDiffResult{} - - // write the attributes diff - blankBeforeBlocks := p.writeAttrsDiff(schema.Attributes, old, new, indent, path, &result) - p.writeSkippedAttr(result.skippedAttributes, indent+2) - - { - blockTypeNames := make([]string, 0, len(schema.BlockTypes)) - for name := range schema.BlockTypes { - blockTypeNames = append(blockTypeNames, name) - } - sort.Strings(blockTypeNames) - - for _, name := range blockTypeNames { - blockS := schema.BlockTypes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - result.bodyWritten = true - skippedBlocks := p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) - if 
skippedBlocks > 0 { - result.skippedBlocks += skippedBlocks - } - - // Always include a blank for any subsequent block types. - blankBeforeBlocks = true - } - if result.skippedBlocks > 0 { - noun := "blocks" - if result.skippedBlocks == 1 { - noun = "block" - } - p.buf.WriteString("\n\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), result.skippedBlocks, noun)) - } - } - - return result -} - -func (p *blockBodyDiffPrinter) writeAttrsDiff( - attrsS map[string]*configschema.Attribute, - old, new cty.Value, - indent int, - path cty.Path, - result *blockBodyDiffResult) bool { - - attrNames := make([]string, 0, len(attrsS)) - displayAttrNames := make(map[string]string, len(attrsS)) - attrNameLen := 0 - for name := range attrsS { - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - if oldVal.IsNull() && newVal.IsNull() { - // Skip attributes where both old and new values are null - // (we do this early here so that we'll do our value alignment - // based on the longest attribute name that has a change, rather - // than the longest attribute name in the full set.) - continue - } - - attrNames = append(attrNames, name) - displayAttrNames[name] = displayAttributeName(name) - if len(displayAttrNames[name]) > attrNameLen { - attrNameLen = len(displayAttrNames[name]) - } - } - sort.Strings(attrNames) - if len(attrNames) == 0 { - return false - } - - for _, name := range attrNames { - attrS := attrsS[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - result.bodyWritten = true - skipped := p.writeAttrDiff(displayAttrNames[name], attrS, oldVal, newVal, attrNameLen, indent, path) - if skipped { - result.skippedAttributes++ - } - } - - return true -} - -// getPlanActionAndShow returns the action value -// and a boolean for showJustNew. 
In this function we -// modify the old and new values to remove any possible marks -func getPlanActionAndShow(old cty.Value, new cty.Value) (plans.Action, bool) { - var action plans.Action - showJustNew := false - switch { - case old.IsNull(): - action = plans.Create - showJustNew = true - case new.IsNull(): - action = plans.Delete - case ctyEqualWithUnknown(old, new): - action = plans.NoOp - showJustNew = true - default: - action = plans.Update - } - return action, showJustNew -} - -func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) bool { - path = append(path, cty.GetAttrStep{Name: name}) - action, showJustNew := getPlanActionAndShow(old, new) - - if action == plans.NoOp && !p.verbose && !identifyingAttribute(name, attrS) { - return true - } - - if attrS.NestedType != nil { - p.writeNestedAttrDiff(name, attrS, old, new, nameLen, indent, path, action, showJustNew) - return false - } - - p.buf.WriteString("\n") - - p.writeSensitivityWarning(old, new, indent, action, false) - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - p.buf.WriteString(p.color.Color("[bold]")) - p.buf.WriteString(name) - p.buf.WriteString(p.color.Color("[reset]")) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) - p.buf.WriteString(" = ") - - if attrS.Sensitive { - p.buf.WriteString(sensitiveCaption) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - } else { - switch { - case showJustNew: - p.writeValue(new, action, indent+2) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - default: - // We show new even if it is null to emphasize the fact - // that it is being unset, since otherwise it is easy to - // misunderstand that the value is still set to the old value. 
- p.writeValueDiff(old, new, indent+2, path) - } - } - - return false -} - -// writeNestedAttrDiff is responsible for formatting Attributes with NestedTypes -// in the diff. -func (p *blockBodyDiffPrinter) writeNestedAttrDiff( - name string, attrWithNestedS *configschema.Attribute, old, new cty.Value, - nameLen, indent int, path cty.Path, action plans.Action, showJustNew bool) { - - objS := attrWithNestedS.NestedType - - p.buf.WriteString("\n") - p.writeSensitivityWarning(old, new, indent, action, false) - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - p.buf.WriteString(p.color.Color("[bold]")) - p.buf.WriteString(name) - p.buf.WriteString(p.color.Color("[reset]")) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) - - // Then schema of the attribute itself can be marked sensitive, or the values assigned - sensitive := attrWithNestedS.Sensitive || old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) - if sensitive { - p.buf.WriteString(" = ") - p.buf.WriteString(sensitiveCaption) - - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - - result := &blockBodyDiffResult{} - switch objS.Nesting { - case configschema.NestingSingle: - p.buf.WriteString(" = {") - if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.writeAttrsDiff(objS.Attributes, old, new, indent+4, path, result) - p.writeSkippedAttr(result.skippedAttributes, indent+6) - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString("}") - - if !new.IsKnown() { - p.buf.WriteString(" -> (known after apply)") - } else if new.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) - } - - case configschema.NestingList: - p.buf.WriteString(" = [") - if action != plans.NoOp && (p.pathForcesNewResource(path) || 
p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - // Here we intentionally preserve the index-based correspondance - // between old and new, rather than trying to detect insertions - // and removals in the list, because this more accurately reflects - // how Terraform Core and providers will understand the change, - // particularly when the nested block contains computed attributes - // that will themselves maintain correspondance by index. - - // commonLen is number of elements that exist in both lists, which - // will be presented as updates (~). Any additional items in one - // of the lists will be presented as either creates (+) or deletes (-) - // depending on which list they belong to. maxLen is the number of - // elements in that longer list. - var commonLen int - var maxLen int - // unchanged is the number of unchanged elements - var unchanged int - - switch { - case len(oldItems) < len(newItems): - commonLen = len(oldItems) - maxLen = len(newItems) - default: - commonLen = len(newItems) - maxLen = len(oldItems) - } - for i := 0; i < maxLen; i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - - var action plans.Action - var oldItem, newItem cty.Value - switch { - case i < commonLen: - oldItem = oldItems[i] - newItem = newItems[i] - if oldItem.RawEquals(newItem) { - action = plans.NoOp - unchanged++ - } else { - action = plans.Update - } - case i < len(oldItems): - oldItem = oldItems[i] - newItem = cty.NullVal(oldItem.Type()) - action = plans.Delete - case i < len(newItems): - newItem = newItems[i] - oldItem = cty.NullVal(newItem.Type()) - action = plans.Create - default: - action = plans.NoOp - } - - if action != plans.NoOp { - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeActionSymbol(action) - p.buf.WriteString("{") - - result := 
&blockBodyDiffResult{} - p.writeAttrsDiff(objS.Attributes, oldItem, newItem, indent+8, path, result) - if action == plans.Update { - p.writeSkippedAttr(result.skippedAttributes, indent+10) - } - p.buf.WriteString("\n") - - p.buf.WriteString(strings.Repeat(" ", indent+6)) - p.buf.WriteString("},\n") - } - } - p.writeSkippedElems(unchanged, indent+6) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString("]") - - if !new.IsKnown() { - p.buf.WriteString(" -> (known after apply)") - } else if new.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) - } - - case configschema.NestingSet: - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - var all cty.Value - if len(oldItems)+len(newItems) > 0 { - allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) - allItems = append(allItems, oldItems...) - allItems = append(allItems, newItems...) - - all = cty.SetVal(allItems) - } else { - all = cty.SetValEmpty(old.Type().ElementType()) - } - - p.buf.WriteString(" = [") - - var unchanged int - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - var action plans.Action - var oldValue, newValue cty.Value - switch { - case !val.IsKnown(): - action = plans.Update - newValue = val - case !new.IsKnown(): - action = plans.Delete - // the value must have come from the old set - oldValue = val - // Mark the new val as null, but the entire set will be - // displayed as "(unknown after apply)" - newValue = cty.NullVal(val.Type()) - case old.IsNull() || !old.HasElement(val).True(): - action = plans.Create - oldValue = cty.NullVal(val.Type()) - newValue = val - case new.IsNull() || !new.HasElement(val).True(): - action = plans.Delete - oldValue = val - newValue = cty.NullVal(val.Type()) - default: - action = plans.NoOp - oldValue = val - newValue = val - } - - if action == plans.NoOp { - unchanged++ - continue - } - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+4)) - 
p.writeActionSymbol(action) - p.buf.WriteString("{") - - if p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1]) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - path := append(path, cty.IndexStep{Key: val}) - p.writeAttrsDiff(objS.Attributes, oldValue, newValue, indent+8, path, result) - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+6)) - p.buf.WriteString("},") - } - p.buf.WriteString("\n") - p.writeSkippedElems(unchanged, indent+6) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString("]") - - if !new.IsKnown() { - p.buf.WriteString(" -> (known after apply)") - } else if new.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) - } - - case configschema.NestingMap: - // For the sake of handling nested blocks, we'll treat a null map - // the same as an empty map since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockMapAsEmpty(old) - new = ctyNullBlockMapAsEmpty(new) - - oldItems := old.AsValueMap() - - newItems := map[string]cty.Value{} - - if new.IsKnown() { - newItems = new.AsValueMap() - } - - allKeys := make(map[string]bool) - for k := range oldItems { - allKeys[k] = true - } - for k := range newItems { - allKeys[k] = true - } - allKeysOrder := make([]string, 0, len(allKeys)) - for k := range allKeys { - allKeysOrder = append(allKeysOrder, k) - } - sort.Strings(allKeysOrder) - - p.buf.WriteString(" = {\n") - - // unchanged tracks the number of unchanged elements - unchanged := 0 - for _, k := range allKeysOrder { - var action plans.Action - oldValue := oldItems[k] - - newValue := newItems[k] - switch { - case oldValue == cty.NilVal: - oldValue = cty.NullVal(newValue.Type()) - action = plans.Create - case newValue == cty.NilVal: - newValue = cty.NullVal(oldValue.Type()) - action = plans.Delete - case !newValue.RawEquals(oldValue): - action = plans.Update - default: - action = plans.NoOp - unchanged++ - } - - if action 
!= plans.NoOp { - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeActionSymbol(action) - fmt.Fprintf(p.buf, "%q = {", k) - if p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1]) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) - p.writeAttrsDiff(objS.Attributes, oldValue, newValue, indent+8, path, result) - p.writeSkippedAttr(result.skippedAttributes, indent+10) - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+6)) - p.buf.WriteString("},\n") - } - } - - p.writeSkippedElems(unchanged, indent+6) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString("}") - if !new.IsKnown() { - p.buf.WriteString(" -> (known after apply)") - } else if new.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray] -> null[reset]")) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) int { - skippedBlocks := 0 - path = append(path, cty.GetAttrStep{Name: name}) - if old.IsNull() && new.IsNull() { - // Nothing to do if both old and new is null - return skippedBlocks - } - - // If either the old or the new value is marked, - // Display a special diff because it is irrelevant - // to list all obfuscated attributes as (sensitive value) - if old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) { - p.writeSensitiveNestedBlockDiff(name, old, new, indent, blankBefore, path) - return 0 - } - - // Where old/new are collections representing a nesting mode other than - // NestingSingle, we assume the collection value can never be unknown - // since we always produce the container for the nested objects, even if - // the objects within are computed. 
- - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - var action plans.Action - eqV := new.Equals(old) - switch { - case old.IsNull(): - action = plans.Create - case new.IsNull(): - action = plans.Delete - case !new.IsWhollyKnown() || !old.IsWhollyKnown(): - // "old" should actually always be known due to our contract - // that old values must never be unknown, but we'll allow it - // anyway to be robust. - action = plans.Update - case !eqV.IsKnown() || !eqV.True(): - action = plans.Update - } - - skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, blankBefore, path) - if skipped { - return 1 - } - case configschema.NestingList: - // For the sake of handling nested blocks, we'll treat a null list - // the same as an empty list since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockListAsEmpty(old) - new = ctyNullBlockListAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - // Here we intentionally preserve the index-based correspondance - // between old and new, rather than trying to detect insertions - // and removals in the list, because this more accurately reflects - // how Terraform Core and providers will understand the change, - // particularly when the nested block contains computed attributes - // that will themselves maintain correspondance by index. - - // commonLen is number of elements that exist in both lists, which - // will be presented as updates (~). Any additional items in one - // of the lists will be presented as either creates (+) or deletes (-) - // depending on which list they belong to. 
- var commonLen int - switch { - case len(oldItems) < len(newItems): - commonLen = len(oldItems) - default: - commonLen = len(newItems) - } - - blankBeforeInner := blankBefore - for i := 0; i < commonLen; i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := newItems[i] - action := plans.Update - if oldItem.RawEquals(newItem) { - action = plans.NoOp - } - skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, blankBeforeInner, path) - if skipped { - skippedBlocks++ - } else { - blankBeforeInner = false - } - } - for i := commonLen; i < len(oldItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := cty.NullVal(oldItem.Type()) - skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, blankBeforeInner, path) - if skipped { - skippedBlocks++ - } else { - blankBeforeInner = false - } - } - for i := commonLen; i < len(newItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - newItem := newItems[i] - oldItem := cty.NullVal(newItem.Type()) - skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, blankBeforeInner, path) - if skipped { - skippedBlocks++ - } else { - blankBeforeInner = false - } - } - case configschema.NestingSet: - // For the sake of handling nested blocks, we'll treat a null set - // the same as an empty set since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockSetAsEmpty(old) - new = ctyNullBlockSetAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both sets are empty - return 0 - } - - allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) - allItems = append(allItems, oldItems...) - allItems = append(allItems, newItems...) 
- all := cty.SetVal(allItems) - - blankBeforeInner := blankBefore - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - var action plans.Action - var oldValue, newValue cty.Value - switch { - case !val.IsKnown(): - action = plans.Update - newValue = val - case !old.HasElement(val).True(): - action = plans.Create - oldValue = cty.NullVal(val.Type()) - newValue = val - case !new.HasElement(val).True(): - action = plans.Delete - oldValue = val - newValue = cty.NullVal(val.Type()) - default: - action = plans.NoOp - oldValue = val - newValue = val - } - path := append(path, cty.IndexStep{Key: val}) - skipped := p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, blankBeforeInner, path) - if skipped { - skippedBlocks++ - } else { - blankBeforeInner = false - } - } - - case configschema.NestingMap: - // For the sake of handling nested blocks, we'll treat a null map - // the same as an empty map since the config language doesn't - // distinguish these anyway. 
- old = ctyNullBlockMapAsEmpty(old) - new = ctyNullBlockMapAsEmpty(new) - - oldItems := old.AsValueMap() - newItems := new.AsValueMap() - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both maps are empty - return 0 - } - - allKeys := make(map[string]bool) - for k := range oldItems { - allKeys[k] = true - } - for k := range newItems { - allKeys[k] = true - } - allKeysOrder := make([]string, 0, len(allKeys)) - for k := range allKeys { - allKeysOrder = append(allKeysOrder, k) - } - sort.Strings(allKeysOrder) - - blankBeforeInner := blankBefore - for _, k := range allKeysOrder { - var action plans.Action - oldValue := oldItems[k] - newValue := newItems[k] - switch { - case oldValue == cty.NilVal: - oldValue = cty.NullVal(newValue.Type()) - action = plans.Create - case newValue == cty.NilVal: - newValue = cty.NullVal(oldValue.Type()) - action = plans.Delete - case !newValue.RawEquals(oldValue): - action = plans.Update - default: - action = plans.NoOp - } - - path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) - skipped := p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, blankBeforeInner, path) - if skipped { - skippedBlocks++ - } else { - blankBeforeInner = false - } - } - } - return skippedBlocks -} - -func (p *blockBodyDiffPrinter) writeSensitiveNestedBlockDiff(name string, old, new cty.Value, indent int, blankBefore bool, path cty.Path) { - var action plans.Action - switch { - case old.IsNull(): - action = plans.Create - case new.IsNull(): - action = plans.Delete - case !new.IsWhollyKnown() || !old.IsWhollyKnown(): - // "old" should actually always be known due to our contract - // that old values must never be unknown, but we'll allow it - // anyway to be robust. 
- action = plans.Update - case !ctyEqualValueAndMarks(old, new): - action = plans.Update - } - - if blankBefore { - p.buf.WriteRune('\n') - } - - // New line before warning printing - p.buf.WriteRune('\n') - p.writeSensitivityWarning(old, new, indent, action, true) - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - fmt.Fprintf(p.buf, "%s {", name) - if action != plans.NoOp && p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteRune('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.buf.WriteString("# At least one attribute in this block is (or was) sensitive,\n") - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.buf.WriteString("# so its contents will not be displayed.") - p.buf.WriteRune('\n') - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.buf.WriteString("}") -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, blankBefore bool, path cty.Path) bool { - if action == plans.NoOp && !p.verbose { - return true - } - - if blankBefore { - p.buf.WriteRune('\n') - } - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - if label != nil { - fmt.Fprintf(p.buf, "%s %q {", name, *label) - } else { - fmt.Fprintf(p.buf, "%s {", name) - } - - if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - result := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) - if result.bodyWritten { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - } - p.buf.WriteString("}") - - return false -} - -func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { - // Could check specifically for the sensitivity marker - if 
val.HasMark(marks.Sensitive) { - p.buf.WriteString(sensitiveCaption) - return - } - - if !val.IsKnown() { - p.buf.WriteString("(known after apply)") - return - } - if val.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) - return - } - - ty := val.Type() - - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - { - // Special behavior for JSON strings containing array or object - src := []byte(val.AsString()) - ty, err := ctyjson.ImpliedType(src) - // check for the special case of "null", which decodes to nil, - // and just allow it to be printed out directly - if err == nil && !ty.IsPrimitiveType() && strings.TrimSpace(val.AsString()) != "null" { - jv, err := ctyjson.Unmarshal(src, ty) - if err == nil { - p.buf.WriteString("jsonencode(") - if jv.LengthInt() == 0 { - p.writeValue(jv, action, 0) - } else { - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(jv, action, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteByte(')') - break // don't *also* do the normal behavior below - } - } - } - - if strings.Contains(val.AsString(), "\n") { - // It's a multi-line string, so we want to use the multi-line - // rendering so it'll be readable. Rather than re-implement - // that here, we'll just re-use the multi-line string diff - // printer with no changes, which ends up producing the - // result we want here. - // The path argument is nil because we don't track path - // information into strings and we know that a string can't - // have any indices or attributes that might need to be marked - // as (requires replacement), which is what that argument is for. 
- p.writeValueDiff(val, val, indent, nil) - break - } - - fmt.Fprintf(p.buf, "%q", val.AsString()) - case cty.Bool: - if val.True() { - p.buf.WriteString("true") - } else { - p.buf.WriteString("false") - } - case cty.Number: - bf := val.AsBigFloat() - p.buf.WriteString(bf.Text('f', -1)) - default: - // should never happen, since the above is exhaustive - fmt.Fprintf(p.buf, "%#v", val) - } - case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): - p.buf.WriteString("[") - - it := val.ElementIterator() - for it.Next() { - _, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",") - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("]") - case ty.IsMapType(): - p.buf.WriteString("{") - - keyLen := 0 - for it := val.ElementIterator(); it.Next(); { - key, _ := it.Element() - if keyStr := key.AsString(); len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - for it := val.ElementIterator(); it.Next(); { - key, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(key, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - case ty.IsObjectType(): - p.buf.WriteString("{") - - atys := ty.AttributeTypes() - attrNames := make([]string, 0, len(atys)) - displayAttrNames := make(map[string]string, len(atys)) - nameLen := 0 - for attrName := range atys { - attrNames = append(attrNames, attrName) - displayAttrNames[attrName] = displayAttributeName(attrName) - if len(displayAttrNames[attrName]) > nameLen { - nameLen = 
len(displayAttrNames[attrName]) - } - } - sort.Strings(attrNames) - - for _, attrName := range attrNames { - val := val.GetAttr(attrName) - displayAttrName := displayAttrNames[attrName] - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.buf.WriteString(displayAttrName) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(displayAttrName))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if len(attrNames) > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - } -} - -func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { - ty := old.Type() - typesEqual := ctyTypesEqual(ty, new.Type()) - - // We have some specialized diff implementations for certain complex - // values where it's useful to see a visualization of the diff of - // the nested elements rather than just showing the entire old and - // new values verbatim. - // However, these specialized implementations can apply only if both - // values are known and non-null. - if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { - if old.HasMark(marks.Sensitive) || new.HasMark(marks.Sensitive) { - p.buf.WriteString(sensitiveCaption) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - - switch { - case ty == cty.String: - // We have special behavior for both multi-line strings in general - // and for strings that can parse as JSON. For the JSON handling - // to apply, both old and new must be valid JSON. - // For single-line strings that don't parse as JSON we just fall - // out of this switch block and do the default old -> new rendering. - oldS := old.AsString() - newS := new.AsString() - - { - // Special behavior for JSON strings containing object or - // list values. 
- oldBytes := []byte(oldS) - newBytes := []byte(newS) - oldType, oldErr := ctyjson.ImpliedType(oldBytes) - newType, newErr := ctyjson.ImpliedType(newBytes) - if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { - oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) - newJV, newErr := ctyjson.Unmarshal(newBytes, newType) - if oldErr == nil && newErr == nil { - if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace - p.buf.WriteString("jsonencode(") - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(plans.Update) - p.writeValueDiff(oldJV, newJV, indent+4, path) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } else { - // if they differ only in insignificant whitespace - // then we'll note that but still expand out the - // effective value. - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) - } else { - p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) - } - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(oldJV, plans.NoOp, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } - return - } - } - } - - if !strings.Contains(oldS, "\n") && !strings.Contains(newS, "\n") { - break - } - - p.buf.WriteString("<<-EOT") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var oldLines, newLines []cty.Value - { - r := strings.NewReader(oldS) - sc := bufio.NewScanner(r) - for sc.Scan() { - oldLines = append(oldLines, cty.StringVal(sc.Text())) - } - } - { - r := strings.NewReader(newS) - sc := bufio.NewScanner(r) - for sc.Scan() { - newLines = append(newLines, cty.StringVal(sc.Text())) - } - } - - // Optimization for 
strings which are exactly equal: just print - // directly without calculating the sequence diff. This makes a - // significant difference when this code path is reached via a - // writeValue call with a large multi-line string. - if oldS == newS { - for _, line := range newLines { - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.buf.WriteString(line.AsString()) - p.buf.WriteString("\n") - } - } else { - diffLines := ctySequenceDiff(oldLines, newLines) - for _, diffLine := range diffLines { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(diffLine.Action) - - switch diffLine.Action { - case plans.NoOp, plans.Delete: - p.buf.WriteString(diffLine.Before.AsString()) - case plans.Create: - p.buf.WriteString(diffLine.After.AsString()) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return for strings - p.buf.WriteString(diffLine.After.AsString()) - - } - p.buf.WriteString("\n") - } - } - - p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol - p.buf.WriteString("EOT") - - return - - case ty.IsSetType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var addedVals, removedVals, allVals []cty.Value - for it := old.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if new.HasElement(val).False() { - removedVals = append(removedVals, val) - } - } - for it := new.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if val.IsKnown() && old.HasElement(val).False() { - addedVals = append(addedVals, val) - } - } - - var all, added, removed cty.Value - if len(allVals) > 0 { - all = cty.SetVal(allVals) - } else { - all = cty.SetValEmpty(ty.ElementType()) - } - if len(addedVals) > 0 { - added = cty.SetVal(addedVals) - } else { - added = cty.SetValEmpty(ty.ElementType()) - } - if 
len(removedVals) > 0 { - removed = cty.SetVal(removedVals) - } else { - removed = cty.SetValEmpty(ty.ElementType()) - } - - suppressedElements := 0 - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - - var action plans.Action - switch { - case !val.IsKnown(): - action = plans.Update - case added.HasElement(val).True(): - action = plans.Create - case removed.HasElement(val).True(): - action = plans.Delete - default: - action = plans.NoOp - } - - if action == plans.NoOp && !p.verbose { - suppressedElements++ - continue - } - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",\n") - } - - if suppressedElements > 0 { - p.writeActionSymbol(plans.NoOp) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - noun := "elements" - if suppressedElements == 1 { - noun = "element" - } - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - case ty.IsListType() || ty.IsTupleType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) - - // Maintain a stack of suppressed lines in the diff for later - // display or elision - var suppressedElements []*plans.Change - var changeShown bool - - for i := 0; i < len(elemDiffs); i++ { - if !p.verbose { - for i < len(elemDiffs) && elemDiffs[i].Action == plans.NoOp { - suppressedElements = append(suppressedElements, elemDiffs[i]) - i++ - } - } - - // If we have some suppressed elements on the stack… - if len(suppressedElements) > 0 { - // If we've just rendered a change, display the first - // element in the stack as context - if changeShown { - elemDiff := suppressedElements[0] - 
p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - p.buf.WriteString(",\n") - suppressedElements = suppressedElements[1:] - } - - hidden := len(suppressedElements) - - // If we're not yet at the end of the list, capture the - // last element on the stack as context for the upcoming - // change to be rendered - var nextContextDiff *plans.Change - if hidden > 0 && i < len(elemDiffs) { - hidden-- - nextContextDiff = suppressedElements[hidden] - } - - // If there are still hidden elements, show an elision - // statement counting them - if hidden > 0 { - p.writeActionSymbol(plans.NoOp) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - noun := "elements" - if hidden == 1 { - noun = "element" - } - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), hidden, noun)) - p.buf.WriteString("\n") - } - - // Display the next context diff if it was captured above - if nextContextDiff != nil { - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(nextContextDiff.After, nextContextDiff.Action, indent+4) - p.buf.WriteString(",\n") - } - - // Suppressed elements have now been handled so clear them again - suppressedElements = nil - } - - if i >= len(elemDiffs) { - break - } - - elemDiff := elemDiffs[i] - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(elemDiff.Action) - switch elemDiff.Action { - case plans.NoOp, plans.Delete: - p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) - case plans.Update: - p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) - case plans.Create: - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return. 
- p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - } - - p.buf.WriteString(",\n") - changeShown = true - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - - return - - case ty.IsMapType(): - p.buf.WriteString("{") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - suppressedElements := 0 - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - kV := cty.StringVal(k) - var action plans.Action - if old.HasIndex(kV).False() { - action = plans.Create - } else if new.HasIndex(kV).False() { - action = plans.Delete - } - - if old.HasIndex(kV).True() && new.HasIndex(kV).True() { - if ctyEqualValueAndMarks(old.Index(kV), new.Index(kV)) { - action = plans.NoOp - } else { - action = plans.Update - } - } - - if action == plans.NoOp && !p.verbose { - suppressedElements++ - continue - } - - path := append(path, cty.IndexStep{Key: kV}) - - oldV := old.Index(kV) - newV := new.Index(kV) - p.writeSensitivityWarning(oldV, newV, indent+2, action, false) - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(cty.StringVal(k), action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - switch action { - case plans.Create, plans.NoOp: - v := new.Index(kV) - if v.HasMark(marks.Sensitive) { - p.buf.WriteString(sensitiveCaption) - } else { - p.writeValue(v, action, 
indent+4) - } - case plans.Delete: - oldV := old.Index(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - if oldV.HasMark(marks.Sensitive) || newV.HasMark(marks.Sensitive) { - p.buf.WriteString(sensitiveCaption) - } else { - p.writeValueDiff(oldV, newV, indent+4, path) - } - } - - p.buf.WriteByte('\n') - } - - if suppressedElements > 0 { - p.writeActionSymbol(plans.NoOp) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - noun := "elements" - if suppressedElements == 1 { - noun = "element" - } - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - - return - case ty.IsObjectType(): - p.buf.WriteString("{") - p.buf.WriteString("\n") - - forcesNewResource := p.pathForcesNewResource(path) - - var allKeys []string - displayKeys := make(map[string]string) - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - displayKeys[keyStr] = displayAttributeName(keyStr) - if len(displayKeys[keyStr]) > keyLen { - keyLen = len(displayKeys[keyStr]) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - displayKeys[keyStr] = displayAttributeName(keyStr) - if len(displayKeys[keyStr]) > keyLen { - keyLen = len(displayKeys[keyStr]) - } - } - - sort.Strings(allKeys) - - suppressedElements := 0 - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - kV := k - var action plans.Action - if !old.Type().HasAttribute(kV) { - action = plans.Create - } else if !new.Type().HasAttribute(kV) { - action = plans.Delete - } else if ctyEqualValueAndMarks(old.GetAttr(kV), new.GetAttr(kV)) { - action = plans.NoOp - } else { - 
action = plans.Update - } - - // TODO: If in future we have a schema associated with this - // object, we should pass the attribute's schema to - // identifyingAttribute here. - if action == plans.NoOp && !p.verbose && !identifyingAttribute(k, nil) { - suppressedElements++ - continue - } - - path := append(path, cty.GetAttrStep{Name: kV}) - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.buf.WriteString(displayKeys[k]) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(displayKeys[k]))) - p.buf.WriteString(" = ") - - switch action { - case plans.Create, plans.NoOp: - v := new.GetAttr(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.GetAttr(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.GetAttr(kV) - newV := new.GetAttr(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteString("\n") - } - - if suppressedElements > 0 { - p.writeActionSymbol(plans.NoOp) - p.buf.WriteString(strings.Repeat(" ", indent+2)) - noun := "elements" - if suppressedElements == 1 { - noun = "element" - } - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), suppressedElements, noun)) - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - - if forcesNewResource { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - } - - // In all other cases, we just show the new and old values as-is - p.writeValue(old, plans.Delete, indent) - if new.IsNull() { - p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) - } else { - p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) - } - - p.writeValue(new, plans.Create, indent) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } -} - -// writeActionSymbol writes a symbol to represent the given action, followed -// by a space. 
-// -// It only supports the actions that can be represented with a single character: -// Create, Delete, Update and NoAction. -func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { - switch action { - case plans.Create: - p.buf.WriteString(p.color.Color("[green]+[reset] ")) - case plans.Delete: - p.buf.WriteString(p.color.Color("[red]-[reset] ")) - case plans.Update: - p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) - case plans.NoOp: - p.buf.WriteString(" ") - default: - // Should never happen - p.buf.WriteString(p.color.Color("? ")) - } -} - -func (p *blockBodyDiffPrinter) writeSensitivityWarning(old, new cty.Value, indent int, action plans.Action, isBlock bool) { - // Dont' show this warning for create or delete - if action == plans.Create || action == plans.Delete { - return - } - - // Customize the warning based on if it is an attribute or block - diffType := "attribute value" - if isBlock { - diffType = "block" - } - - // If only attribute sensitivity is changing, clarify that the value is unchanged - var valueUnchangedSuffix string - if !isBlock { - oldUnmarked, _ := old.UnmarkDeep() - newUnmarked, _ := new.UnmarkDeep() - if oldUnmarked.RawEquals(newUnmarked) { - valueUnchangedSuffix = " The value is unchanged." 
- } - } - - if new.HasMark(marks.Sensitive) && !old.HasMark(marks.Sensitive) { - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf(p.color.Color("# [yellow]Warning:[reset] this %s will be marked as sensitive and will not\n"), diffType)) - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf("# display in UI output after applying this change.%s\n", valueUnchangedSuffix)) - } - - // Note if changing this attribute will change its sensitivity - if old.HasMark(marks.Sensitive) && !new.HasMark(marks.Sensitive) { - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf(p.color.Color("# [yellow]Warning:[reset] this %s will no longer be marked as sensitive\n"), diffType)) - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf("# after applying this change.%s\n", valueUnchangedSuffix)) - } -} - -func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { - if !p.action.IsReplace() || p.requiredReplace.Empty() { - // "requiredReplace" only applies when the instance is being replaced, - // and we should only inspect that set if it is not empty - return false - } - return p.requiredReplace.Has(path) -} - -func ctyEmptyString(value cty.Value) bool { - if !value.IsNull() && value.IsKnown() { - valueType := value.Type() - if valueType == cty.String && value.AsString() == "" { - return true - } - } - return false -} - -func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { - attrType := val.Type().AttributeType(name) - - if val.IsNull() { - return cty.NullVal(attrType) - } - - // We treat "" as null here - // as existing SDK doesn't support null yet. - // This allows us to avoid spurious diffs - // until we introduce null to the SDK. 
- attrValue := val.GetAttr(name) - // If the value is marked, the ctyEmptyString function will fail - if !val.ContainsMarked() && ctyEmptyString(attrValue) { - return cty.NullVal(attrType) - } - - return attrValue -} - -func ctyCollectionValues(val cty.Value) []cty.Value { - if !val.IsKnown() || val.IsNull() { - return nil - } - - ret := make([]cty.Value, 0, val.LengthInt()) - for it := val.ElementIterator(); it.Next(); { - _, value := it.Element() - ret = append(ret, value) - } - return ret -} - -// ctySequenceDiff returns differences between given sequences of cty.Value(s) -// in the form of Create, Delete, or Update actions (for objects). -func ctySequenceDiff(old, new []cty.Value) []*plans.Change { - var ret []*plans.Change - lcs := objchange.LongestCommonSubsequence(old, new, objchange.ValueEqual) - var oldI, newI, lcsI int - for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { - // We first process items in the old and new sequences which are not - // equal to the current common sequence item. Old items are marked as - // deletions, and new items are marked as additions. - // - // There is an exception for deleted & created object items, which we - // try to render as updates where that makes sense. - for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { - // Render this as an object update if all of these are true: - // - // - the current old item is an object; - // - there's a current new item which is also an object; - // - either there are no common items left, or the current new item - // doesn't equal the current common item. - // - // Why do we need the the last clause? If we have current items in all - // three sequences, and the current new item is equal to a common item, - // then we should just need to advance the old item list and we'll - // eventually find a common item matching both old and new. 
- // - // This combination of conditions allows us to render an object update - // diff instead of a combination of delete old & create new. - isObjectDiff := old[oldI].Type().IsObjectType() && newI < len(new) && new[newI].Type().IsObjectType() && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) - if isObjectDiff { - ret = append(ret, &plans.Change{ - Action: plans.Update, - Before: old[oldI], - After: new[newI], - }) - oldI++ - newI++ // we also consume the next "new" in this case - continue - } - - // Otherwise, this item is not part of the common sequence, so - // render as a deletion. - ret = append(ret, &plans.Change{ - Action: plans.Delete, - Before: old[oldI], - After: cty.NullVal(old[oldI].Type()), - }) - oldI++ - } - for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { - ret = append(ret, &plans.Change{ - Action: plans.Create, - Before: cty.NullVal(new[newI].Type()), - After: new[newI], - }) - newI++ - } - - // When we've exhausted the old & new sequences of items which are not - // in the common subsequence, we render a common item and continue. - if lcsI < len(lcs) { - ret = append(ret, &plans.Change{ - Action: plans.NoOp, - Before: lcs[lcsI], - After: lcs[lcsI], - }) - - // All of our indexes advance together now, since the line - // is common to all three sequences. 
- lcsI++ - oldI++ - newI++ - } - } - return ret -} - -// ctyEqualValueAndMarks checks equality of two possibly-marked values, -// considering partially-unknown values and equal values with different marks -// as inequal -func ctyEqualWithUnknown(old, new cty.Value) bool { - if !old.IsWhollyKnown() || !new.IsWhollyKnown() { - return false - } - return ctyEqualValueAndMarks(old, new) -} - -// ctyEqualValueAndMarks checks equality of two possibly-marked values, -// considering equal values with different marks as inequal -func ctyEqualValueAndMarks(old, new cty.Value) bool { - oldUnmarked, oldMarks := old.UnmarkDeep() - newUnmarked, newMarks := new.UnmarkDeep() - sameValue := oldUnmarked.Equals(newUnmarked) - return sameValue.IsKnown() && sameValue.True() && oldMarks.Equal(newMarks) -} - -// ctyTypesEqual checks equality of two types more loosely -// by avoiding checks of object/tuple elements -// as we render differences on element-by-element basis anyway -func ctyTypesEqual(oldT, newT cty.Type) bool { - if oldT.IsObjectType() && newT.IsObjectType() { - return true - } - if oldT.IsTupleType() && newT.IsTupleType() { - return true - } - return oldT.Equals(newT) -} - -func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { - if cap(path)-len(path) >= minExtra { - return path - } - newCap := cap(path) * 2 - if newCap < (len(path) + minExtra) { - newCap = len(path) + minExtra - } - newPath := make(cty.Path, len(path), newCap) - copy(newPath, path) - return newPath -} - -// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "list" is -// actually represented as a tuple type where nested blocks contain -// dynamically-typed values. 
-func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsListType() { - return cty.ListValEmpty(ty.ElementType()) - } - return cty.EmptyTupleVal // must need a tuple, then -} - -// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "map" is -// actually represented as an object type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsMapType() { - return cty.MapValEmpty(ty.ElementType()) - } - return cty.EmptyObjectVal // must need an object, then -} - -// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - // Dynamically-typed attributes are not supported inside blocks backed by - // sets, so our result here is always a set. - return cty.SetValEmpty(in.Type().ElementType()) -} - -// DiffActionSymbol returns a string that, once passed through a -// colorstring.Colorize, will produce a result that can be written -// to a terminal to produce a symbol made of three printable -// characters, possibly interspersed with VT100 color codes. -func DiffActionSymbol(action plans.Action) string { - switch action { - case plans.DeleteThenCreate: - return "[red]-[reset]/[green]+[reset]" - case plans.CreateThenDelete: - return "[green]+[reset]/[red]-[reset]" - case plans.Create: - return " [green]+[reset]" - case plans.Delete: - return " [red]-[reset]" - case plans.Read: - return " [cyan]<=[reset]" - case plans.Update: - return " [yellow]~[reset]" - case plans.NoOp: - return " " - default: - return " ?" 
- } -} - -// Extremely coarse heuristic for determining whether or not a given attribute -// name is important for identifying a resource. In the future, this may be -// replaced by a flag in the schema, but for now this is likely to be good -// enough. -func identifyingAttribute(name string, attrSchema *configschema.Attribute) bool { - return name == "id" || name == "tags" || name == "name" -} - -func (p *blockBodyDiffPrinter) writeSkippedAttr(skipped, indent int) { - if skipped > 0 { - noun := "attributes" - if skipped == 1 { - noun = "attribute" - } - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), skipped, noun)) - } -} - -func (p *blockBodyDiffPrinter) writeSkippedElems(skipped, indent int) { - if skipped > 0 { - noun := "elements" - if skipped == 1 { - noun = "element" - } - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString(fmt.Sprintf(p.color.Color("[dark_gray]# (%d unchanged %s hidden)[reset]"), skipped, noun)) - p.buf.WriteString("\n") - } -} - -func displayAttributeName(name string) string { - if !hclsyntax.ValidIdentifier(name) { - return fmt.Sprintf("%q", name) - } - return name -} diff --git a/internal/command/format/diff_test.go b/internal/command/format/diff_test.go deleted file mode 100644 index 5ab9502c37b1..000000000000 --- a/internal/command/format/diff_test.go +++ /dev/null @@ -1,7007 +0,0 @@ -package format - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" -) - -func TestResourceChange_primitiveTypes(t *testing.T) { - testCases := map[string]testCase{ - 
"creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - } -`, - }, - "creation (null string)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("null"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + string = "null" - } -`, - }, - "creation (null string with extra whitespace)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("null "), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + string = "null " - } -`, - }, - "creation (object with quoted keys)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "unquoted": cty.StringVal("value"), - "quoted:key": cty.StringVal("some-value"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ 
- "object": {Type: cty.Object(map[string]cty.Type{ - "unquoted": cty.String, - "quoted:key": cty.String, - }), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + object = { - + "quoted:key" = "some-value" - + unquoted = "value" - } - } -`, - }, - "deletion": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - id = "i-02ae66f368e8518a9" -> null - } -`, - }, - "deletion of deposed object": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - DeposedKey: states.DeposedKey("byebye"), - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example (deposed object byebye) will be destroyed - # (left over from a partially-failed replacement of this instance) - - resource "test_instance" "example" { - - id = "i-02ae66f368e8518a9" -> null - } -`, - }, - "deletion (empty string)": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "intentionally_long": cty.StringVal(""), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - 
"intentionally_long": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - id = "i-02ae66f368e8518a9" -> null - } -`, - }, - "string in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - } -`, - }, - "update with quoted key": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "saml:aud": cty.StringVal("https://example.com/saml"), - "zeta": cty.StringVal("alpha"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "saml:aud": cty.StringVal("https://saml.example.com"), - "zeta": cty.StringVal("alpha"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "saml:aud": {Type: cty.String, Optional: true}, - "zeta": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ "saml:aud" = "https://example.com/saml" -> 
"https://saml.example.com" - # (1 unchanged attribute hidden) - } -`, - }, - "string force-new update": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "ami"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement - id = "i-02ae66f368e8518a9" - } -`, - }, - "string in-place update (null values)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "unchanged": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "unchanged": cty.NullVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "unchanged": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update of multi-line string field": { - Action: plans.Update, - 
Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": cty.StringVal(`original -long -multi-line -string -field -`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -extremely long -multi-line -string -field -`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ more_lines = <<-EOT - original - - long - + extremely long - multi-line - string - field - EOT - } -`, - }, - "addition of multi-line string field": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -new line -`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + more_lines = <<-EOT - original - new line - EOT - } -`, - }, - "force-new update of multi-line string field": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": 
cty.StringVal(`original -`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -new line -`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "more_lines"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ more_lines = <<-EOT # forces replacement - original - + new line - EOT - } -`, - }, - - // Sensitive - - "creation with sensitive field": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "password": cty.StringVal("top-secret"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("top-secret"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - "conn_info": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSingle, - Attributes: map[string]*configschema.Attribute{ - "user": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - }, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + conn_info = { - + password = (sensitive value) - + user = "not-secret" - } - + id = (known after apply) - + password = (sensitive value) - } -`, - }, - "update with equal sensitive field": { - Action: plans.Update, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("blah"), - "str": cty.StringVal("before"), - "password": cty.StringVal("top-secret"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "str": cty.StringVal("after"), - "password": cty.StringVal("top-secret"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "str": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "blah" -> (known after apply) - ~ str = "before" -> "after" - # (1 unchanged attribute hidden) - } -`, - }, - - // tainted objects - "replace tainted resource": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "ami"}, - }), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - } -`, - }, - "force replacement with empty before value": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.StringVal("example"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "forced": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "forced"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - + forced = "example" # forces replacement - name = "name" - } -`, - }, - "force replacement with empty before value legacy": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.StringVal(""), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.StringVal("example"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "forced": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "forced"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - + forced = "example" # forces replacement - name = "name" - } -`, - }, - "read during apply because of unknown configuration": { - Action: plans.Read, - ActionReason: plans.ResourceInstanceReadBecauseConfigUnknown, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - # (config refers to values not yet known) - <= data "test_instance" "example" { - name = "name" - } -`, - }, - "read during apply because of pending changes to upstream dependency": { - Action: plans.Read, - ActionReason: plans.ResourceInstanceReadBecauseDependencyPending, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - # (depends on a resource or a module with changes pending) - <= data "test_instance" "example" { - name = "name" - } -`, - }, - "read during apply for unspecified reason": { - Action: plans.Read, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - <= data "test_instance" "example" { - name = "name" - } -`, - }, - "show all identifying attributes even if unchanged": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "bar": cty.StringVal("bar"), - "foo": cty.StringVal("foo"), - "name": cty.StringVal("alice"), - "tags": cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("bob"), - }), - }), - After: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "bar": cty.StringVal("bar"), - "foo": cty.StringVal("foo"), - "name": cty.StringVal("alice"), - "tags": cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("bob"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Optional: true}, - "name": {Type: cty.String, Optional: true}, - "tags": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - name = "alice" - tags = { - "name" = "bob" - } - # (2 unchanged attributes hidden) - } -`, - }, - } - - runTestCases(t, testCases) -} - -func TestResourceChange_JSON(t *testing.T) { - testCases := map[string]testCase{ - "creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{ - "str": "value", - "list":["a","b", 234, true], - "obj": {"key": "val"} - }`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - + json_field = jsonencode( - { - + list = [ - + "a", - + "b", - + 234, - + true, - ] - + obj = { - + key = "val" - } - + str = "value" - } - ) - } -`, - }, - "in-place update of object": { 
- Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value","ccc": 5}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + bbb = "new_value" - - ccc = 5 -> null - # (1 unchanged element hidden) - } - ) - } -`, - }, - "in-place update of object with quoted keys": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "c:c": "old_value"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": "value", "b:bb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + "b:bb" = "new_value" - - "c:c" = "old_value" -> null - # (1 unchanged element hidden) - } - ) - } -`, - }, - "in-place update (from empty tuple)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - 
Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": []}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": ["value"]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - + "value", - ] - } - ) - } -`, - }, - "in-place update (to empty tuple)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": ["value"]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": []}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - - "value", - ] - } - ) - } -`, - }, - "in-place update (tuple of different types)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - 42, - ~ { - ~ foo = "bar" -> "baz" - }, - "value", - ] - } - ) - } -`, - }, - "force-new update": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "json_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + bbb = "new_value" - # (1 unchanged element hidden) - } # forces replacement - ) - } -`, - }, - "in-place update (whitespace change)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - 
"id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa":"value", - "bbb":"another"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( # whitespace changes - { - aaa = "value" - bbb = "another" - } - ) - } -`, - }, - "force-new update (whitespace change)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa":"value", - "bbb":"another"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "json_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( # whitespace changes force replacement - { - aaa = "value" - bbb = "another" - } - ) - } -`, - }, - "creation (empty)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{}`), - }), - Schema: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - + json_field = jsonencode({}) - } -`, - }, - "JSON list item removal": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`["first","second","third"]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["first","second"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - # (1 unchanged element hidden) - "second", - - "third", - ] - ) - } -`, - }, - "JSON list item addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`["first","second"]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["first","second","third"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource 
"test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - # (1 unchanged element hidden) - "second", - + "third", - ] - ) - } -`, - }, - "JSON list object addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"first":"111"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"first":"111","second":"222"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + second = "222" - # (1 unchanged element hidden) - } - ) - } -`, - }, - "JSON object with nested list": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{ - "Statement": ["first"] - }`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{ - "Statement": ["first", "second"] - }`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ Statement = [ - "first", - + 
"second", - ] - } - ) - } -`, - }, - "JSON list of objects - adding item": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`[{"one": "111"}]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - { - one = "111" - }, - + { - + two = "222" - }, - ] - ) - } -`, - }, - "JSON list of objects - removing item": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - { - one = "111" - }, - - { - - two = "222" - }, - { - three = "333" - }, - ] - ) - } -`, - }, - "JSON object with list of objects": { - Action: plans.Update, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ parent = [ - { - one = "111" - }, - + { - + two = "222" - }, - ] - } - ) - } -`, - }, - "JSON object double nested lists": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ parent = [ - ~ { - ~ another_list = [ - "111", - + "222", - ] - }, - ] - } - ) - } -`, - }, - "in-place update from object to tuple": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["aaa", 42, "something"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - - aaa = [ - - 42, - - { - - foo = "bar" - }, - - "value", - ] - } -> [ - + "aaa", - + 42, - + "something", - ] - ) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_listObject(t *testing.T) { - testCases := map[string]testCase{ - // https://github.com/hashicorp/terraform/issues/30641 - "updating non-identifying attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "accounts": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("1"), - "name": cty.StringVal("production"), - "status": cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("2"), - "name": cty.StringVal("staging"), - "status": cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("3"), - "name": cty.StringVal("disaster-recovery"), - "status": cty.StringVal("ACTIVE"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "accounts": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("1"), - "name": cty.StringVal("production"), - "status": 
cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("2"), - "name": cty.StringVal("staging"), - "status": cty.StringVal("EXPLODED"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("3"), - "name": cty.StringVal("disaster-recovery"), - "status": cty.StringVal("ACTIVE"), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "accounts": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "id": cty.String, - "name": cty.String, - "status": cty.String, - })), - }, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ accounts = [ - { - id = "1" - name = "production" - status = "ACTIVE" - }, - ~ { - id = "2" - name = "staging" - ~ status = "ACTIVE" -> "EXPLODED" - }, - { - id = "3" - name = "disaster-recovery" - status = "ACTIVE" - }, - ] - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_primitiveList(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.NullVal(cty.List(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - 
ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - first addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - 
}), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - # (1 unchanged element hidden) - "bbbb", - + "cccc", - "dddd", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "list_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ # forces replacement - "aaaa", - + "bbbb", - "cccc", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - "aaaa", - "bbbb", - - "cccc", - "dddd", - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - "creation - empty list": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + list_field = [] - } -`, - }, - "in-place update - full to 
empty": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - "aaaa", - - "bbbb", - - "cccc", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - null to empty": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.NullVal(cty.List(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [] 
- # (1 unchanged attribute hidden) - } -`, - }, - "update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - "cccc", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "update - two new unknown elements": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - Schema: 
&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - + (known after apply), - "cccc", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_primitiveTuple(t *testing.T) { - testCases := map[string]testCase{ - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "tuple_field": cty.TupleVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "tuple_field": cty.TupleVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Required: true}, - "tuple_field": {Type: cty.Tuple([]cty.Type{cty.String, cty.String, cty.String, cty.String, cty.String}), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ tuple_field = [ - # (1 unchanged element hidden) - "bbbb", - - "dddd", - + "cccc", - "eeee", - # (1 unchanged element hidden) - ] - } -`, - }, - } - runTestCases(t, 
testCases) -} - -func TestResourceChange_primitiveSet(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.NullVal(cty.Set(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource 
"test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - + "bbbb", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - 
Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "set_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ # forces replacement - + "bbbb", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("bbbb"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "cccc", - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - "creation - empty set": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": 
cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + set_field = [] - } -`, - }, - "in-place update - full to empty set": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - null to empty set": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.NullVal(cty.Set(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": 
cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [] - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update to unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.UnknownVal(cty.Set(cty.String)), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] -> (known after apply) - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "bbbb", - ~ (known after apply), - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_map(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.NullVal(cty.Map(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "new-key": cty.StringVal("new-element"), - "be:ep": cty.StringVal("boop"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + map_field = { - + "be:ep" = "boop" - + "new-key" = "new-element" - } - # 
(1 unchanged attribute hidden) - } -`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "new-key": cty.StringVal("new-element"), - "be:ep": cty.StringVal("boop"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - + "be:ep" = "boop" - + "new-key" = "new-element" - } - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "b:b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - 
"map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - + "b" = "bbbb" - + "b:b" = "bbbb" - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - } -`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "map_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { # forces replacement - + "b" = "bbbb" - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": 
cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "b": cty.StringVal("bbbb"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - - "a" = "aaaa" -> null - - "c" = "cccc" -> null - # (1 unchanged element hidden) - } - # (1 unchanged attribute hidden) - } -`, - }, - "creation - empty": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + map_field = {} - } -`, - }, - "update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - 
"a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.UnknownVal(cty.String), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - ~ "b" = "bbbb" -> (known after apply) - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedList(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - equal": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": 
cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - })}), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device {} - } -`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - 
"volume_type": cty.String, - })), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - + size = "50GB" - # (1 unchanged attribute hidden) - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - + new_field = "new_value" - # (1 unchanged attribute hidden) - } - } -`, - }, - "force-new update (inside blocks)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.IndexStep{Key: cty.NumberIntVal(0)}, - 
cty.GetAttrStep{Name: "volume_type"}, - }, - cty.Path{ - cty.GetAttrStep{Name: "disks"}, - cty.IndexStep{Key: cty.NumberIntVal(0)}, - cty.GetAttrStep{Name: "mount_point"}, - }, - ), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement - # (1 unchanged attribute hidden) - }, - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - ~ volume_type = "gp2" -> "different" # forces replacement - } - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = 
"ami-BEFORE" -> "ami-AFTER" - ~ disks = [ # forces replacement - ~ { - ~ mount_point = "/var/diska" -> "/var/diskb" - # (1 unchanged attribute hidden) - }, - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { # forces replacement - ~ volume_type = "gp2" -> "different" - } - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - } -`, - }, - "with dynamically-typed attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "block": cty.EmptyTupleVal, - }), - After: cty.ObjectVal(map[string]cty.Value{ - "block": cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - cty.ObjectVal(map[string]cty.Value{ - 
"attr": cty.True, - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + block { - + attr = "foo" - } - + block { - + attr = true - } - } -`, - }, - "in-place sequence update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}), - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": { - Type: cty.String, - Required: true, - }, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ list { - ~ attr = "x" -> "y" - } - ~ list { - ~ attr = "y" -> "z" - } - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": 
cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - modification": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskc"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("75GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskc"), - "size": cty.StringVal("25GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - ~ size = "50GB" -> "75GB" - # (1 unchanged attribute hidden) - }, - ~ { - ~ size = "50GB" -> "25GB" - # (1 unchanged attribute hidden) - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedSet(t *testing.T) { - testCases := map[string]testCase{ - "creation from null - sensitive set": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.Object(map[string]cty.Type{ - "id": cty.String, - "ami": cty.String, - "disks": cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.Set(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - })), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": 
cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - } -`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - } -`, - }, - "in-place update - creation - sensitive set": { - Action: 
plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. 
- ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - } -`, - }, - "in-place update - marking set sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. The value is unchanged. 
- ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - - { - - mount_point = "/var/diska" -> null - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + new_field = "new_value" - + volume_type = "gp2" - } - - root_block_device { - - volume_type = "gp2" -> null - } - } -`, - }, - "force-new update (whole block)": { - Action: 
plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { # forces replacement - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - + { # forces replacement - + mount_point = "/var/diskb" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { # forces replacement - + volume_type = "different" - } - - root_block_device { # forces replacement - - volume_type = "gp2" -> null - } - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.SetVal([]cty.Value{ 
- cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - "new_field": cty.String, - })), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - new_field = "new_value" -> null - - volume_type = "gp2" -> null - } - } -`, - }, - "in-place update - empty nested sets": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - 
RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disks = [ - ] - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - null insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disks = [ - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + new_field = "new_value" - + volume_type = "gp2" - } - - root_block_device { - - volume_type = "gp2" -> null - } - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": 
cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedMap(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = { - + "disk_a" = { - + mount_point = "/var/diska" - }, - } - + id = "i-02ae66f368e8518a9" - - + root_block_device "a" { - + volume_type = "gp2" - } - } -`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - + "disk_a" = { - + mount_point = "/var/diska" - }, - } - id = "i-02ae66f368e8518a9" - - + root_block_device "a" { - + volume_type = "gp2" - } - } -`, - }, 
- "in-place update - change attr": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - ~ "disk_a" = { - + size = "50GB" - # (1 unchanged attribute hidden) - }, - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { - + new_field = "new_value" - # (1 unchanged attribute hidden) - } - } -`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - 
"root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "disk_2": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/disk2"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - + "disk_2" = { - + mount_point = "/var/disk2" - + size = "50GB" - }, - # (1 unchanged element hidden) - } - id = "i-02ae66f368e8518a9" - - + root_block_device "b" { - + new_field = "new_value" - + volume_type = "gp2" - } - - # (1 unchanged block hidden) - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": 
cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("standard"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("standard"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.IndexStep{Key: cty.StringVal("a")}, - }, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - ~ "disk_a" = { # forces replacement - ~ size = "50GB" -> "100GB" - # (1 unchanged attribute hidden) - }, - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { # forces replacement - ~ volume_type = "gp2" -> "different" - } - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": 
cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - "new_field": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - - "disk_a" = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - } - id = "i-02ae66f368e8518a9" - - - root_block_device "a" { - - new_field = "new_value" -> null - - volume_type = "gp2" -> null - } - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - 
}), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - - "disk_a" = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - } -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - insertion sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}, - cty.IndexStep{Key: cty.StringVal("disk_a")}, - cty.GetAttrStep{Name: "mount_point"}, - }, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - 
+ "disk_a" = { - + mount_point = (sensitive value) - + size = "50GB" - }, - } - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - multiple unchanged blocks": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (2 unchanged blocks hidden) - } -`, - }, - "in-place update - multiple blocks first changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": 
cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - multiple blocks second changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - 
"volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - multiple blocks changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - 
"root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - } -`, - }, - "in-place update - multiple different unchanged blocks": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": 
cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (2 unchanged blocks hidden) - } -`, - }, - "in-place update - multiple different blocks first changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> 
"ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - multiple different blocks second changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - 
multiple different blocks changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - } -`, - }, - "in-place update - mixed blocks unchanged": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": 
cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (4 unchanged blocks hidden) - } -`, - }, - "in-place update - mixed blocks changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - 
"id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (2 
unchanged blocks hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedSingle(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - equal": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disk": cty.NullVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": 
cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.NullVal(cty.String), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disk = { - + mount_point = "/var/diska" - + size = "50GB" - } - id = "i-02ae66f368e8518a9" - - + root_block_device {} - } -`, - }, - "force-new update (inside blocks)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.GetAttrStep{Name: "volume_type"}, - }, - cty.Path{ - cty.GetAttrStep{Name: "disk"}, - cty.GetAttrStep{Name: "mount_point"}, - }, - ), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement - # (1 unchanged 
attribute hidden) - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - ~ volume_type = "gp2" -> "different" # forces replacement - } - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disk"}}, - ), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { # forces replacement - ~ mount_point = "/var/diska" -> "/var/diskb" - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device { # forces replacement - ~ volume_type = "gp2" -> "different" - } - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": 
cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disk": cty.NullVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disk = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - } -> null - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - } -`, - }, - "with dynamically-typed attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "block": cty.NullVal(cty.Object(map[string]cty.Type{ - "attr": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "block": cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + block { - + attr = "foo" - } - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), 
- "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.UnknownVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ mount_point = "/var/diska" -> (known after apply) - ~ size = "50GB" -> (known after apply) - } -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - "in-place update - modification": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("25GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - 
"new_field": cty.StringVal("new_value"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ size = "50GB" -> "25GB" - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedMapSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - 
"disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedListSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": 
cty.NullVal(cty.String), - "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": 
cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedSetSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - } -`, - 
}, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - } -`, - }, - } - runTestCases(t, testCases) -} - -func 
TestResourceChange_actionReason(t *testing.T) { - emptySchema := &configschema.Block{} - nullVal := cty.NullVal(cty.EmptyObject) - emptyVal := cty.EmptyObjectVal - - testCases := map[string]testCase{ - "delete for no particular reason": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" {} -`, - }, - "delete because of wrong repetition mode (NoKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.NoKey, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - # (because resource uses count or for_each) - - resource "test_instance" "example" {} -`, - }, - "delete because of wrong repetition mode (IntKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.IntKey(1), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example[1] will be destroyed - # (because resource does not use count) - - resource "test_instance" "example" {} -`, - }, - "delete because of wrong repetition mode (StringKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.StringKey("a"), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example["a"] will be destroyed - # (because resource does not use for_each) - - resource "test_instance" "example" {} -`, - }, - "delete 
because no resource configuration": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, - ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # module.foo.test_instance.example will be destroyed - # (because test_instance.example is not in configuration) - - resource "test_instance" "example" {} -`, - }, - "delete because no module": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, - ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # module.foo[1].test_instance.example will be destroyed - # (because module.foo[1] is not in configuration) - - resource "test_instance" "example" {} -`, - }, - "delete because out of range for count": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseCountIndex, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.IntKey(1), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example[1] will be destroyed - # (because index [1] is out of range for count) - - resource "test_instance" "example" {} -`, - }, - "delete because out of range for for_each": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseEachKey, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.StringKey("boop"), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example["boop"] will be destroyed - # (because key ["boop"] is not in for_each map) - - resource "test_instance" "example" {} -`, - }, - "replace for no particular reason (delete 
first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" {} -`, - }, - "replace for no particular reason (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example must be replaced -+/- resource "test_instance" "example" {} -`, - }, - "replace by request (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceByRequest, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be replaced, as requested --/+ resource "test_instance" "example" {} -`, - }, - "replace by request (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceReplaceByRequest, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be replaced, as requested -+/- resource "test_instance" "example" {} -`, - }, - "replace because tainted (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced --/+ resource "test_instance" "example" {} -`, - }, - "replace because tainted (create first)": { - Action: plans.CreateThenDelete, - 
ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced -+/- resource "test_instance" "example" {} -`, - }, - "replace because cannot update (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - // This one has no special message, because the fuller explanation - // typically appears inline as a "# forces replacement" comment. - // (not shown here) - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" {} -`, - }, - "replace because cannot update (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - // This one has no special message, because the fuller explanation - // typically appears inline as a "# forces replacement" comment. 
- // (not shown here) - ExpectedOutput: ` # test_instance.example must be replaced -+/- resource "test_instance" "example" {} -`, - }, - } - - runTestCases(t, testCases) -} - -func TestResourceChange_sensitiveVariable(t *testing.T) { - testCases := map[string]testCase{ - "creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-123"), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "nested_block_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - "another": cty.StringVal("not secret"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - "another": cty.StringVal("not secret"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - // Nested blocks/sets will mark the whole set/block as sensitive - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_list"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = (sensitive value) - + id = "i-02ae66f368e8518a9" - + list_field = [ - + "hello", - + (sensitive value), - + "!", - ] - + map_key = { - + "breakfast" = 800 - + "dinner" = (sensitive value) - } - + map_whole = (sensitive value) - - + nested_block_list { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - + nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "in-place update - before sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "special": cty.BoolVal(false), - "some_number": cty.NumberIntVal(2), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("."), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(1900), - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: 
cty.Path{cty.GetAttrStep{Name: "special"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "special": {Type: cty.Bool, Optional: true}, - "some_number": {Type: cty.Number, Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - # Warning: this attribute value will no longer be marked as sensitive 
- # after applying this change. - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - # (1 unchanged element hidden) - "friends", - - (sensitive value), - + ".", - ] - ~ map_key = { - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ map_whole = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ some_number = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ special = (sensitive value) - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "in-place update - after sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_single": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("original"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("goodbye"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(700), - "dinner": cty.NumberIntVal(2100), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_single": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("address")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_single"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - 
RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_single": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ list_field = [ - - "hello", - + (sensitive value), - "friends", - ] - ~ map_key = { - ~ "breakfast" = 800 -> 700 - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. - ~ "dinner" = (sensitive value) - } - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. - ~ map_whole = (sensitive value) - - # Warning: this block will be marked as sensitive and will not - # display in UI output after applying this change. - ~ nested_block_single { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "in-place update - both sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_map": cty.MapVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("original"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("goodbye"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(1800), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_map": cty.MapVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.UnknownVal(cty.String), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_map": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingMap, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - - (sensitive value), - + (sensitive value), - "friends", - ] - ~ map_key = { - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - ~ map_whole = (sensitive value) - - ~ nested_block_map { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "in-place update - value unchanged, sensitivity changes": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: 
cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "special"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "special": {Type: cty.Bool, Optional: true}, - "some_number": {Type: cty.Number, Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - # Warning: this 
attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - # (1 unchanged element hidden) - "friends", - - (sensitive value), - + "!", - ] - ~ map_key = { - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ map_whole = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ some_number = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ special = (sensitive value) - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "deletion": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - "another": cty.StringVal("not secret"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - "another": cty.StringVal("not secret"), - }), - }), - }), - After: cty.NullVal(cty.EmptyObject), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - 
"list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - ami = (sensitive value) -> null - - id = "i-02ae66f368e8518a9" -> null - - list_field = [ - - "hello", - - (sensitive value), - ] -> null - - map_key = { - - "breakfast" = 800 - - "dinner" = (sensitive value) - } -> null - - map_whole = (sensitive value) -> null - - - nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - } -`, - }, - "update with sensitive value forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "nested_block_set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "nested_block_set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("ami"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.GetAttrPath("nested_block_set"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("ami"), - Marks: 
cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.GetAttrPath("nested_block_set"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Required: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - RequiredReplace: cty.NewPathSet( - cty.GetAttrPath("ami"), - cty.GetAttrPath("nested_block_set"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - - ~ nested_block_set { # forces replacement - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - } -`, - }, - "update with sensitive attribute forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, - }, - }, - RequiredReplace: cty.NewPathSet( - cty.GetAttrPath("ami"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - } -`, - }, - "update with sensitive nested type attribute forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("top-secret"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("new-secret"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "conn_info": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSingle, - Attributes: map[string]*configschema.Attribute{ - "user": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - }, - }, - }, - RequiredReplace: cty.NewPathSet( - 
cty.GetAttrPath("conn_info"), - cty.GetAttrPath("password"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ conn_info = { # forces replacement - ~ password = (sensitive value) - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - } -`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_moved(t *testing.T) { - prevRunAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "previous", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - testCases := map[string]testCase{ - "moved and updated": { - PrevRunAddr: prevRunAddr, - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("boop"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - # (moved from test_instance.previous) - ~ resource "test_instance" "example" { - ~ bar = "baz" -> "boop" - id = "12345" - # (1 unchanged attribute hidden) - } -`, - }, - "moved without changes": { - PrevRunAddr: prevRunAddr, - Action: plans.NoOp, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - Schema: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.previous has moved to test_instance.example - resource "test_instance" "example" { - id = "12345" - # (2 unchanged attributes hidden) - } -`, - }, - } - - runTestCases(t, testCases) -} - -type testCase struct { - Action plans.Action - ActionReason plans.ResourceInstanceChangeActionReason - ModuleInst addrs.ModuleInstance - Mode addrs.ResourceMode - InstanceKey addrs.InstanceKey - DeposedKey states.DeposedKey - Before cty.Value - BeforeValMarks []cty.PathValueMarks - AfterValMarks []cty.PathValueMarks - After cty.Value - Schema *configschema.Block - RequiredReplace cty.PathSet - ExpectedOutput string - PrevRunAddr addrs.AbsResourceInstance -} - -func runTestCases(t *testing.T, testCases map[string]testCase) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - ty := tc.Schema.ImpliedType() - - beforeVal := tc.Before - switch { // Some fixups to make the test cases a little easier to write - case beforeVal.IsNull(): - beforeVal = cty.NullVal(ty) // allow mistyped nulls - case !beforeVal.IsKnown(): - beforeVal = cty.UnknownVal(ty) // allow mistyped unknowns - } - - afterVal := tc.After - switch { // Some fixups to make the test cases a little easier to write - case afterVal.IsNull(): - afterVal = cty.NullVal(ty) // allow mistyped nulls - case !afterVal.IsKnown(): - afterVal = cty.UnknownVal(ty) // allow mistyped unknowns - } - - addr := addrs.Resource{ - Mode: tc.Mode, - Type: "test_instance", - Name: "example", - }.Instance(tc.InstanceKey).Absolute(tc.ModuleInst) - - prevRunAddr := tc.PrevRunAddr - // If no previous run address is given, reuse the current address - // to make initialization easier - if 
prevRunAddr.Resource.Resource.Type == "" { - prevRunAddr = addr - } - - change := &plans.ResourceInstanceChange{ - Addr: addr, - PrevRunAddr: prevRunAddr, - DeposedKey: tc.DeposedKey, - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - Change: plans.Change{ - Action: tc.Action, - Before: beforeVal.MarkWithPaths(tc.BeforeValMarks), - After: afterVal.MarkWithPaths(tc.AfterValMarks), - }, - ActionReason: tc.ActionReason, - RequiredReplace: tc.RequiredReplace, - } - - output := ResourceChange(change, tc.Schema, color, DiffLanguageProposedChange) - if diff := cmp.Diff(output, tc.ExpectedOutput); diff != "" { - t.Errorf("wrong output\n%s", diff) - } - }) - } -} - -func TestOutputChanges(t *testing.T) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - - testCases := map[string]struct { - changes []*plans.OutputChangeSrc - output string - }{ - "new output value": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.NullVal(cty.DynamicPseudoType), - cty.StringVal("bar"), - false, - ), - }, - ` - + foo = "bar"`, - }, - "removed output": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.StringVal("bar"), - cty.NullVal(cty.DynamicPseudoType), - false, - ), - }, - ` - - foo = "bar" -> null`, - }, - "single string change": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.StringVal("bar"), - cty.StringVal("baz"), - false, - ), - }, - ` - ~ foo = "bar" -> "baz"`, - }, - "element added to list": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.ListVal([]cty.Value{ - cty.StringVal("alpha"), - cty.StringVal("beta"), - cty.StringVal("delta"), - cty.StringVal("epsilon"), - }), - cty.ListVal([]cty.Value{ - cty.StringVal("alpha"), - cty.StringVal("beta"), - cty.StringVal("gamma"), - cty.StringVal("delta"), - cty.StringVal("epsilon"), - }), - false, - ), - }, - ` - ~ foo = [ - # (1 unchanged element hidden) - "beta", - + "gamma", - "delta", - 
# (1 unchanged element hidden) - ]`, - }, - "multiple outputs changed, one sensitive": { - []*plans.OutputChangeSrc{ - outputChange( - "a", - cty.NumberIntVal(1), - cty.NumberIntVal(2), - false, - ), - outputChange( - "b", - cty.StringVal("hunter2"), - cty.StringVal("correct-horse-battery-staple"), - true, - ), - outputChange( - "c", - cty.BoolVal(false), - cty.BoolVal(true), - false, - ), - }, - ` - ~ a = 1 -> 2 - ~ b = (sensitive value) - ~ c = false -> true`, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - output := OutputChanges(tc.changes, color) - if output != tc.output { - t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) - } - }) - } -} - -func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc { - addr := addrs.AbsOutputValue{ - OutputValue: addrs.OutputValue{Name: name}, - } - - change := &plans.OutputChange{ - Addr: addr, Change: plans.Change{ - Before: before, - After: after, - }, - Sensitive: sensitive, - } - - changeSrc, err := change.Encode() - if err != nil { - panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err)) - } - - return changeSrc -} - -// A basic test schema using a configurable NestingMode for one (NestedType) attribute and one block -func testSchema(nesting configschema.NestingMode) *configschema.Block { - var diskKey = "disks" - if nesting == configschema.NestingSingle { - diskKey = "disk" - } - - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - diskKey: { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} - -// A basic test schema using a configurable NestingMode for one (NestedType) -// attribute marked sensitive. -func testSchemaSensitive(nesting configschema.NestingMode) *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "disks": { - Sensitive: true, - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - } -} - -func testSchemaMultipleBlocks(nesting configschema.NestingMode) *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "disks": { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - "leaf_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} - -// similar to testSchema with the addition of a "new_field" block -func testSchemaPlus(nesting configschema.NestingMode) *configschema.Block { - var diskKey = "disks" - if 
nesting == configschema.NestingSingle { - diskKey = "disk" - } - - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - diskKey: { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "new_field": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} diff --git a/internal/command/format/state.go b/internal/command/format/state.go deleted file mode 100644 index d0db1cc3dd02..000000000000 --- a/internal/command/format/state.go +++ /dev/null @@ -1,216 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/mitchellh/colorstring" -) - -// StateOpts are the options for formatting a state. -type StateOpts struct { - // State is the state to format. This is required. - State *states.State - - // Schemas are used to decode attributes. This is required. - Schemas *terraform.Schemas - - // Color is the colorizer. This is optional. 
- Color *colorstring.Colorize -} - -// State takes a state and returns a string -func State(opts *StateOpts) string { - if opts.Color == nil { - panic("colorize not given") - } - - if opts.Schemas == nil { - panic("schemas not given") - } - - s := opts.State - if len(s.Modules) == 0 { - return "The state file is empty. No resources are represented." - } - - buf := bytes.NewBufferString("[reset]") - p := blockBodyDiffPrinter{ - buf: buf, - color: opts.Color, - action: plans.NoOp, - verbose: true, - } - - // Format all the modules - for _, m := range s.Modules { - formatStateModule(p, m, opts.Schemas) - } - - // Write the outputs for the root module - m := s.RootModule() - - if m.OutputValues != nil { - if len(m.OutputValues) > 0 { - p.buf.WriteString("Outputs:\n\n") - } - - // Sort the outputs - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - // Output each output k/v pair - for _, k := range ks { - v := m.OutputValues[k] - p.buf.WriteString(fmt.Sprintf("%s = ", k)) - if v.Sensitive { - p.buf.WriteString("(sensitive value)") - } else { - p.writeValue(v.Value, plans.NoOp, 0) - } - p.buf.WriteString("\n") - } - } - - trimmedOutput := strings.TrimSpace(p.buf.String()) - trimmedOutput += "[reset]" - - return opts.Color.Color(trimmedOutput) - -} - -func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { - // First get the names of all the resources so we can show them - // in alphabetical order. - names := make([]string, 0, len(m.Resources)) - for name := range m.Resources { - names = append(names, name) - } - sort.Strings(names) - - // Go through each resource and begin building up the output. 
- for _, key := range names { - for k, v := range m.Resources[key].Instances { - // keep these in order to keep the current object first, and - // provide deterministic output for the deposed objects - type obj struct { - header string - instance *states.ResourceInstanceObjectSrc - } - instances := []obj{} - - addr := m.Resources[key].Addr - resAddr := addr.Resource - - taintStr := "" - if v.Current != nil && v.Current.Status == 'T' { - taintStr = " (tainted)" - } - - instances = append(instances, - obj{fmt.Sprintf("# %s:%s\n", addr.Instance(k), taintStr), v.Current}) - - for dk, v := range v.Deposed { - instances = append(instances, - obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Instance(k), dk), v}) - } - - // Sort the instances for consistent output. - // Starting the sort from the second index, so the current instance - // is always first. - sort.Slice(instances[1:], func(i, j int) bool { - return instances[i+1].header < instances[j+1].header - }) - - for _, obj := range instances { - header := obj.header - instance := obj.instance - p.buf.WriteString(header) - if instance == nil { - // this shouldn't happen, but there's nothing to do here so - // don't panic below. - continue - } - - var schema *configschema.Block - - provider := m.Resources[key].ProviderConfig.Provider - if _, exists := schemas.Providers[provider]; !exists { - // This should never happen in normal use because we should've - // loaded all of the schemas and checked things prior to this - // point. We can't return errors here, but since this is UI code - // we will try to do _something_ reasonable. 
- p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider.String())) - continue - } - - switch resAddr.Mode { - case addrs.ManagedResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - resAddr.Mode, - resAddr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q resource type %s\n\n", provider, resAddr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "resource %q %q {", - resAddr.Type, - resAddr.Name, - )) - case addrs.DataResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - resAddr.Mode, - resAddr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q data source %s\n\n", provider, resAddr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "data %q %q {", - resAddr.Type, - resAddr.Name, - )) - default: - // should never happen, since the above is exhaustive - p.buf.WriteString(resAddr.String()) - } - - val, err := instance.Decode(schema.ImpliedType()) - if err != nil { - fmt.Println(err.Error()) - break - } - - path := make(cty.Path, 0, 3) - result := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) - if result.bodyWritten { - p.buf.WriteString("\n") - } - - p.buf.WriteString("}\n\n") - } - } - } - p.buf.WriteString("\n") -} diff --git a/internal/command/format/state_test.go b/internal/command/format/state_test.go deleted file mode 100644 index d83c6eaf9727..000000000000 --- a/internal/command/format/state_test.go +++ /dev/null @@ -1,400 +0,0 @@ -package format - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/zclconf/go-cty/cty" -) - -func TestState(t *testing.T) { - tests := []struct { - State *StateOpts - Want string - }{ 
- { - &StateOpts{ - State: &states.State{}, - Color: disabledColorize, - Schemas: &terraform.Schemas{}, - }, - "The state file is empty. No resources are represented.", - }, - { - &StateOpts{ - State: basicState(t), - Color: disabledColorize, - Schemas: testSchemas(), - }, - basicStateOutput, - }, - { - &StateOpts{ - State: nestedState(t), - Color: disabledColorize, - Schemas: testSchemas(), - }, - nestedStateOutput, - }, - { - &StateOpts{ - State: deposedState(t), - Color: disabledColorize, - Schemas: testSchemas(), - }, - deposedNestedStateOutput, - }, - { - &StateOpts{ - State: onlyDeposedState(t), - Color: disabledColorize, - Schemas: testSchemas(), - }, - onlyDeposedOutput, - }, - { - &StateOpts{ - State: stateWithMoreOutputs(t), - Color: disabledColorize, - Schemas: testSchemas(), - }, - stateWithMoreOutputsOutput, - }, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - got := State(tt.State) - if got != tt.Want { - t.Errorf( - "wrong result\ninput: %v\ngot: \n%q\nwant: \n%q", - tt.State.State, got, tt.Want, - ) - } - }) - } -} - -func testProvider() *terraform.MockProvider { - p := new(terraform.MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - - p.GetProviderSchemaResponse = testProviderSchema() - - return p -} - -func testProviderSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_resource": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "woozles": {Type: cty.String, Optional: true}, - }, - BlockTypes: 
map[string]*configschema.NestedBlock{ - "nested": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } -} - -func testSchemas() *terraform.Schemas { - provider := testProvider() - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.ProviderSchema(), - }, - } -} - -const basicStateOutput = `# data.test_data_source.data: -data "test_data_source" "data" { - compute = "sure" -} - -# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" -} - - -Outputs: - -bar = "bar value"` - -const nestedStateOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -}` - -const deposedNestedStateOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} - -# test_resource.baz[0]: (deposed object 1234) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -}` - -const onlyDeposedOutput = `# test_resource.baz[0]: -# test_resource.baz[0]: (deposed object 1234) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} - -# test_resource.baz[0]: (deposed object 5678) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -}` - -const stateWithMoreOutputsOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" -} - - -Outputs: - -bool_var = true -int_var = 42 
-map_var = { - "first" = "foo" - "second" = "bar" -} -sensitive_var = (sensitive value) -string_var = "string value"` - -func basicState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetLocalValue("foo", cty.StringVal("foo value")) - rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_source", - Name: "data", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"compute":"sure"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func stateWithMoreOutputs(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetOutputValue("string_var", cty.StringVal("string value"), false) - rootModule.SetOutputValue("int_var", cty.NumberIntVal(42), false) - rootModule.SetOutputValue("bool_var", cty.BoolVal(true), false) - rootModule.SetOutputValue("sensitive_var", cty.StringVal("secret!!!"), true) - rootModule.SetOutputValue("map_var", cty.MapVal(map[string]cty.Value{ - "first": cty.StringVal("foo"), - "second": cty.StringVal("bar"), - }), false) - - rootModule.SetResourceInstanceCurrent( - 
addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func nestedState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func deposedState(t *testing.T) *states.State { - state := nestedState(t) - rootModule := state.RootModule() - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("1234"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -// replicate a corrupt resource where only a deposed exists -func onlyDeposedState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: 
addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("1234"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("5678"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} diff --git a/internal/command/graph.go b/internal/command/graph.go deleted file mode 100644 index c1c225beb85b..000000000000 --- a/internal/command/graph.go +++ /dev/null @@ -1,225 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// GraphCommand is a Command implementation that takes a Terraform -// configuration and outputs the dependency tree in graphical form. 
-type GraphCommand struct { - Meta -} - -func (c *GraphCommand) Run(args []string) int { - var drawCycles bool - var graphTypeStr string - var moduleDepth int - var verbose bool - var planPath string - - args = c.Meta.process(args) - cmdFlags := c.Meta.defaultFlagSet("graph") - cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") - cmdFlags.StringVar(&graphTypeStr, "type", "", "type") - cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") - cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") - cmdFlags.StringVar(&planPath, "plan", "", "plan") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) - return 1 - } - - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) - return 1 - } - - // Try to load plan if path is specified - var planFile *planfile.Reader - if planPath != "" { - planFile, err = c.PlanFile(planPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - } - - var diags tfdiags.Diagnostics - - backendConfig, backendDiags := c.loadBackendConfig(configPath) - diags = diags.Append(backendDiags) - if diags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - // Load the backend - b, backendDiags := c.Backend(&BackendOpts{ - Config: backendConfig, - }) - diags = diags.Append(backendDiags) - if backendDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - // We require a local backend - local, ok := b.(backend.Local) - if !ok { - c.showDiagnostics(diags) // in case of any warnings in here - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // This is a read-only command - c.ignoreRemoteVersionConflict(b) - - // Build the operation - opReq := c.Operation(b, 
arguments.ViewHuman) - opReq.ConfigDir = configPath - opReq.ConfigLoader, err = c.initConfigLoader() - opReq.PlanFile = planFile - opReq.AllowUnsetVariables = true - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - - // Get the context - lr, _, ctxDiags := local.LocalRun(opReq) - diags = diags.Append(ctxDiags) - if ctxDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - if graphTypeStr == "" { - switch { - case lr.Plan != nil: - graphTypeStr = "apply" - default: - graphTypeStr = "plan" - } - } - - var g *terraform.Graph - var graphDiags tfdiags.Diagnostics - switch graphTypeStr { - case "plan": - g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.NormalMode) - case "plan-refresh-only": - g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.RefreshOnlyMode) - case "plan-destroy": - g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.DestroyMode) - case "apply": - plan := lr.Plan - - // Historically "terraform graph" would allow the nonsensical request to - // render an apply graph without a plan, so we continue to support that - // here, though perhaps one day this should be an error. - if lr.Plan == nil { - plan = &plans.Plan{ - Changes: plans.NewChanges(), - UIMode: plans.NormalMode, - PriorState: lr.InputState, - PrevRunState: lr.InputState, - } - } - - g, graphDiags = lr.Core.ApplyGraphForUI(plan, lr.Config) - case "eval", "validate": - // Terraform v0.12 through v1.0 supported both of these, but the - // graph variants for "eval" and "validate" are purely implementation - // details and don't reveal anything (user-model-wise) that you can't - // see in the plan graph. - graphDiags = graphDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Graph type no longer available", - fmt.Sprintf("The graph type %q is no longer available. 
Use -type=plan instead to get a similar result.", graphTypeStr), - )) - default: - graphDiags = graphDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported graph type", - `The -type=... argument must be either "plan", "plan-refresh-only", "plan-destroy", or "apply".`, - )) - } - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - graphStr, err := terraform.GraphDot(g, &dag.DotOpts{ - DrawCycles: drawCycles, - MaxDepth: moduleDepth, - Verbose: verbose, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting graph: %s", err)) - return 1 - } - - if diags.HasErrors() { - // For this command we only show diagnostics if there are errors, - // because printing out naked warnings could upset a naive program - // consuming our dot output. - c.showDiagnostics(diags) - return 1 - } - - c.Ui.Output(graphStr) - - return 0 -} - -func (c *GraphCommand) Help() string { - helpText := ` -Usage: terraform [global options] graph [options] - - Produces a representation of the dependency graph between different - objects in the current configuration and state. - - The graph is presented in the DOT language. The typical program that can - read this format is GraphViz, but many web services are also available - to read this format. - -Options: - - -plan=tfplan Render graph using the specified plan file instead of the - configuration in the current directory. - - -draw-cycles Highlight any cycles in the graph with colored edges. - This helps when diagnosing cycle errors. - - -type=plan Type of graph to output. Can be: plan, plan-refresh-only, - plan-destroy, or apply. By default Terraform chooses - "plan", or "apply" if you also set the -plan=... option. - - -module-depth=n (deprecated) In prior versions of Terraform, specified the - depth of modules to show in the output. 
-` - return strings.TrimSpace(helpText) -} - -func (c *GraphCommand) Synopsis() string { - return "Generate a Graphviz graph of the steps in an operation" -} diff --git a/internal/command/graph_test.go b/internal/command/graph_test.go deleted file mode 100644 index f58f7103e978..000000000000 --- a/internal/command/graph_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package command - -import ( - "os" - "strings" - "testing" - - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" -) - -func TestGraph(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("graph"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - c := &GraphCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(applyFixtureProvider()), - Ui: ui, - }, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - output := ui.OutputWriter.String() - if !strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { - t.Fatalf("doesn't look like digraph: %s", output) - } -} - -func TestGraph_multipleArgs(t *testing.T) { - ui := new(cli.MockUi) - c := &GraphCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(applyFixtureProvider()), - Ui: ui, - }, - } - - args := []string{ - "bad", - "bad", - } - if code := c.Run(args); code != 1 { - t.Fatalf("bad: \n%s", ui.OutputWriter.String()) - } -} - -func TestGraph_noArgs(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("graph"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - c := &GraphCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(applyFixtureProvider()), - Ui: ui, - }, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - output := ui.OutputWriter.String() - if 
!strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { - t.Fatalf("doesn't look like digraph: %s", output) - } -} - -func TestGraph_noConfig(t *testing.T) { - td := t.TempDir() - os.MkdirAll(td, 0755) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - c := &GraphCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(applyFixtureProvider()), - Ui: ui, - }, - } - - // Running the graph command without a config should not panic, - // but this may be an error at some point in the future. - args := []string{"-type", "apply"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } -} - -func TestGraph_plan(t *testing.T) { - testCwd(t) - - plan := &plans.Plan{ - Changes: plans.NewChanges(), - } - plan.Changes.Resources = append(plan.Changes.Resources, &plans.ResourceInstanceChangeSrc{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - Before: plans.DynamicValue(`{}`), - After: plans.DynamicValue(`null`), - }, - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }) - emptyConfig, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) - if err != nil { - t.Fatal(err) - } - plan.Backend = plans.Backend{ - // Doesn't actually matter since we aren't going to activate the backend - // for this command anyway, but we need something here for the plan - // file writer to succeed. 
- Type: "placeholder", - Config: emptyConfig, - } - _, configSnap := testModuleWithSnapshot(t, "graph") - - planPath := testPlanFile(t, configSnap, states.NewState(), plan) - - ui := new(cli.MockUi) - c := &GraphCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(applyFixtureProvider()), - Ui: ui, - }, - } - - args := []string{ - "-plan", planPath, - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - output := ui.OutputWriter.String() - if !strings.Contains(output, `provider[\"registry.terraform.io/hashicorp/test\"]`) { - t.Fatalf("doesn't look like digraph: %s", output) - } -} diff --git a/internal/command/helper.go b/internal/command/helper.go deleted file mode 100644 index acb6c26af0df..000000000000 --- a/internal/command/helper.go +++ /dev/null @@ -1,28 +0,0 @@ -package command - -import ( - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/cloud" -) - -const failedToLoadSchemasMessage = ` -Warning: Failed to update data for external integrations - -Terraform was unable to generate a description of the updated -state for use with external integrations in Terraform Cloud. -Any integrations configured for this workspace which depend on -information from the state may not work correctly when using the -result of this action. - -This problem occurs when Terraform cannot read the schema for -one or more of the providers used in the state. 
The next successful -apply will correct the problem by re-generating the JSON description -of the state: - terraform apply -` - -func isCloudMode(b backend.Enhanced) bool { - _, ok := b.(*cloud.Cloud) - - return ok -} diff --git a/internal/command/init.go b/internal/command/init.go deleted file mode 100644 index 2e6d13d9dacf..000000000000 --- a/internal/command/init.go +++ /dev/null @@ -1,1230 +0,0 @@ -package command - -import ( - "context" - "fmt" - "log" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/posener/complete" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/backend" - backendInit "github.com/hashicorp/terraform/internal/backend/init" - "github.com/hashicorp/terraform/internal/cloud" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/providercache" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" -) - -// InitCommand is a Command implementation that takes a Terraform -// module and clones it to the working directory. 
-type InitCommand struct { - Meta -} - -func (c *InitCommand) Run(args []string) int { - var flagFromModule, flagLockfile string - var flagBackend, flagCloud, flagGet, flagUpgrade bool - var flagPluginPath FlagStringSlice - flagConfigExtra := newRawFlags("-backend-config") - - args = c.Meta.process(args) - cmdFlags := c.Meta.extendedFlagSet("init") - cmdFlags.BoolVar(&flagBackend, "backend", true, "") - cmdFlags.BoolVar(&flagCloud, "cloud", true, "") - cmdFlags.Var(flagConfigExtra, "backend-config", "") - cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") - cmdFlags.BoolVar(&flagGet, "get", true, "") - cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") - cmdFlags.BoolVar(&c.migrateState, "migrate-state", false, "migrate state") - cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") - cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") - cmdFlags.StringVar(&flagLockfile, "lockfile", "", "Set a dependency lockfile mode") - cmdFlags.BoolVar(&c.Meta.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local Terraform versions are incompatible") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - backendFlagSet := arguments.FlagIsSet(cmdFlags, "backend") - cloudFlagSet := arguments.FlagIsSet(cmdFlags, "cloud") - - switch { - case backendFlagSet && cloudFlagSet: - c.Ui.Error("The -backend and -cloud options are aliases of one another and mutually-exclusive in their use") - return 1 - case backendFlagSet: - flagCloud = flagBackend - case cloudFlagSet: - flagBackend = flagCloud - } - - if c.migrateState && c.reconfigure { - 
c.Ui.Error("The -migrate-state and -reconfigure options are mutually-exclusive") - return 1 - } - - // Copying the state only happens during backend migration, so setting - // -force-copy implies -migrate-state - if c.forceInitCopy { - c.migrateState = true - } - - var diags tfdiags.Diagnostics - - if len(flagPluginPath) > 0 { - c.pluginPath = flagPluginPath - } - - // Validate the arg count and get the working directory - args = cmdFlags.Args() - path, err := ModulePath(args) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if err := c.storePluginPath(c.pluginPath); err != nil { - c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err)) - return 1 - } - - // This will track whether we outputted anything so that we know whether - // to output a newline before the success message - var header bool - - if flagFromModule != "" { - src := flagFromModule - - empty, err := configs.IsEmptyDir(path) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err)) - return 1 - } - if !empty { - c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]Copying configuration[reset] from %q...", src, - ))) - header = true - - hooks := uiModuleInstallHooks{ - Ui: c.Ui, - ShowLocalPaths: false, // since they are in a weird location for init - } - - initDirFromModuleAbort, initDirFromModuleDiags := c.initDirFromModule(path, src, hooks) - diags = diags.Append(initDirFromModuleDiags) - if initDirFromModuleAbort || initDirFromModuleDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - - c.Ui.Output("") - } - - // If our directory is empty, then we're done. We can't get or set up - // the backend with an empty directory. 
- empty, err := configs.IsEmptyDir(path) - if err != nil { - diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err)) - c.showDiagnostics(diags) - return 1 - } - if empty { - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) - return 0 - } - - // Load just the root module to begin backend and module initialization - rootModEarly, earlyConfDiags := c.loadSingleModule(path) - - // There may be parsing errors in config loading but these will be shown later _after_ - // checking for core version requirement errors. Not meeting the version requirement should - // be the first error displayed if that is an issue, but other operations are required - // before being able to check core version requirements. - if rootModEarly == nil { - c.Ui.Error(c.Colorize().Color(strings.TrimSpace(errInitConfigError))) - diags = diags.Append(earlyConfDiags) - c.showDiagnostics(diags) - - return 1 - } - - var back backend.Backend - - // There may be config errors or backend init errors but these will be shown later _after_ - // checking for core version requirement errors. - var backDiags tfdiags.Diagnostics - var backendOutput bool - - switch { - case flagCloud && rootModEarly.CloudConfig != nil: - back, backendOutput, backDiags = c.initCloud(rootModEarly, flagConfigExtra) - case flagBackend: - back, backendOutput, backDiags = c.initBackend(rootModEarly, flagConfigExtra) - default: - // load the previously-stored backend config - back, backDiags = c.Meta.backendFromState() - } - if backendOutput { - header = true - } - - var state *states.State - - // If we have a functional backend (either just initialized or initialized - // on a previous run) we'll use the current state as a potential source - // of provider dependencies. 
- if back != nil { - c.ignoreRemoteVersionConflict(back) - workspace, err := c.Workspace() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) - return 1 - } - sMgr, err := back.StateMgr(workspace) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error loading state: %s", err)) - return 1 - } - - if err := sMgr.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err)) - return 1 - } - - state = sMgr.State() - } - - if flagGet { - modsOutput, modsAbort, modsDiags := c.getModules(path, rootModEarly, flagUpgrade) - diags = diags.Append(modsDiags) - if modsAbort || modsDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - if modsOutput { - header = true - } - } - - // With all of the modules (hopefully) installed, we can now try to load the - // whole configuration tree. - config, confDiags := c.loadConfig(path) - // configDiags will be handled after the version constraint check, since an - // incorrect version of terraform may be producing errors for configuration - // constructs added in later versions. - - // Before we go further, we'll check to make sure none of the modules in - // the configuration declare that they don't support this Terraform - // version, so we can produce a version-related error message rather than - // potentially-confusing downstream errors. - versionDiags := terraform.CheckCoreVersionRequirements(config) - if versionDiags.HasErrors() { - c.showDiagnostics(versionDiags) - return 1 - } - - // If we pass the core version check, we want to show any errors from initializing the backend next, - // which will include syntax errors from loading the configuration. However, there's a special case - // where we are unable to load the backend from configuration or state _and_ the configuration has - // errors. In that case, we want to show a slightly friendlier error message for newcomers. 
- showBackendDiags := back != nil || rootModEarly.Backend != nil || rootModEarly.CloudConfig != nil - if showBackendDiags { - diags = diags.Append(backDiags) - if backDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - } else { - diags = diags.Append(earlyConfDiags) - if earlyConfDiags.HasErrors() { - c.Ui.Error(strings.TrimSpace(errInitConfigError)) - c.showDiagnostics(diags) - return 1 - } - } - - // If everything is ok with the core version check and backend initialization, - // show other errors from loading the full configuration tree. - diags = diags.Append(confDiags) - if confDiags.HasErrors() { - c.Ui.Error(strings.TrimSpace(errInitConfigError)) - c.showDiagnostics(diags) - return 1 - } - - // Now that we have loaded all modules, check the module tree for missing providers. - providersOutput, providersAbort, providerDiags := c.getProviders(config, state, flagUpgrade, flagPluginPath, flagLockfile) - diags = diags.Append(providerDiags) - if providersAbort || providerDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 - } - if providersOutput { - header = true - } - - // If we outputted information, then we need to output a newline - // so that our success message is nicely spaced out from prior text. - if header { - c.Ui.Output("") - } - - // If we accumulated any warnings along the way that weren't accompanied - // by errors then we'll output them here so that the success message is - // still the final thing shown. - c.showDiagnostics(diags) - _, cloud := back.(*cloud.Cloud) - output := outputInitSuccess - if cloud { - output = outputInitSuccessCloud - } - - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) - - if !c.RunningInAutomation { - // If we're not running in an automation wrapper, give the user - // some more detailed next steps that are appropriate for interactive - // shell usage. 
- output = outputInitSuccessCLI - if cloud { - output = outputInitSuccessCLICloud - } - c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) - } - return 0 -} - -func (c *InitCommand) getModules(path string, earlyRoot *configs.Module, upgrade bool) (output bool, abort bool, diags tfdiags.Diagnostics) { - if len(earlyRoot.ModuleCalls) == 0 { - // Nothing to do - return false, false, nil - } - - if upgrade { - c.Ui.Output(c.Colorize().Color("[reset][bold]Upgrading modules...")) - } else { - c.Ui.Output(c.Colorize().Color("[reset][bold]Initializing modules...")) - } - - hooks := uiModuleInstallHooks{ - Ui: c.Ui, - ShowLocalPaths: true, - } - - installAbort, installDiags := c.installModules(path, upgrade, hooks) - diags = diags.Append(installDiags) - - // At this point, installModules may have generated error diags or been - // aborted by SIGINT. In any case we continue and the manifest as best - // we can. - - // Since module installer has modified the module manifest on disk, we need - // to refresh the cache of it in the loader. - if c.configLoader != nil { - if err := c.configLoader.RefreshModules(); err != nil { - // Should never happen - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read module manifest", - fmt.Sprintf("After installing modules, Terraform could not re-read the manifest of installed modules. This is a bug in Terraform. %s.", err), - )) - } - } - - return true, installAbort, diags -} - -func (c *InitCommand) initCloud(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { - c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing Terraform Cloud...")) - - if len(extraConfig.AllItems()) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid command-line option", - "The -backend-config=... 
command line option is only for state backends, and is not applicable to Terraform Cloud-based configurations.\n\nTo change the set of workspaces associated with this configuration, edit the Cloud configuration block in the root module.", - )) - return nil, true, diags - } - - backendConfig := root.CloudConfig.ToBackendConfig() - - opts := &BackendOpts{ - Config: &backendConfig, - Init: true, - } - - back, backDiags := c.Backend(opts) - diags = diags.Append(backDiags) - return back, true, diags -} - -func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { - c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing the backend...")) - - var backendConfig *configs.Backend - var backendConfigOverride hcl.Body - if root.Backend != nil { - backendType := root.Backend.Type - if backendType == "cloud" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported backend type", - Detail: fmt.Sprintf("There is no explicit backend type named %q. 
To configure Terraform Cloud, declare a 'cloud' block instead.", backendType), - Subject: &root.Backend.TypeRange, - }) - return nil, true, diags - } - - bf := backendInit.Backend(backendType) - if bf == nil { - detail := fmt.Sprintf("There is no backend type named %q.", backendType) - if msg, removed := backendInit.RemovedBackends[backendType]; removed { - detail = msg - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported backend type", - Detail: detail, - Subject: &root.Backend.TypeRange, - }) - return nil, true, diags - } - - b := bf() - backendSchema := b.ConfigSchema() - backendConfig = root.Backend - - var overrideDiags tfdiags.Diagnostics - backendConfigOverride, overrideDiags = c.backendConfigOverrideBody(extraConfig, backendSchema) - diags = diags.Append(overrideDiags) - if overrideDiags.HasErrors() { - return nil, true, diags - } - } else { - // If the user supplied a -backend-config on the CLI but no backend - // block was found in the configuration, it's likely - but not - // necessarily - a mistake. Return a warning. - if !extraConfig.Empty() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Missing backend configuration", - `-backend-config was used without a "backend" block in the configuration. - -If you intended to override the default local backend configuration, -no action is required, but you may add an explicit backend block to your -configuration to clear this warning: - -terraform { - backend "local" {} -} - -However, if you intended to override a defined backend, please verify that -the backend configuration is present and valid. -`, - )) - } - } - - opts := &BackendOpts{ - Config: backendConfig, - ConfigOverride: backendConfigOverride, - Init: true, - } - - back, backDiags := c.Backend(opts) - diags = diags.Append(backDiags) - return back, true, diags -} - -// Load the complete module tree, and fetch any missing providers. -// This method outputs its own Ui. 
-func (c *InitCommand) getProviders(config *configs.Config, state *states.State, upgrade bool, pluginDirs []string, flagLockfile string) (output, abort bool, diags tfdiags.Diagnostics) { - // Dev overrides cause the result of "terraform init" to be irrelevant for - // any overridden providers, so we'll warn about it to avoid later - // confusion when Terraform ends up using a different provider than the - // lock file called for. - diags = diags.Append(c.providerDevOverrideInitWarnings()) - - // First we'll collect all the provider dependencies we can see in the - // configuration and the state. - reqs, hclDiags := config.ProviderRequirements() - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return false, true, diags - } - if state != nil { - stateReqs := state.ProviderRequirements() - reqs = reqs.Merge(stateReqs) - } - - for providerAddr := range reqs { - if providerAddr.IsLegacy() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid legacy provider address", - fmt.Sprintf( - "This configuration or its associated state refers to the unqualified provider %q.\n\nYou must complete the Terraform 0.13 upgrade process before upgrading to later versions.", - providerAddr.Type, - ), - )) - } - } - - previousLocks, moreDiags := c.lockedDependencies() - diags = diags.Append(moreDiags) - - if diags.HasErrors() { - return false, true, diags - } - - var inst *providercache.Installer - if len(pluginDirs) == 0 { - // By default we use a source that looks for providers in all of the - // standard locations, possibly customized by the user in CLI config. - inst = c.providerInstaller() - } else { - // If the user passes at least one -plugin-dir then that circumvents - // the usual sources and forces Terraform to consult only the given - // directories. Anything not available in one of those directories - // is not available for installation. 
- source := c.providerCustomLocalDirectorySource(pluginDirs) - inst = c.providerInstallerCustomSource(source) - - // The default (or configured) search paths are logged earlier, in provider_source.go - // Log that those are being overridden by the `-plugin-dir` command line options - log.Println("[DEBUG] init: overriding provider plugin search paths") - log.Printf("[DEBUG] will search for provider plugins in %s", pluginDirs) - } - - // Installation can be aborted by interruption signals - ctx, done := c.InterruptibleContext() - defer done() - - // We want to print out a nice warning if we don't manage to pull - // checksums for all our providers. This is tracked via callbacks - // and incomplete providers are stored here for later analysis. - var incompleteProviders []string - - // Because we're currently just streaming a series of events sequentially - // into the terminal, we're showing only a subset of the events to keep - // things relatively concise. Later it'd be nice to have a progress UI - // where statuses update in-place, but we can't do that as long as we - // are shimming our vt100 output to the legacy console API on Windows. 
- evts := &providercache.InstallerEvents{ - PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { - c.Ui.Output(c.Colorize().Color( - "\n[reset][bold]Initializing provider plugins...", - )) - }, - ProviderAlreadyInstalled: func(provider addrs.Provider, selectedVersion getproviders.Version) { - c.Ui.Info(fmt.Sprintf("- Using previously-installed %s v%s", provider.ForDisplay(), selectedVersion)) - }, - BuiltInProviderAvailable: func(provider addrs.Provider) { - c.Ui.Info(fmt.Sprintf("- %s is built in to Terraform", provider.ForDisplay())) - }, - BuiltInProviderFailure: func(provider addrs.Provider, err error) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid dependency on built-in provider", - fmt.Sprintf("Cannot use %s: %s.", provider.ForDisplay(), err), - )) - }, - QueryPackagesBegin: func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool) { - if locked { - c.Ui.Info(fmt.Sprintf("- Reusing previous version of %s from the dependency lock file", provider.ForDisplay())) - } else { - if len(versionConstraints) > 0 { - c.Ui.Info(fmt.Sprintf("- Finding %s versions matching %q...", provider.ForDisplay(), getproviders.VersionConstraintsString(versionConstraints))) - } else { - c.Ui.Info(fmt.Sprintf("- Finding latest version of %s...", provider.ForDisplay())) - } - } - }, - LinkFromCacheBegin: func(provider addrs.Provider, version getproviders.Version, cacheRoot string) { - c.Ui.Info(fmt.Sprintf("- Using %s v%s from the shared cache directory", provider.ForDisplay(), version)) - }, - FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation) { - c.Ui.Info(fmt.Sprintf("- Installing %s v%s...", provider.ForDisplay(), version)) - }, - QueryPackagesFailure: func(provider addrs.Provider, err error) { - switch errorTy := err.(type) { - case getproviders.ErrProviderNotFound: - sources := errorTy.Sources - displaySources := 
make([]string, len(sources)) - for i, source := range sources { - displaySources[i] = fmt.Sprintf(" - %s", source) - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to query available provider packages", - fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s\n\n%s", - provider.ForDisplay(), err, strings.Join(displaySources, "\n"), - ), - )) - case getproviders.ErrRegistryProviderNotKnown: - // We might be able to suggest an alternative provider to use - // instead of this one. - suggestion := fmt.Sprintf("\n\nAll modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on %s, run the following command:\n terraform providers", provider.ForDisplay()) - alternative := getproviders.MissingProviderSuggestion(ctx, provider, inst.ProviderSource(), reqs) - if alternative != provider { - suggestion = fmt.Sprintf( - "\n\nDid you intend to use %s? If so, you must specify that source address in each module which requires that provider. To see which modules are currently depending on %s, run the following command:\n terraform providers", - alternative.ForDisplay(), provider.ForDisplay(), - ) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to query available provider packages", - fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s%s", - provider.ForDisplay(), err, suggestion, - ), - )) - case getproviders.ErrHostNoProviders: - switch { - case errorTy.Hostname == svchost.Hostname("github.com") && !errorTy.HasOtherVersion: - // If a user copies the URL of a GitHub repository into - // the source argument and removes the schema to make it - // provider-address-shaped then that's one way we can end up - // here. We'll use a specialized error message in anticipation - // of that mistake. 
We only do this if github.com isn't a - // provider registry, to allow for the (admittedly currently - // rather unlikely) possibility that github.com starts being - // a real Terraform provider registry in the future. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider registry host", - fmt.Sprintf("The given source address %q specifies a GitHub repository rather than a Terraform provider. Refer to the documentation of the provider to find the correct source address to use.", - provider.String(), - ), - )) - - case errorTy.HasOtherVersion: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider registry host", - fmt.Sprintf("The host %q given in in provider source address %q does not offer a Terraform provider registry that is compatible with this Terraform version, but it may be compatible with a different Terraform version.", - errorTy.Hostname, provider.String(), - ), - )) - - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider registry host", - fmt.Sprintf("The host %q given in in provider source address %q does not offer a Terraform provider registry.", - errorTy.Hostname, provider.String(), - ), - )) - } - - case getproviders.ErrRequestCanceled: - // We don't attribute cancellation to any particular operation, - // but rather just emit a single general message about it at - // the end, by checking ctx.Err(). 
- - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to query available provider packages", - fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s", - provider.ForDisplay(), err, - ), - )) - } - - }, - QueryPackagesWarning: func(provider addrs.Provider, warnings []string) { - displayWarnings := make([]string, len(warnings)) - for i, warning := range warnings { - displayWarnings[i] = fmt.Sprintf("- %s", warning) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Additional provider information from registry", - fmt.Sprintf("The remote registry returned warnings for %s:\n%s", - provider.String(), - strings.Join(displayWarnings, "\n"), - ), - )) - }, - LinkFromCacheFailure: func(provider addrs.Provider, version getproviders.Version, err error) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to install provider from shared cache", - fmt.Sprintf("Error while importing %s v%s from the shared cache directory: %s.", provider.ForDisplay(), version, err), - )) - }, - FetchPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { - const summaryIncompatible = "Incompatible provider version" - switch err := err.(type) { - case getproviders.ErrProtocolNotSupported: - closestAvailable := err.Suggestion - switch { - case closestAvailable == getproviders.UnspecifiedVersion: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - summaryIncompatible, - fmt.Sprintf(errProviderVersionIncompatible, provider.String()), - )) - case version.GreaterThan(closestAvailable): - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - summaryIncompatible, - fmt.Sprintf(providerProtocolTooNew, provider.ForDisplay(), - version, tfversion.String(), closestAvailable, closestAvailable, - getproviders.VersionConstraintsString(reqs[provider]), - ), - )) - default: // version is less than closestAvailable - diags = diags.Append(tfdiags.Sourceless( - 
tfdiags.Error, - summaryIncompatible, - fmt.Sprintf(providerProtocolTooOld, provider.ForDisplay(), - version, tfversion.String(), closestAvailable, closestAvailable, - getproviders.VersionConstraintsString(reqs[provider]), - ), - )) - } - case getproviders.ErrPlatformNotSupported: - switch { - case err.MirrorURL != nil: - // If we're installing from a mirror then it may just be - // the mirror lacking the package, rather than it being - // unavailable from upstream. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - summaryIncompatible, - fmt.Sprintf( - "Your chosen provider mirror at %s does not have a %s v%s package available for your current platform, %s.\n\nProvider releases are separate from Terraform CLI releases, so this provider might not support your current platform. Alternatively, the mirror itself might have only a subset of the plugin packages available in the origin registry, at %s.", - err.MirrorURL, err.Provider, err.Version, err.Platform, - err.Provider.Hostname, - ), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - summaryIncompatible, - fmt.Sprintf( - "Provider %s v%s does not have a package available for your current platform, %s.\n\nProvider releases are separate from Terraform CLI releases, so not all providers are available for all platforms. Other versions of this provider may have different platforms supported.", - err.Provider, err.Version, err.Platform, - ), - )) - } - - case getproviders.ErrRequestCanceled: - // We don't attribute cancellation to any particular operation, - // but rather just emit a single general message about it at - // the end, by checking ctx.Err(). - - default: - // We can potentially end up in here under cancellation too, - // in spite of our getproviders.ErrRequestCanceled case above, - // because not all of the outgoing requests we do under the - // "fetch package" banner are source metadata requests. 
- // In that case we will emit a redundant error here about - // the request being cancelled, but we'll still detect it - // as a cancellation after the installer returns and do the - // normal cancellation handling. - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to install provider", - fmt.Sprintf("Error while installing %s v%s: %s", provider.ForDisplay(), version, err), - )) - } - }, - FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult) { - var keyID string - if authResult != nil && authResult.ThirdPartySigned() { - keyID = authResult.KeyID - } - if keyID != "" { - keyID = c.Colorize().Color(fmt.Sprintf(", key ID [reset][bold]%s[reset]", keyID)) - } - - c.Ui.Info(fmt.Sprintf("- Installed %s v%s (%s%s)", provider.ForDisplay(), version, authResult, keyID)) - }, - ProvidersLockUpdated: func(provider addrs.Provider, version getproviders.Version, localHashes []getproviders.Hash, signedHashes []getproviders.Hash, priorHashes []getproviders.Hash) { - // We're going to use this opportunity to track if we have any - // "incomplete" installs of providers. An incomplete install is - // when we are only going to write the local hashes into our lock - // file which means a `terraform init` command will fail in future - // when used on machines of a different architecture. - // - // We want to print a warning about this. - - if len(signedHashes) > 0 { - // If we have any signedHashes hashes then we don't worry - as - // we know we retrieved all available hashes for this version - // anyway. - return - } - - // If local hashes and prior hashes are exactly the same then - // it means we didn't record any signed hashes previously, and - // we know we're not adding any extra in now (because we already - // checked the signedHashes), so that's a problem. 
- // - // In the actual check here, if we have any priorHashes and those - // hashes are not the same as the local hashes then we're going to - // accept that this provider has been configured correctly. - if len(priorHashes) > 0 && !reflect.DeepEqual(localHashes, priorHashes) { - return - } - - // Now, either signedHashes is empty, or priorHashes is exactly the - // same as our localHashes which means we never retrieved the - // signedHashes previously. - // - // Either way, this is bad. Let's complain/warn. - incompleteProviders = append(incompleteProviders, provider.ForDisplay()) - }, - ProvidersFetched: func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult) { - thirdPartySigned := false - for _, authResult := range authResults { - if authResult.ThirdPartySigned() { - thirdPartySigned = true - break - } - } - if thirdPartySigned { - c.Ui.Info(fmt.Sprintf("\nPartner and community providers are signed by their developers.\n" + - "If you'd like to know more about provider signing, you can read about it here:\n" + - "https://www.terraform.io/docs/cli/plugins/signing.html")) - } - }, - } - ctx = evts.OnContext(ctx) - - mode := providercache.InstallNewProvidersOnly - if upgrade { - if flagLockfile == "readonly" { - c.Ui.Error("The -upgrade flag conflicts with -lockfile=readonly.") - return true, true, diags - } - - mode = providercache.InstallUpgrades - } - newLocks, err := inst.EnsureProviderVersions(ctx, previousLocks, reqs, mode) - if ctx.Err() == context.Canceled { - c.showDiagnostics(diags) - c.Ui.Error("Provider installation was canceled by an interrupt signal.") - return true, true, diags - } - if err != nil { - // The errors captured in "err" should be redundant with what we - // received via the InstallerEvents callbacks above, so we'll - // just return those as long as we have some. 
- if !diags.HasErrors() { - diags = diags.Append(err) - } - - return true, true, diags - } - - // If the provider dependencies have changed since the last run then we'll - // say a little about that in case the reader wasn't expecting a change. - // (When we later integrate module dependencies into the lock file we'll - // probably want to refactor this so that we produce one lock-file related - // message for all changes together, but this is here for now just because - // it's the smallest change relative to what came before it, which was - // a hidden JSON file specifically for tracking providers.) - if !newLocks.Equal(previousLocks) { - // if readonly mode - if flagLockfile == "readonly" { - // check if required provider dependences change - if !newLocks.EqualProviderAddress(previousLocks) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - `Provider dependency changes detected`, - `Changes to the required provider dependencies were detected, but the lock file is read-only. To use and record these requirements, run "terraform init" without the "-lockfile=readonly" flag.`, - )) - return true, true, diags - } - - // suppress updating the file to record any new information it learned, - // such as a hash using a new scheme. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - `Provider lock file not updated`, - `Changes to the provider selections were detected, but not saved in the .terraform.lock.hcl file. To record these selections, run "terraform init" without the "-lockfile=readonly" flag.`, - )) - return true, false, diags - } - - // Jump in here and add a warning if any of the providers are incomplete. - if len(incompleteProviders) > 0 { - // We don't really care about the order here, we just want the - // output to be deterministic. 
- sort.Slice(incompleteProviders, func(i, j int) bool { - return incompleteProviders[i] < incompleteProviders[j] - }) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - incompleteLockFileInformationHeader, - fmt.Sprintf( - incompleteLockFileInformationBody, - strings.Join(incompleteProviders, "\n - "), - getproviders.CurrentPlatform.String()))) - } - - if previousLocks.Empty() { - // A change from empty to non-empty is special because it suggests - // we're running "terraform init" for the first time against a - // new configuration. In that case we'll take the opportunity to - // say a little about what the dependency lock file is, for new - // users or those who are upgrading from a previous Terraform - // version that didn't have dependency lock files. - c.Ui.Output(c.Colorize().Color(` -Terraform has created a lock file [bold].terraform.lock.hcl[reset] to record the provider -selections it made above. Include this file in your version control repository -so that Terraform can guarantee to make the same selections by default when -you run "terraform init" in the future.`)) - } else { - c.Ui.Output(c.Colorize().Color(` -Terraform has made some changes to the provider dependency selections recorded -in the .terraform.lock.hcl file. Review those changes and commit them to your -version control system if they represent changes you intended to make.`)) - } - - moreDiags = c.replaceLockedDependencies(newLocks) - diags = diags.Append(moreDiags) - } - - return true, false, diags -} - -// backendConfigOverrideBody interprets the raw values of -backend-config -// arguments into a hcl Body that should override the backend settings given -// in the configuration. -// -// If the result is nil then no override needs to be provided. -// -// If the returned diagnostics contains errors then the returned body may be -// incomplete or invalid. 
-func (c *InitCommand) backendConfigOverrideBody(flags rawFlags, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { - items := flags.AllItems() - if len(items) == 0 { - return nil, nil - } - - var ret hcl.Body - var diags tfdiags.Diagnostics - synthVals := make(map[string]cty.Value) - - mergeBody := func(newBody hcl.Body) { - if ret == nil { - ret = newBody - } else { - ret = configs.MergeBodies(ret, newBody) - } - } - flushVals := func() { - if len(synthVals) == 0 { - return - } - newBody := configs.SynthBody("-backend-config=...", synthVals) - mergeBody(newBody) - synthVals = make(map[string]cty.Value) - } - - if len(items) == 1 && items[0].Value == "" { - // Explicitly remove all -backend-config options. - // We do this by setting an empty but non-nil ConfigOverrides. - return configs.SynthBody("-backend-config=''", synthVals), diags - } - - for _, item := range items { - eq := strings.Index(item.Value, "=") - - if eq == -1 { - // The value is interpreted as a filename. - newBody, fileDiags := c.loadHCLFile(item.Value) - diags = diags.Append(fileDiags) - if fileDiags.HasErrors() { - continue - } - // Generate an HCL body schema for the backend block. - var bodySchema hcl.BodySchema - for name := range schema.Attributes { - // We intentionally ignore the `Required` attribute here - // because backend config override files can be partial. The - // goal is to make sure we're not loading a file with - // extraneous attributes or blocks. - bodySchema.Attributes = append(bodySchema.Attributes, hcl.AttributeSchema{ - Name: name, - }) - } - for name, block := range schema.BlockTypes { - var labelNames []string - if block.Nesting == configschema.NestingMap { - labelNames = append(labelNames, "key") - } - bodySchema.Blocks = append(bodySchema.Blocks, hcl.BlockHeaderSchema{ - Type: name, - LabelNames: labelNames, - }) - } - // Verify that the file body matches the expected backend schema. 
- _, schemaDiags := newBody.Content(&bodySchema) - diags = diags.Append(schemaDiags) - if schemaDiags.HasErrors() { - continue - } - flushVals() // deal with any accumulated individual values first - mergeBody(newBody) - } else { - name := item.Value[:eq] - rawValue := item.Value[eq+1:] - attrS := schema.Attributes[name] - if attrS == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid backend configuration argument", - fmt.Sprintf("The backend configuration argument %q given on the command line is not expected for the selected backend type.", name), - )) - continue - } - value, valueDiags := configValueFromCLI(item.String(), rawValue, attrS.Type) - diags = diags.Append(valueDiags) - if valueDiags.HasErrors() { - continue - } - synthVals[name] = value - } - } - - flushVals() - - return ret, diags -} - -func (c *InitCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictDirs("") -} - -func (c *InitCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - "-backend": completePredictBoolean, - "-cloud": completePredictBoolean, - "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that - "-force-copy": complete.PredictNothing, - "-from-module": completePredictModuleSource, - "-get": completePredictBoolean, - "-input": completePredictBoolean, - "-lock": completePredictBoolean, - "-lock-timeout": complete.PredictAnything, - "-no-color": complete.PredictNothing, - "-plugin-dir": complete.PredictDirs(""), - "-reconfigure": complete.PredictNothing, - "-migrate-state": complete.PredictNothing, - "-upgrade": completePredictBoolean, - } -} - -func (c *InitCommand) Help() string { - helpText := ` -Usage: terraform [global options] init [options] - - Initialize a new or existing Terraform working directory by creating - initial files, loading any remote state, downloading modules, etc. 
- - This is the first command that should be run for any new or existing - Terraform configuration per machine. This sets up all the local data - necessary to run Terraform that is typically not committed to version - control. - - This command is always safe to run multiple times. Though subsequent runs - may give errors, this command will never delete your configuration or - state. Even so, if you have important information, please back it up prior - to running this command, just in case. - -Options: - - -backend=false Disable backend or Terraform Cloud initialization - for this configuration and use what was previously - initialized instead. - - aliases: -cloud=false - - -backend-config=path Configuration to be merged with what is in the - configuration file's 'backend' block. This can be - either a path to an HCL file with key/value - assignments (same format as terraform.tfvars) or a - 'key=value' format, and can be specified multiple - times. The backend type must be in the configuration - itself. - - -force-copy Suppress prompts about copying state data when - initializating a new state backend. This is - equivalent to providing a "yes" to all confirmation - prompts. - - -from-module=SOURCE Copy the contents of the given module into the target - directory before initialization. - - -get=false Disable downloading modules for this configuration. - - -input=false Disable interactive prompts. Note that some actions may - require interactive prompts and will error if input is - disabled. - - -lock=false Don't hold a state lock during backend migration. - This is dangerous if others might concurrently run - commands against the same workspace. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -plugin-dir Directory containing plugin binaries. This overrides all - default search paths for plugins, and prevents the - automatic installation of plugins. This flag can be used - multiple times. 
- - -reconfigure Reconfigure a backend, ignoring any saved - configuration. - - -migrate-state Reconfigure a backend, and attempt to migrate any - existing state. - - -upgrade Install the latest module and provider versions - allowed within configured constraints, overriding the - default behavior of selecting exactly the version - recorded in the dependency lockfile. - - -lockfile=MODE Set a dependency lockfile mode. - Currently only "readonly" is valid. - - -ignore-remote-version A rare option used for Terraform Cloud and the remote backend - only. Set this to ignore checking that the local and remote - Terraform versions use compatible state representations, making - an operation proceed even when there is a potential mismatch. - See the documentation on configuring Terraform with - Terraform Cloud for more information. - -` - return strings.TrimSpace(helpText) -} - -func (c *InitCommand) Synopsis() string { - return "Prepare your working directory for other commands" -} - -const errInitConfigError = ` -[reset]There are some problems with the configuration, described below. - -The Terraform configuration must be valid before initialization so that -Terraform can determine which modules and providers need to be installed. -` - -const errInitCopyNotEmpty = ` -The working directory already contains files. The -from-module option requires -an empty directory into which a copy of the referenced module will be placed. - -To initialize the configuration already in this working directory, omit the --from-module option. -` - -const outputInitEmpty = ` -[reset][bold]Terraform initialized in an empty directory![reset] - -The directory has no Terraform configuration files. You may begin working -with Terraform immediately by creating Terraform configuration files. 
-` - -const outputInitSuccess = ` -[reset][bold][green]Terraform has been successfully initialized![reset][green] -` - -const outputInitSuccessCloud = ` -[reset][bold][green]Terraform Cloud has been successfully initialized![reset][green] -` - -const outputInitSuccessCLI = `[reset][green] -You may now begin working with Terraform. Try running "terraform plan" to see -any changes that are required for your infrastructure. All Terraform commands -should now work. - -If you ever set or change modules or backend configuration for Terraform, -rerun this command to reinitialize your working directory. If you forget, other -commands will detect it and remind you to do so if necessary. -` - -const outputInitSuccessCLICloud = `[reset][green] -You may now begin working with Terraform Cloud. Try running "terraform plan" to -see any changes that are required for your infrastructure. - -If you ever set or change modules or Terraform Settings, run "terraform init" -again to reinitialize your working directory. -` - -// providerProtocolTooOld is a message sent to the CLI UI if the provider's -// supported protocol versions are too old for the user's version of terraform, -// but a newer version of the provider is compatible. -const providerProtocolTooOld = `Provider %q v%s is not compatible with Terraform %s. -Provider version %s is the latest compatible version. Select it with the following version constraint: - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on compatibility between provider and Terraform versions. -` - -// providerProtocolTooNew is a message sent to the CLI UI if the provider's -// supported protocol versions are too new for the user's version of terraform, -// and the user could either upgrade terraform or choose an older version of the -// provider. -const providerProtocolTooNew = `Provider %q v%s is not compatible with Terraform %s. 
-You need to downgrade to v%s or earlier. Select it with the following constraint: - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on compatibility between provider and Terraform versions. -Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. -` - -// No version of the provider is compatible. -const errProviderVersionIncompatible = `No compatible versions of provider %s were found.` - -// incompleteLockFileInformationHeader is the summary displayed to users when -// the lock file has only recorded local hashes. -const incompleteLockFileInformationHeader = `Incomplete lock file information for providers` - -// incompleteLockFileInformationBody is the body of text displayed to users when -// the lock file has only recorded local hashes. -const incompleteLockFileInformationBody = `Due to your customized provider installation methods, Terraform was forced to calculate lock file checksums locally for the following providers: - - %s - -The current .terraform.lock.hcl file only includes checksums for %s, so Terraform running on another platform will fail to install these providers. 
- -To calculate additional checksums for another platform, run: - terraform providers lock -platform=linux_amd64 -(where linux_amd64 is the platform to generate)` diff --git a/internal/command/init_test.go b/internal/command/init_test.go deleted file mode 100644 index e81db3e205b8..000000000000 --- a/internal/command/init_test.go +++ /dev/null @@ -1,2852 +0,0 @@ -package command - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/providercache" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -func TestInit_empty(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - os.MkdirAll(td, 0755) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } -} - -func TestInit_multipleArgs(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - os.MkdirAll(td, 0755) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: 
view, - }, - } - - args := []string{ - "bad", - "bad", - } - if code := c.Run(args); code != 1 { - t.Fatalf("bad: \n%s", ui.OutputWriter.String()) - } -} - -func TestInit_fromModule_cwdDest(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - os.MkdirAll(td, os.ModePerm) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{ - "-from-module=" + testFixturePath("init"), - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - if _, err := os.Stat(filepath.Join(td, "hello.tf")); err != nil { - t.Fatalf("err: %s", err) - } -} - -// https://github.com/hashicorp/terraform/issues/518 -func TestInit_fromModule_dstInSrc(t *testing.T) { - dir := t.TempDir() - if err := os.MkdirAll(dir, 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // Change to the temporary directory - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(dir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - if err := os.Mkdir("foo", os.ModePerm); err != nil { - t.Fatal(err) - } - - if _, err := os.Create("issue518.tf"); err != nil { - t.Fatalf("err: %s", err) - } - - if err := os.Chdir("foo"); err != nil { - t.Fatalf("err: %s", err) - } - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{ - "-from-module=./..", - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - if _, err := os.Stat(filepath.Join(dir, "foo", "issue518.tf")); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestInit_get(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - 
testCopyDir(t, testFixturePath("init-get"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Check output - output := ui.OutputWriter.String() - if !strings.Contains(output, "foo in foo") { - t.Fatalf("doesn't look like we installed module 'foo': %s", output) - } -} - -func TestInit_getUpgradeModules(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{ - "-get=true", - "-upgrade", - } - if code := c.Run(args); code != 0 { - t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) - } - - // Check output - output := ui.OutputWriter.String() - if !strings.Contains(output, "Upgrading modules...") { - t.Fatalf("doesn't look like get upgrade: %s", output) - } -} - -func TestInit_backend(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestInit_backendUnset(t *testing.T) { - // Create a temporary working directory 
that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - { - log.Printf("[TRACE] TestInit_backendUnset: beginning first init") - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - // Init - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - log.Printf("[TRACE] TestInit_backendUnset: first init complete") - t.Logf("First run output:\n%s", ui.OutputWriter.String()) - t.Logf("First run errors:\n%s", ui.ErrorWriter.String()) - - if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - log.Printf("[TRACE] TestInit_backendUnset: beginning second init") - - // Unset - if err := ioutil.WriteFile("main.tf", []byte(""), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-force-copy"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - log.Printf("[TRACE] TestInit_backendUnset: second init complete") - t.Logf("Second run output:\n%s", ui.OutputWriter.String()) - t.Logf("Second run errors:\n%s", ui.ErrorWriter.String()) - - s := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if !s.Backend.Empty() { - t.Fatal("should not have backend config") - } - } -} - -func TestInit_backendConfigFile(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-file"), td) - defer testChdir(t, td)() - - t.Run("good-config-file", func(t *testing.T) { - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ 
- Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - args := []string{"-backend-config", "input.config"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify we have our settings - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - }) - - // the backend config file must not be a full terraform block - t.Run("full-backend-config-file", func(t *testing.T) { - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - args := []string{"-backend-config", "backend.config"} - if code := c.Run(args); code != 1 { - t.Fatalf("expected error, got success\n") - } - if !strings.Contains(ui.ErrorWriter.String(), "Unsupported block type") { - t.Fatalf("wrong error: %s", ui.ErrorWriter) - } - }) - - // the backend config file must match the schema for the backend - t.Run("invalid-config-file", func(t *testing.T) { - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - args := []string{"-backend-config", "invalid.config"} - if code := c.Run(args); code != 1 { - t.Fatalf("expected error, got success\n") - } - if !strings.Contains(ui.ErrorWriter.String(), "Unsupported argument") { - t.Fatalf("wrong error: %s", ui.ErrorWriter) - } - }) - - // missing file is an error - t.Run("missing-config-file", func(t *testing.T) { - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } 
- args := []string{"-backend-config", "missing.config"} - if code := c.Run(args); code != 1 { - t.Fatalf("expected error, got success\n") - } - if !strings.Contains(ui.ErrorWriter.String(), "Failed to read file") { - t.Fatalf("wrong error: %s", ui.ErrorWriter) - } - }) - - // blank filename clears the backend config - t.Run("blank-config-file", func(t *testing.T) { - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - args := []string{"-backend-config=", "-migrate-state"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify the backend config is empty - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":null,"workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - }) - - // simulate the local backend having a required field which is not - // specified in the override file - t.Run("required-argument", func(t *testing.T) { - c := &InitCommand{} - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "path": { - Type: cty.String, - Optional: true, - }, - "workspace_dir": { - Type: cty.String, - Required: true, - }, - }, - } - flagConfigExtra := newRawFlags("-backend-config") - flagConfigExtra.Set("input.config") - _, diags := c.backendConfigOverrideBody(flagConfigExtra, schema) - if len(diags) != 0 { - t.Errorf("expected no diags, got: %s", diags.Err()) - } - }) -} - -func TestInit_backendConfigFilePowershellConfusion(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-file"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - 
testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - // SUBTLE: when using -flag=value with Powershell, unquoted values are - // broken into separate arguments. This results in the init command - // interpreting the flags as an empty backend-config setting (which is - // semantically valid!) followed by a custom configuration path. - // - // Adding the "=" here forces this codepath to be checked, and it should - // result in an early exit with a diagnostic that the provided - // configuration file is not a diretory. - args := []string{"-backend-config=", "./input.config"} - if code := c.Run(args); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - - output := ui.ErrorWriter.String() - if got, want := output, `Too many command line arguments`; !strings.Contains(got, want) { - t.Fatalf("wrong output\ngot:\n%s\n\nwant: message containing %q", got, want) - } -} - -func TestInit_backendReconfigure(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - "hashicorp/test": {"1.2.3"}, - }) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - ProviderSource: providerSource, - Ui: ui, - View: view, - }, - } - - // create some state, so the backend has something to migrate. 
- f, err := os.Create("foo") // this is the path" in the backend config - if err != nil { - t.Fatalf("err: %s", err) - } - err = writeStateForTesting(testState(), f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // now run init again, changing the path. - // The -reconfigure flag prevents init from migrating - // Without -reconfigure, the test fails since the backend asks for input on migrating state - args = []string{"-reconfigure", "-backend-config", "path=changed"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } -} - -func TestInit_backendConfigFileChange(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-file-change"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-backend-config", "input.config", "-migrate-state"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify we have our settings - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } -} - -func TestInit_backendMigrateWhileLocked(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-migrate-while-locked"), td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - "hashicorp/test": {"1.2.3"}, - }) - defer close() - - ui := 
new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - ProviderSource: providerSource, - Ui: ui, - View: view, - }, - } - - // Create some state, so the backend has something to migrate from - f, err := os.Create("local-state.tfstate") - if err != nil { - t.Fatalf("err: %s", err) - } - err = writeStateForTesting(testState(), f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Lock the source state - unlock, err := testLockState(t, testDataDir, "local-state.tfstate") - if err != nil { - t.Fatal(err) - } - defer unlock() - - // Attempt to migrate - args := []string{"-backend-config", "input.config", "-migrate-state", "-force-copy"} - if code := c.Run(args); code == 0 { - t.Fatalf("expected nonzero exit code: %s", ui.OutputWriter.String()) - } - - // Disabling locking should work - args = []string{"-backend-config", "input.config", "-migrate-state", "-force-copy", "-lock=false"} - if code := c.Run(args); code != 0 { - t.Fatalf("expected zero exit code, got %d: %s", code, ui.ErrorWriter.String()) - } -} - -func TestInit_backendConfigFileChangeWithExistingState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-file-change-migrate-existing"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - }, - } - - oldState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - - // we deliberately do not provide the answer for backend-migrate-copy-to-empty to trigger error - args := []string{"-migrate-state", "-backend-config", "input.config", "-input=true"} - if code := c.Run(args); code == 0 { - t.Fatal("expected error") - } - - // Read our backend config and verify new settings are not saved - state := testDataStateRead(t, 
filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"local-state.tfstate"}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - - // without changing config, hash should not change - if oldState.Backend.Hash != state.Backend.Hash { - t.Errorf("backend hash should not have changed\ngot: %d\nwant: %d", state.Backend.Hash, oldState.Backend.Hash) - } -} - -func TestInit_backendConfigKV(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-kv"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-backend-config", "path=hello"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify we have our settings - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } -} - -func TestInit_backendConfigKVReInit(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-config-kv"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-backend-config", "path=test"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - ui = new(cli.MockUi) - c = &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: 
ui, - View: view, - }, - } - - // a second init should require no changes, nor should it change the backend. - args = []string{"-input=false"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // make sure the backend is configured how we expect - configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - cfg := map[string]interface{}{} - if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { - t.Fatal(err) - } - if cfg["path"] != "test" { - t.Fatalf(`expected backend path="test", got path="%v"`, cfg["path"]) - } - - // override the -backend-config options by settings - args = []string{"-input=false", "-backend-config", "", "-migrate-state"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // make sure the backend is configured how we expect - configState = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - cfg = map[string]interface{}{} - if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { - t.Fatal(err) - } - if cfg["path"] != nil { - t.Fatalf(`expected backend path="", got path="%v"`, cfg["path"]) - } -} - -func TestInit_backendConfigKVReInitWithConfigDiff(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-input=false"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - ui = new(cli.MockUi) - c = &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - // a second init with identical config should require no changes, nor - // should it 
change the backend. - args = []string{"-input=false", "-backend-config", "path=foo"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // make sure the backend is configured how we expect - configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - cfg := map[string]interface{}{} - if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { - t.Fatal(err) - } - if cfg["path"] != "foo" { - t.Fatalf(`expected backend path="foo", got path="%v"`, cfg["foo"]) - } -} - -func TestInit_backendCli_no_config_block(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-backend-config", "path=test"} - if code := c.Run(args); code != 0 { - t.Fatalf("got exit status %d; want 0\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - - errMsg := ui.ErrorWriter.String() - if !strings.Contains(errMsg, "Warning: Missing backend configuration") { - t.Fatal("expected missing backend block warning, got", errMsg) - } -} - -func TestInit_backendReinitWithExtra(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend-empty"), td) - defer testChdir(t, td)() - - m := testMetaBackend(t, nil) - opts := &BackendOpts{ - ConfigOverride: configs.SynthBody("synth", map[string]cty.Value{ - "path": cty.StringVal("hello"), - }), - Init: true, - } - - _, cHash, err := m.backendConfig(opts) - if err != nil { - t.Fatal(err) - } - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := 
[]string{"-backend-config", "path=hello"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify we have our settings - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - - if state.Backend.Hash != uint64(cHash) { - t.Fatal("mismatched state and config backend hashes") - } - - // init again and make sure nothing changes - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - if state.Backend.Hash != uint64(cHash) { - t.Fatal("mismatched state and config backend hashes") - } -} - -// move option from config to -backend-config args -func TestInit_backendReinitConfigToExtra(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - if code := c.Run([]string{"-input=false"}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // Read our saved backend config and verify we have our settings - state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { - t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) - } - - backendHash := state.Backend.Hash - - // init again but remove the path option from 
the config - cfg := "terraform {\n backend \"local\" {}\n}\n" - if err := ioutil.WriteFile("main.tf", []byte(cfg), 0644); err != nil { - t.Fatal(err) - } - - // We need a fresh InitCommand here because the old one now has our configuration - // file cached inside it, so it won't re-read the modification we just made. - c = &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-input=false", "-backend-config=path=foo"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) - if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { - t.Errorf("wrong config after moving to arg\ngot: %s\nwant: %s", got, want) - } - - if state.Backend.Hash == backendHash { - t.Fatal("state.Backend.Hash was not updated") - } -} - -func TestInit_backendCloudInvalidOptions(t *testing.T) { - // There are various "terraform init" options that are only for - // traditional backends and not applicable to Terraform Cloud mode. - // For those, we want to return an explicit error rather than - // just silently ignoring them, so that users will be aware that - // Cloud mode has more of an expected "happy path" than the - // less-vertically-integrated backends do, and to avoid these - // unapplicable options becoming compatibility constraints for - // future evolution of Cloud mode. - - // We use the same starting fixture for all of these tests, but some - // of them will customize it a bit as part of their work. - setupTempDir := func(t *testing.T) func() { - t.Helper() - td := t.TempDir() - testCopyDir(t, testFixturePath("init-cloud-simple"), td) - unChdir := testChdir(t, td) - return unChdir - } - - // Some of the tests need a non-empty placeholder state file to work - // with. 
- fakeState := states.BuildState(func(cb *states.SyncState) { - // Having a root module output value should be enough for this - // state file to be considered "non-empty" and thus a candidate - // for migration. - cb.SetOutputValue( - addrs.OutputValue{Name: "a"}.Absolute(addrs.RootModuleInstance), - cty.True, - false, - ) - }) - fakeStateFile := &statefile.File{ - Lineage: "boop", - Serial: 4, - TerraformVersion: version.Must(version.NewVersion("1.0.0")), - State: fakeState, - } - var fakeStateBuf bytes.Buffer - err := statefile.WriteForTest(fakeStateFile, &fakeStateBuf) - if err != nil { - t.Error(err) - } - fakeStateBytes := fakeStateBuf.Bytes() - - t.Run("-backend-config", func(t *testing.T) { - defer setupTempDir(t)() - - // We have -backend-config as a pragmatic way to dynamically set - // certain settings of backends that tend to vary depending on - // where Terraform is running, such as AWS authentication profiles - // that are naturally local only to the machine where Terraform is - // running. Those needs don't apply to Terraform Cloud, because - // the remote workspace encapsulates all of the details of how - // operations and state work in that case, and so the Cloud - // configuration is only about which workspaces we'll be working - // with. - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-backend-config=anything"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -backend-config=... command line option is only for state backends, and -is not applicable to Terraform Cloud-based configurations. - -To change the set of workspaces associated with this configuration, edit the -Cloud configuration block in the root module. 
- -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-reconfigure", func(t *testing.T) { - defer setupTempDir(t)() - - // The -reconfigure option was originally imagined as a way to force - // skipping state migration when migrating between backends, but it - // has a historical flaw that it doesn't work properly when the - // initial situation is the implicit local backend with a state file - // present. The Terraform Cloud migration path has some additional - // steps to take care of more details automatically, and so - // -reconfigure doesn't really make sense in that context, particularly - // with its design bug with the handling of the implicit local backend. - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-reconfigure"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -reconfigure option is for in-place reconfiguration of state backends -only, and is not needed when changing Terraform Cloud settings. - -When using Terraform Cloud, initialization automatically activates any new -Cloud configuration settings. - -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-reconfigure when migrating in", func(t *testing.T) { - defer setupTempDir(t)() - - // We have a slightly different error message for the case where we - // seem to be trying to migrate to Terraform Cloud with existing - // state or explicit backend already present. 
- - if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { - t.Fatal(err) - } - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-reconfigure"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -reconfigure option is unsupported when migrating to Terraform Cloud, -because activating Terraform Cloud involves some additional steps. - -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-migrate-state", func(t *testing.T) { - defer setupTempDir(t)() - - // In Cloud mode, migrating in or out always proposes migrating state - // and changing configuration while staying in cloud mode never migrates - // state, so this special option isn't relevant. - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-migrate-state"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -migrate-state option is for migration between state backends only, and -is not applicable when using Terraform Cloud. - -State storage is handled automatically by Terraform Cloud and so the state -storage location is not configurable. - -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-migrate-state when migrating in", func(t *testing.T) { - defer setupTempDir(t)() - - // We have a slightly different error message for the case where we - // seem to be trying to migrate to Terraform Cloud with existing - // state or explicit backend already present. 
- - if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { - t.Fatal(err) - } - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-migrate-state"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -migrate-state option is for migration between state backends only, and -is not applicable when using Terraform Cloud. - -Terraform Cloud migration has additional steps, configured by interactive -prompts. - -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-force-copy", func(t *testing.T) { - defer setupTempDir(t)() - - // In Cloud mode, migrating in or out always proposes migrating state - // and changing configuration while staying in cloud mode never migrates - // state, so this special option isn't relevant. - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-force-copy"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -force-copy option is for migration between state backends only, and is -not applicable when using Terraform Cloud. - -State storage is handled automatically by Terraform Cloud and so the state -storage location is not configurable. 
- -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - t.Run("-force-copy when migrating in", func(t *testing.T) { - defer setupTempDir(t)() - - // We have a slightly different error message for the case where we - // seem to be trying to migrate to Terraform Cloud with existing - // state or explicit backend already present. - - if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { - t.Fatal(err) - } - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - Ui: ui, - View: view, - }, - } - args := []string{"-force-copy"} - if code := c.Run(args); code == 0 { - t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) - } - - gotStderr := ui.ErrorWriter.String() - wantStderr := ` -Error: Invalid command-line option - -The -force-copy option is for migration between state backends only, and is -not applicable when using Terraform Cloud. - -Terraform Cloud migration has additional steps, configured by interactive -prompts. 
- -` - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong error output\n%s", diff) - } - }) - -} - -// make sure inputFalse stops execution on migrate -func TestInit_inputFalse(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-backend"), td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{"-input=false", "-backend-config=path=foo"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter) - } - - // write different states for foo and bar - fooState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("foo"), - false, // not sensitive - ) - }) - if err := statemgr.NewFilesystem("foo").WriteState(fooState); err != nil { - t.Fatal(err) - } - barState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, // not sensitive - ) - }) - if err := statemgr.NewFilesystem("bar").WriteState(barState); err != nil { - t.Fatal(err) - } - - ui = new(cli.MockUi) - c = &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args = []string{"-input=false", "-backend-config=path=bar", "-migrate-state"} - if code := c.Run(args); code == 0 { - t.Fatal("init should have failed", ui.OutputWriter) - } - - errMsg := ui.ErrorWriter.String() - if !strings.Contains(errMsg, "interactive input is disabled") { - t.Fatal("expected input disabled error, got", errMsg) - } - - ui = new(cli.MockUi) - c = &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - // A missing input=false should abort rather 
than loop infinitely - args = []string{"-backend-config=path=baz"} - if code := c.Run(args); code == 0 { - t.Fatal("init should have failed", ui.OutputWriter) - } -} - -func TestInit_getProvider(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-providers"), td) - defer testChdir(t, td)() - - overrides := metaOverridesForProvider(testProvider()) - ui := new(cli.MockUi) - view, _ := testView(t) - providerSource, close := newMockProviderSource(t, map[string][]string{ - // looking for an exact version - "exact": {"1.2.3"}, - // config requires >= 2.3.3 - "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, - // config specifies - "between": {"3.4.5", "2.3.4", "1.2.3"}, - }) - defer close() - m := Meta{ - testingOverrides: overrides, - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-backend=false", // should be possible to install plugins without backend init - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // check that we got the providers for our config - exactPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/exact/1.2.3/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(exactPath); os.IsNotExist(err) { - t.Fatal("provider 'exact' not downloaded") - } - greaterThanPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/greater-than/2.3.4/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { - t.Fatal("provider 'greater-than' not downloaded") - } - betweenPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/between/2.3.4/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(betweenPath); os.IsNotExist(err) { - t.Fatal("provider 'between' not downloaded") - } - - t.Run("future-state", func(t *testing.T) { - // getting providers should fail if a state from 
a newer version of - // terraform exists, since InitCommand.getProviders needs to inspect that - // state. - - f, err := os.Create(DefaultStateFilename) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - // Construct a mock state file from the far future - type FutureState struct { - Version uint `json:"version"` - Lineage string `json:"lineage"` - TerraformVersion string `json:"terraform_version"` - Outputs map[string]interface{} `json:"outputs"` - Resources []map[string]interface{} `json:"resources"` - } - fs := &FutureState{ - Version: 999, - Lineage: "123-456-789", - TerraformVersion: "999.0.0", - Outputs: make(map[string]interface{}), - Resources: make([]map[string]interface{}, 0), - } - src, err := json.MarshalIndent(fs, "", " ") - if err != nil { - t.Fatalf("failed to marshal future state: %s", err) - } - src = append(src, '\n') - _, err = f.Write(src) - if err != nil { - t.Fatal(err) - } - - ui := new(cli.MockUi) - view, _ := testView(t) - m.Ui = ui - m.View = view - c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code == 0 { - t.Fatal("expected error, got:", ui.OutputWriter) - } - - errMsg := ui.ErrorWriter.String() - if !strings.Contains(errMsg, "Unsupported state file format") { - t.Fatal("unexpected error:", errMsg) - } - }) -} - -func TestInit_getProviderSource(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-provider-source"), td) - defer testChdir(t, td)() - - overrides := metaOverridesForProvider(testProvider()) - ui := new(cli.MockUi) - view, _ := testView(t) - providerSource, close := newMockProviderSource(t, map[string][]string{ - // looking for an exact version - "acme/alpha": {"1.2.3"}, - // config doesn't specify versions for other providers - "registry.example.com/acme/beta": {"1.0.0"}, - "gamma": {"2.0.0"}, - }) - defer close() - m := Meta{ - testingOverrides: overrides, - Ui: ui, - View: view, - ProviderSource: 
providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-backend=false", // should be possible to install plugins without backend init - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - // check that we got the providers for our config - exactPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/acme/alpha/1.2.3/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(exactPath); os.IsNotExist(err) { - t.Error("provider 'alpha' not downloaded") - } - greaterThanPath := fmt.Sprintf(".terraform/providers/registry.example.com/acme/beta/1.0.0/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { - t.Error("provider 'beta' not downloaded") - } - betweenPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/gamma/2.0.0/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(betweenPath); os.IsNotExist(err) { - t.Error("provider 'gamma' not downloaded") - } -} - -func TestInit_getProviderLegacyFromState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-provider-legacy-from-state"), td) - defer testChdir(t, td)() - - overrides := metaOverridesForProvider(testProvider()) - ui := new(cli.MockUi) - view, _ := testView(t) - providerSource, close := newMockProviderSource(t, map[string][]string{ - "acme/alpha": {"1.2.3"}, - }) - defer close() - m := Meta{ - testingOverrides: overrides, - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - - // Expect this diagnostic output - wants := []string{ - "Invalid legacy provider address", - "You must complete the Terraform 0.13 upgrade process", - } - got := ui.ErrorWriter.String() - for _, 
want := range wants { - if !strings.Contains(got, want) { - t.Fatalf("expected output to contain %q, got:\n\n%s", want, got) - } - } -} - -func TestInit_getProviderInvalidPackage(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-provider-invalid-package"), td) - defer testChdir(t, td)() - - overrides := metaOverridesForProvider(testProvider()) - ui := new(cli.MockUi) - view, _ := testView(t) - - // create a provider source which allows installing an invalid package - addr := addrs.MustParseProviderSourceString("invalid/package") - version := getproviders.MustParseVersion("1.0.0") - meta, close, err := getproviders.FakeInstallablePackageMeta( - addr, - version, - getproviders.VersionList{getproviders.MustParseVersion("5.0")}, - getproviders.CurrentPlatform, - "terraform-package", // should be "terraform-provider-package" - ) - defer close() - if err != nil { - t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), version, err) - } - providerSource := getproviders.NewMockSource([]getproviders.PackageMeta{meta}, nil) - - m := Meta{ - testingOverrides: overrides, - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-backend=false", // should be possible to install plugins without backend init - } - if code := c.Run(args); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - - // invalid provider should be installed - packagePath := fmt.Sprintf(".terraform/providers/registry.terraform.io/invalid/package/1.0.0/%s/terraform-package", getproviders.CurrentPlatform) - if _, err := os.Stat(packagePath); os.IsNotExist(err) { - t.Fatal("provider 'invalid/package' not downloaded") - } - - wantErrors := []string{ - "Failed to install provider", - "could not find executable file starting with 
terraform-provider-package", - } - got := ui.ErrorWriter.String() - for _, wantError := range wantErrors { - if !strings.Contains(got, wantError) { - t.Fatalf("missing error:\nwant: %q\ngot:\n%s", wantError, got) - } - } -} - -func TestInit_getProviderDetectedLegacy(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-provider-detected-legacy"), td) - defer testChdir(t, td)() - - // We need to construct a multisource with a mock source and a registry - // source: the mock source will return ErrRegistryProviderNotKnown for an - // unknown provider, and the registry source will allow us to look up the - // appropriate namespace if possible. - providerSource, psClose := newMockProviderSource(t, map[string][]string{ - "hashicorp/foo": {"1.2.3"}, - "terraform-providers/baz": {"2.3.4"}, // this will not be installed - }) - defer psClose() - registrySource, rsClose := testRegistrySource(t) - defer rsClose() - multiSource := getproviders.MultiSource{ - {Source: providerSource}, - {Source: registrySource}, - } - - ui := new(cli.MockUi) - view, _ := testView(t) - m := Meta{ - Ui: ui, - View: view, - ProviderSource: multiSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-backend=false", // should be possible to install plugins without backend init - } - if code := c.Run(args); code == 0 { - t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) - } - - // foo should be installed - fooPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/hashicorp/foo/1.2.3/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(fooPath); os.IsNotExist(err) { - t.Error("provider 'foo' not installed") - } - // baz should not be installed - bazPath := fmt.Sprintf(".terraform/providers/registry.terraform.io/terraform-providers/baz/2.3.4/%s", getproviders.CurrentPlatform) - if _, err := os.Stat(bazPath); !os.IsNotExist(err) { - t.Error("provider 'baz' installed, 
but should not be") - } - - // error output is the main focus of this test - errOutput := ui.ErrorWriter.String() - errors := []string{ - "Failed to query available provider packages", - "Could not retrieve the list of available versions", - "registry.terraform.io/hashicorp/baz", - "registry.terraform.io/hashicorp/frob", - } - for _, want := range errors { - if !strings.Contains(errOutput, want) { - t.Fatalf("expected error %q: %s", want, errOutput) - } - } -} - -func TestInit_providerSource(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-required-providers"), td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - "test": {"1.2.3", "1.2.4"}, - "test-beta": {"1.2.4"}, - "source": {"1.2.2", "1.2.3", "1.2.1"}, - }) - defer close() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{} - - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - if strings.Contains(ui.OutputWriter.String(), "Terraform has initialized, but configuration upgrades may be needed") { - t.Fatalf("unexpected \"configuration upgrade\" warning in output") - } - - cacheDir := m.providerLocalCacheDir() - gotPackages := cacheDir.AllAvailablePackages() - wantPackages := map[addrs.Provider][]providercache.CachedProvider{ - addrs.NewDefaultProvider("test"): { - { - Provider: addrs.NewDefaultProvider("test"), - Version: getproviders.MustParseVersion("1.2.3"), - PackageDir: expectedPackageInstallPath("test", "1.2.3", false), - }, - }, - addrs.NewDefaultProvider("test-beta"): { - { - Provider: addrs.NewDefaultProvider("test-beta"), - Version: getproviders.MustParseVersion("1.2.4"), - PackageDir: expectedPackageInstallPath("test-beta", "1.2.4", 
false), - }, - }, - addrs.NewDefaultProvider("source"): { - { - Provider: addrs.NewDefaultProvider("source"), - Version: getproviders.MustParseVersion("1.2.3"), - PackageDir: expectedPackageInstallPath("source", "1.2.3", false), - }, - }, - } - if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { - t.Errorf("wrong cache directory contents after upgrade\n%s", diff) - } - - locks, err := m.lockedDependencies() - if err != nil { - t.Fatalf("failed to get locked dependencies: %s", err) - } - gotProviderLocks := locks.AllProviders() - wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ - addrs.NewDefaultProvider("test-beta"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("test-beta"), - getproviders.MustParseVersion("1.2.4"), - getproviders.MustParseVersionConstraints("= 1.2.4"), - []getproviders.Hash{ - getproviders.HashScheme1.New("see6W06w09Ea+AobFJ+mbvPTie6ASqZAAdlFZbs8BSM="), - }, - ), - addrs.NewDefaultProvider("test"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("test"), - getproviders.MustParseVersion("1.2.3"), - getproviders.MustParseVersionConstraints("= 1.2.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno="), - }, - ), - addrs.NewDefaultProvider("source"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("source"), - getproviders.MustParseVersion("1.2.3"), - getproviders.MustParseVersionConstraints("= 1.2.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("myS3qb3px3tRBq1ZWRYJeUH+kySWpBc0Yy8rw6W7/p4="), - }, - ), - } - - if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { - t.Errorf("wrong version selections after upgrade\n%s", diff) - } - - if got, want := ui.OutputWriter.String(), "Installed hashicorp/test v1.2.3 (verified checksum)"; !strings.Contains(got, want) { - t.Fatalf("unexpected output: %s\nexpected to include %q", got, want) - } - if got, want := ui.ErrorWriter.String(), "\n - hashicorp/source\n - 
hashicorp/test\n - hashicorp/test-beta"; !strings.Contains(got, want) { - t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) - } -} - -func TestInit_cancelModules(t *testing.T) { - // This test runs `terraform init` as if SIGINT (or similar on other - // platforms) were sent to it, testing that it is interruptible. - - td := t.TempDir() - testCopyDir(t, testFixturePath("init-registry-module"), td) - defer testChdir(t, td)() - - // Our shutdown channel is pre-closed so init will exit as soon as it - // starts a cancelable portion of the process. - shutdownCh := make(chan struct{}) - close(shutdownCh) - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ShutdownCh: shutdownCh, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{} - - if code := c.Run(args); code == 0 { - t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) - } - - if got, want := ui.ErrorWriter.String(), `Module installation was canceled by an interrupt signal`; !strings.Contains(got, want) { - t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) - } -} - -func TestInit_cancelProviders(t *testing.T) { - // This test runs `terraform init` as if SIGINT (or similar on other - // platforms) were sent to it, testing that it is interruptible. - - td := t.TempDir() - testCopyDir(t, testFixturePath("init-required-providers"), td) - defer testChdir(t, td)() - - // Use a provider source implementation which is designed to hang indefinitely, - // to avoid a race between the closed shutdown channel and the provider source - // operations. - providerSource := &getproviders.HangingSource{} - - // Our shutdown channel is pre-closed so init will exit as soon as it - // starts a cancelable portion of the process. 
- shutdownCh := make(chan struct{}) - close(shutdownCh) - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - ShutdownCh: shutdownCh, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{} - - if code := c.Run(args); code == 0 { - t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) - } - // Currently the first operation that is cancelable is provider - // installation, so our error message comes from there. If we - // make the earlier steps cancelable in future then it'd be - // expected for this particular message to change. - if got, want := ui.ErrorWriter.String(), `Provider installation was canceled by an interrupt signal`; !strings.Contains(got, want) { - t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) - } -} - -func TestInit_getUpgradePlugins(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-providers"), td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - // looking for an exact version - "exact": {"1.2.3"}, - // config requires >= 2.3.3 - "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, - // config specifies > 1.0.0 , < 3.0.0 - "between": {"3.4.5", "2.3.4", "1.2.3"}, - }) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - installFakeProviderPackages(t, &m, map[string][]string{ - "exact": {"0.0.1"}, - "greater-than": {"2.3.3"}, - }) - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-upgrade=true", - } - if code := c.Run(args); code != 0 { - t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) - } - - cacheDir := m.providerLocalCacheDir() - gotPackages := 
cacheDir.AllAvailablePackages() - wantPackages := map[addrs.Provider][]providercache.CachedProvider{ - // "between" wasn't previously installed at all, so we installed - // the newest available version that matched the version constraints. - addrs.NewDefaultProvider("between"): { - { - Provider: addrs.NewDefaultProvider("between"), - Version: getproviders.MustParseVersion("2.3.4"), - PackageDir: expectedPackageInstallPath("between", "2.3.4", false), - }, - }, - // The existing version of "exact" did not match the version constraints, - // so we installed what the configuration selected as well. - addrs.NewDefaultProvider("exact"): { - { - Provider: addrs.NewDefaultProvider("exact"), - Version: getproviders.MustParseVersion("1.2.3"), - PackageDir: expectedPackageInstallPath("exact", "1.2.3", false), - }, - // Previous version is still there, but not selected - { - Provider: addrs.NewDefaultProvider("exact"), - Version: getproviders.MustParseVersion("0.0.1"), - PackageDir: expectedPackageInstallPath("exact", "0.0.1", false), - }, - }, - // The existing version of "greater-than" _did_ match the constraints, - // but a newer version was available and the user specified - // -upgrade and so we upgraded it anyway. 
- addrs.NewDefaultProvider("greater-than"): { - { - Provider: addrs.NewDefaultProvider("greater-than"), - Version: getproviders.MustParseVersion("2.3.4"), - PackageDir: expectedPackageInstallPath("greater-than", "2.3.4", false), - }, - // Previous version is still there, but not selected - { - Provider: addrs.NewDefaultProvider("greater-than"), - Version: getproviders.MustParseVersion("2.3.3"), - PackageDir: expectedPackageInstallPath("greater-than", "2.3.3", false), - }, - }, - } - if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { - t.Errorf("wrong cache directory contents after upgrade\n%s", diff) - } - - locks, err := m.lockedDependencies() - if err != nil { - t.Fatalf("failed to get locked dependencies: %s", err) - } - gotProviderLocks := locks.AllProviders() - wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ - addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("between"), - getproviders.MustParseVersion("2.3.4"), - getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), - []getproviders.Hash{ - getproviders.HashScheme1.New("JVqAvZz88A+hS2wHVtTWQkHaxoA/LrUAz0H3jPBWPIA="), - }, - ), - addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("exact"), - getproviders.MustParseVersion("1.2.3"), - getproviders.MustParseVersionConstraints("= 1.2.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("H1TxWF8LyhBb6B4iUdKhLc/S9sC/jdcrCykpkbGcfbg="), - }, - ), - addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("greater-than"), - getproviders.MustParseVersion("2.3.4"), - getproviders.MustParseVersionConstraints(">= 2.3.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("SJPpXx/yoFE/W+7eCipjJ+G21xbdnTBD7lWodZ8hWkU="), - }, - ), - } - if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { - t.Errorf("wrong version selections after upgrade\n%s", diff) - } -} - -func 
TestInit_getProviderMissing(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-providers"), td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - // looking for exact version 1.2.3 - "exact": {"1.2.4"}, - // config requires >= 2.3.3 - "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, - // config specifies - "between": {"3.4.5", "2.3.4", "1.2.3"}, - }) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{} - if code := c.Run(args); code == 0 { - t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) - } - - if !strings.Contains(ui.ErrorWriter.String(), "no available releases match") { - t.Fatalf("unexpected error output: %s", ui.ErrorWriter) - } -} - -func TestInit_checkRequiredVersion(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-check-required-version"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - errStr := ui.ErrorWriter.String() - if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { - t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) - } - if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { - t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) - } -} - -// Verify that init will error 
out with an invalid version constraint, even if -// there are other invalid configuration constructs. -func TestInit_checkRequiredVersionFirst(t *testing.T) { - t.Run("root_module", func(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-check-required-version-first"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - errStr := ui.ErrorWriter.String() - if !strings.Contains(errStr, `Unsupported Terraform Core version`) { - t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) - } - }) - t.Run("sub_module", func(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-check-required-version-first-module"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - }, - } - - args := []string{} - if code := c.Run(args); code != 1 { - t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) - } - errStr := ui.ErrorWriter.String() - if !strings.Contains(errStr, `Unsupported Terraform Core version`) { - t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) - } - }) -} - -func TestInit_providerLockFile(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("init-provider-lock-file"), td) - // The temporary directory does not have write permission (dr-xr-xr-x) after the copy - defer os.Chmod(td, os.ModePerm) - defer testChdir(t, td)() - - 
providerSource, close := newMockProviderSource(t, map[string][]string{ - "test": {"1.2.3"}, - }) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - lockFile := ".terraform.lock.hcl" - buf, err := ioutil.ReadFile(lockFile) - if err != nil { - t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) - } - buf = bytes.TrimSpace(buf) - // The hash in here is for the fake package that newMockProviderSource produces - // (so it'll change if newMockProviderSource starts producing different contents) - wantLockFile := strings.TrimSpace(` -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/hashicorp/test" { - version = "1.2.3" - constraints = "1.2.3" - hashes = [ - "h1:wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno=", - ] -} -`) - if diff := cmp.Diff(wantLockFile, string(buf)); diff != "" { - t.Errorf("wrong dependency lock file contents\n%s", diff) - } - - // Make the local directory read-only, and verify that rerunning init - // succeeds, to ensure that we don't try to rewrite an unchanged lock file - os.Chmod(".", 0555) - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } -} - -func TestInit_providerLockFileReadonly(t *testing.T) { - // The hash in here is for the fake package that newMockProviderSource produces - // (so it'll change if newMockProviderSource starts producing different contents) - inputLockFile := strings.TrimSpace(` -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/hashicorp/test" { - version = "1.2.3" - constraints = "1.2.3" - hashes = [ - "zh:e919b507a91e23a00da5c2c4d0b64bcc7900b68d43b3951ac0f6e5d80387fbdc", - ] -} -`) - - badLockFile := strings.TrimSpace(` -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/hashicorp/test" { - version = "1.2.3" - constraints = "1.2.3" - hashes = [ - "zh:0000000000000000000000000000000000000000000000000000000000000000", - ] -} -`) - - updatedLockFile := strings.TrimSpace(` -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/hashicorp/test" { - version = "1.2.3" - constraints = "1.2.3" - hashes = [ - "h1:wlbEC2mChQZ2hhgUhl6SeVLPP7fMqOFUZAQhQ9GIIno=", - "zh:e919b507a91e23a00da5c2c4d0b64bcc7900b68d43b3951ac0f6e5d80387fbdc", - ] -} -`) - - emptyUpdatedLockFile := strings.TrimSpace(` -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
-`) - - cases := []struct { - desc string - fixture string - providers map[string][]string - input string - args []string - ok bool - want string - }{ - { - desc: "default", - fixture: "init-provider-lock-file", - providers: map[string][]string{"test": {"1.2.3"}}, - input: inputLockFile, - args: []string{}, - ok: true, - want: updatedLockFile, - }, - { - desc: "unused provider", - fixture: "init-provider-now-unused", - providers: map[string][]string{"test": {"1.2.3"}}, - input: inputLockFile, - args: []string{}, - ok: true, - want: emptyUpdatedLockFile, - }, - { - desc: "readonly", - fixture: "init-provider-lock-file", - providers: map[string][]string{"test": {"1.2.3"}}, - input: inputLockFile, - args: []string{"-lockfile=readonly"}, - ok: true, - want: inputLockFile, - }, - { - desc: "unused provider readonly", - fixture: "init-provider-now-unused", - providers: map[string][]string{"test": {"1.2.3"}}, - input: inputLockFile, - args: []string{"-lockfile=readonly"}, - ok: false, - want: inputLockFile, - }, - { - desc: "conflict", - fixture: "init-provider-lock-file", - providers: map[string][]string{"test": {"1.2.3"}}, - input: inputLockFile, - args: []string{"-lockfile=readonly", "-upgrade"}, - ok: false, - want: inputLockFile, - }, - { - desc: "checksum mismatch", - fixture: "init-provider-lock-file", - providers: map[string][]string{"test": {"1.2.3"}}, - input: badLockFile, - args: []string{"-lockfile=readonly"}, - ok: false, - want: badLockFile, - }, - { - desc: "reject to change required provider dependences", - fixture: "init-provider-lock-file-readonly-add", - providers: map[string][]string{ - "test": {"1.2.3"}, - "foo": {"1.0.0"}, - }, - input: inputLockFile, - args: []string{"-lockfile=readonly"}, - ok: false, - want: inputLockFile, - }, - } - - for _, tc := range cases { - t.Run(tc.desc, func(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath(tc.fixture), td) - defer testChdir(t, 
td)() - - providerSource, close := newMockProviderSource(t, tc.providers) - defer close() - - ui := new(cli.MockUi) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - // write input lockfile - lockFile := ".terraform.lock.hcl" - if err := ioutil.WriteFile(lockFile, []byte(tc.input), 0644); err != nil { - t.Fatalf("failed to write input lockfile: %s", err) - } - - code := c.Run(tc.args) - if tc.ok && code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - if !tc.ok && code == 0 { - t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) - } - - buf, err := ioutil.ReadFile(lockFile) - if err != nil { - t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) - } - buf = bytes.TrimSpace(buf) - if diff := cmp.Diff(tc.want, string(buf)); diff != "" { - t.Errorf("wrong dependency lock file contents\n%s", diff) - } - }) - } -} - -func TestInit_pluginDirReset(t *testing.T) { - td := testTempDir(t) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - // An empty provider source - providerSource, close := newMockProviderSource(t, nil) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - }, - } - - // make our vendor paths - pluginPath := []string{"a", "b", "c"} - for _, p := range pluginPath { - if err := os.MkdirAll(p, 0755); err != nil { - t.Fatal(err) - } - } - - // run once and save the -plugin-dir - args := []string{"-plugin-dir", "a"} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter) - } - - pluginDirs, err := c.loadPluginPath() - if err != nil { - t.Fatal(err) - } - - if len(pluginDirs) != 1 || pluginDirs[0] != "a" { - t.Fatalf(`expected plugin dir ["a"], got %q`, pluginDirs) - } - - ui = new(cli.MockUi) - c = 
&InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, // still empty - }, - } - - // make sure we remove the plugin-dir record - args = []string{"-plugin-dir="} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter) - } - - pluginDirs, err = c.loadPluginPath() - if err != nil { - t.Fatal(err) - } - - if len(pluginDirs) != 0 { - t.Fatalf("expected no plugin dirs got %q", pluginDirs) - } -} - -// Test user-supplied -plugin-dir -func TestInit_pluginDirProviders(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-providers"), td) - defer testChdir(t, td)() - - // An empty provider source - providerSource, close := newMockProviderSource(t, nil) - defer close() - - ui := new(cli.MockUi) - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - // make our vendor paths - pluginPath := []string{"a", "b", "c"} - for _, p := range pluginPath { - if err := os.MkdirAll(p, 0755); err != nil { - t.Fatal(err) - } - } - - // We'll put some providers in our plugin dirs. To do this, we'll pretend - // for a moment that they are provider cache directories just because that - // allows us to lean on our existing test helper functions to do this. 
- for i, def := range [][]string{ - {"exact", "1.2.3"}, - {"greater-than", "2.3.4"}, - {"between", "2.3.4"}, - } { - name, version := def[0], def[1] - dir := providercache.NewDir(pluginPath[i]) - installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ - name: {version}, - }) - } - - args := []string{ - "-plugin-dir", "a", - "-plugin-dir", "b", - "-plugin-dir", "c", - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter) - } - - locks, err := m.lockedDependencies() - if err != nil { - t.Fatalf("failed to get locked dependencies: %s", err) - } - gotProviderLocks := locks.AllProviders() - wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ - addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("between"), - getproviders.MustParseVersion("2.3.4"), - getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), - []getproviders.Hash{ - getproviders.HashScheme1.New("JVqAvZz88A+hS2wHVtTWQkHaxoA/LrUAz0H3jPBWPIA="), - }, - ), - addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("exact"), - getproviders.MustParseVersion("1.2.3"), - getproviders.MustParseVersionConstraints("= 1.2.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("H1TxWF8LyhBb6B4iUdKhLc/S9sC/jdcrCykpkbGcfbg="), - }, - ), - addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( - addrs.NewDefaultProvider("greater-than"), - getproviders.MustParseVersion("2.3.4"), - getproviders.MustParseVersionConstraints(">= 2.3.3"), - []getproviders.Hash{ - getproviders.HashScheme1.New("SJPpXx/yoFE/W+7eCipjJ+G21xbdnTBD7lWodZ8hWkU="), - }, - ), - } - if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { - t.Errorf("wrong version selections after upgrade\n%s", diff) - } - - // -plugin-dir overrides the normal provider source, so it should not have - // seen any calls at all. 
- if calls := providerSource.CallLog(); len(calls) > 0 { - t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) - } -} - -// Test user-supplied -plugin-dir doesn't allow auto-install -func TestInit_pluginDirProvidersDoesNotGet(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-get-providers"), td) - defer testChdir(t, td)() - - // Our provider source has a suitable package for "between" available, - // but we should ignore it because -plugin-dir is set and thus this - // source is temporarily overridden during install. - providerSource, close := newMockProviderSource(t, map[string][]string{ - "between": {"2.3.4"}, - }) - defer close() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - // make our vendor paths - pluginPath := []string{"a", "b"} - for _, p := range pluginPath { - if err := os.MkdirAll(p, 0755); err != nil { - t.Fatal(err) - } - } - - // We'll put some providers in our plugin dirs. To do this, we'll pretend - // for a moment that they are provider cache directories just because that - // allows us to lean on our existing test helper functions to do this. 
- for i, def := range [][]string{ - {"exact", "1.2.3"}, - {"greater-than", "2.3.4"}, - } { - name, version := def[0], def[1] - dir := providercache.NewDir(pluginPath[i]) - installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ - name: {version}, - }) - } - - args := []string{ - "-plugin-dir", "a", - "-plugin-dir", "b", - } - if code := c.Run(args); code == 0 { - // should have been an error - t.Fatalf("succeeded; want error\nstdout:\n%s\nstderr\n%s", ui.OutputWriter, ui.ErrorWriter) - } - - // The error output should mention the "between" provider but should not - // mention either the "exact" or "greater-than" provider, because the - // latter two are available via the -plugin-dir directories. - errStr := ui.ErrorWriter.String() - if subStr := "hashicorp/between"; !strings.Contains(errStr, subStr) { - t.Errorf("error output should mention the 'between' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "hashicorp/exact"; strings.Contains(errStr, subStr) { - t.Errorf("error output should not mention the 'exact' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "hashicorp/greater-than"; strings.Contains(errStr, subStr) { - t.Errorf("error output should not mention the 'greater-than' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) - } - - if calls := providerSource.CallLog(); len(calls) > 0 { - t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) - } -} - -// Verify that plugin-dir doesn't prevent discovery of internal providers -func TestInit_pluginDirWithBuiltIn(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-internal"), td) - defer testChdir(t, td)() - - // An empty provider source - providerSource, close := newMockProviderSource(t, nil) - defer close() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - 
} - - c := &InitCommand{ - Meta: m, - } - - args := []string{"-plugin-dir", "./"} - if code := c.Run(args); code != 0 { - t.Fatalf("error: %s", ui.ErrorWriter) - } - - outputStr := ui.OutputWriter.String() - if subStr := "terraform.io/builtin/terraform is built in to Terraform"; !strings.Contains(outputStr, subStr) { - t.Errorf("output should mention the terraform provider\nwant substr: %s\ngot:\n%s", subStr, outputStr) - } -} - -func TestInit_invalidBuiltInProviders(t *testing.T) { - // This test fixture includes two invalid provider dependencies: - // - an implied dependency on terraform.io/builtin/terraform with an - // explicit version number, which is not allowed because it's builtin. - // - an explicit dependency on terraform.io/builtin/nonexist, which does - // not exist at all. - td := t.TempDir() - testCopyDir(t, testFixturePath("init-internal-invalid"), td) - defer testChdir(t, td)() - - // An empty provider source - providerSource, close := newMockProviderSource(t, nil) - defer close() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - View: view, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code == 0 { - t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) - } - - errStr := ui.ErrorWriter.String() - if subStr := "Cannot use terraform.io/builtin/terraform: built-in"; !strings.Contains(errStr, subStr) { - t.Errorf("error output should mention the terraform provider\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "Cannot use terraform.io/builtin/nonexist: this Terraform release"; !strings.Contains(errStr, subStr) { - t.Errorf("error output should mention the 'nonexist' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) - } -} - -func TestInit_invalidSyntaxNoBackend(t *testing.T) { - td := t.TempDir() - testCopyDir(t, 
testFixturePath("init-syntax-invalid-no-backend"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - Ui: ui, - View: view, - } - - c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code == 0 { - t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) - } - - errStr := ui.ErrorWriter.String() - if subStr := "There are some problems with the configuration, described below"; !strings.Contains(errStr, subStr) { - t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { - t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) - } -} - -func TestInit_invalidSyntaxWithBackend(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-syntax-invalid-with-backend"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - Ui: ui, - View: view, - } - - c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code == 0 { - t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) - } - - errStr := ui.ErrorWriter.String() - if subStr := "There are some problems with the configuration, described below"; !strings.Contains(errStr, subStr) { - t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { - t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) - } -} - -func TestInit_invalidSyntaxInvalidBackend(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("init-syntax-invalid-backend-invalid"), td) - defer testChdir(t, td)() - - ui := cli.NewMockUi() - view, _ := testView(t) - m := Meta{ - Ui: ui, - View: view, - } - 
- c := &InitCommand{ - Meta: m, - } - - if code := c.Run(nil); code == 0 { - t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) - } - - errStr := ui.ErrorWriter.String() - if subStr := "There are some problems with the configuration, described below"; strings.Contains(errStr, subStr) { - t.Errorf("Error output should not include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "Error: Unsupported block type"; strings.Contains(errStr, subStr) { - t.Errorf("Error output should not mention syntax errors\nwant substr: %s\ngot:\n%s", subStr, errStr) - } - if subStr := "Error: Unsupported backend type"; !strings.Contains(errStr, subStr) { - t.Errorf("Error output should mention the invalid backend\nwant substr: %s\ngot:\n%s", subStr, errStr) - } -} - -// newMockProviderSource is a helper to succinctly construct a mock provider -// source that contains a set of packages matching the given provider versions -// that are available for installation (from temporary local files). -// -// The caller must call the returned close callback once the source is no -// longer needed, at which point it will clean up all of the temporary files -// and the packages in the source will no longer be available for installation. -// -// Provider addresses must be valid source strings, and passing only the -// provider name will be interpreted as a "default" provider under -// registry.terraform.io/hashicorp. If you need more control over the -// provider addresses, pass a full provider source string. -// -// This function also registers providers as belonging to the current platform, -// to ensure that they will be available to a provider installer operating in -// its default configuration. -// -// In case of any errors while constructing the source, this function will -// abort the current test using the given testing.T. 
Therefore a caller can -// assume that if this function returns then the result is valid and ready -// to use. -func newMockProviderSource(t *testing.T, availableProviderVersions map[string][]string) (source *getproviders.MockSource, close func()) { - t.Helper() - var packages []getproviders.PackageMeta - var closes []func() - close = func() { - for _, f := range closes { - f() - } - } - for source, versions := range availableProviderVersions { - addr := addrs.MustParseProviderSourceString(source) - for _, versionStr := range versions { - version, err := getproviders.ParseVersion(versionStr) - if err != nil { - close() - t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, addr.ForDisplay(), err) - } - meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") - if err != nil { - close() - t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), versionStr, err) - } - closes = append(closes, close) - packages = append(packages, meta) - } - } - - return getproviders.NewMockSource(packages, nil), close -} - -// installFakeProviderPackages installs a fake package for the given provider -// names (interpreted as a "default" provider address) and versions into the -// local plugin cache for the given "meta". -// -// Any test using this must be using testChdir or some similar mechanism to -// make sure that it isn't writing directly into a test fixture or source -// directory within the codebase. -// -// If a requested package cannot be installed for some reason, this function -// will abort the test using the given testing.T. Therefore if this function -// returns the caller can assume that the requested providers have been -// installed. 
-func installFakeProviderPackages(t *testing.T, meta *Meta, providerVersions map[string][]string) { - t.Helper() - - cacheDir := meta.providerLocalCacheDir() - installFakeProviderPackagesElsewhere(t, cacheDir, providerVersions) -} - -// installFakeProviderPackagesElsewhere is a variant of installFakeProviderPackages -// that will install packages into the given provider cache directory, rather -// than forcing the use of the local cache of the current "Meta". -func installFakeProviderPackagesElsewhere(t *testing.T, cacheDir *providercache.Dir, providerVersions map[string][]string) { - t.Helper() - - // It can be hard to spot the mistake of forgetting to run testChdir before - // modifying the working directory, so we'll use a simple heuristic here - // to try to detect that mistake and make a noisy error about it instead. - wd, err := os.Getwd() - if err == nil { - wd = filepath.Clean(wd) - // If the directory we're in is named "command" or if we're under a - // directory named "testdata" then we'll assume a mistake and generate - // an error. This will cause the test to fail but won't block it from - // running. - if filepath.Base(wd) == "command" || filepath.Base(wd) == "testdata" || strings.Contains(filepath.ToSlash(wd), "/testdata/") { - t.Errorf("installFakeProviderPackage may be used only by tests that switch to a temporary working directory, e.g. using testChdir") - } - } - - for name, versions := range providerVersions { - addr := addrs.NewDefaultProvider(name) - for _, versionStr := range versions { - version, err := getproviders.ParseVersion(versionStr) - if err != nil { - t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, name, err) - } - meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") - // We're going to install all these fake packages before we return, - // so we don't need to preserve them afterwards. 
- defer close() - if err != nil { - t.Fatalf("failed to prepare fake package for %s %s: %s", name, versionStr, err) - } - _, err = cacheDir.InstallPackage(context.Background(), meta, nil) - if err != nil { - t.Fatalf("failed to install fake package for %s %s: %s", name, versionStr, err) - } - } - } -} - -// expectedPackageInstallPath is a companion to installFakeProviderPackages -// that returns the path where the provider with the given name and version -// would be installed and, relatedly, where the installer will expect to -// find an already-installed version. -// -// Just as with installFakeProviderPackages, this function is a shortcut helper -// for "default-namespaced" providers as we commonly use in tests. If you need -// more control over the provider addresses, use functions of the underlying -// getproviders and providercache packages instead. -// -// The result always uses forward slashes, even on Windows, for consistency -// with how the getproviders and providercache packages build paths. 
-func expectedPackageInstallPath(name, version string, exe bool) string { - platform := getproviders.CurrentPlatform - baseDir := ".terraform/providers" - if exe { - p := fmt.Sprintf("registry.terraform.io/hashicorp/%s/%s/%s/terraform-provider-%s_%s", name, version, platform, name, version) - if platform.OS == "windows" { - p += ".exe" - } - return filepath.ToSlash(filepath.Join(baseDir, p)) - } - return filepath.ToSlash(filepath.Join( - baseDir, fmt.Sprintf("registry.terraform.io/hashicorp/%s/%s/%s", name, version, platform), - )) -} diff --git a/internal/command/jsonchecks/checks.go b/internal/command/jsonchecks/checks.go deleted file mode 100644 index 892dcba32530..000000000000 --- a/internal/command/jsonchecks/checks.go +++ /dev/null @@ -1,124 +0,0 @@ -package jsonchecks - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/hashicorp/terraform/internal/states" -) - -// MarshalCheckStates is the main entry-point for this package, which takes -// the top-level model object for checks in state and plan, and returns a -// JSON representation of it suitable for use in public integration points. 
-func MarshalCheckStates(results *states.CheckResults) []byte { - jsonResults := make([]checkResultStatic, 0, results.ConfigResults.Len()) - - for _, elem := range results.ConfigResults.Elems { - staticAddr := elem.Key - aggrResult := elem.Value - - objects := make([]checkResultDynamic, 0, aggrResult.ObjectResults.Len()) - for _, elem := range aggrResult.ObjectResults.Elems { - dynamicAddr := elem.Key - result := elem.Value - - problems := make([]checkProblem, 0, len(result.FailureMessages)) - for _, msg := range result.FailureMessages { - problems = append(problems, checkProblem{ - Message: msg, - }) - } - sort.Slice(problems, func(i, j int) bool { - return problems[i].Message < problems[j].Message - }) - - objects = append(objects, checkResultDynamic{ - Address: makeDynamicObjectAddr(dynamicAddr), - Status: checkStatusForJSON(result.Status), - Problems: problems, - }) - } - - sort.Slice(objects, func(i, j int) bool { - return objects[i].Address["to_display"].(string) < objects[j].Address["to_display"].(string) - }) - - jsonResults = append(jsonResults, checkResultStatic{ - Address: makeStaticObjectAddr(staticAddr), - Status: checkStatusForJSON(aggrResult.Status), - Instances: objects, - }) - } - - sort.Slice(jsonResults, func(i, j int) bool { - return jsonResults[i].Address["to_display"].(string) < jsonResults[j].Address["to_display"].(string) - }) - - ret, err := json.Marshal(jsonResults) - if err != nil { - // We totally control the input to json.Marshal, so any error here - // is a bug in the code above. 
- panic(fmt.Sprintf("invalid input to json.Marshal: %s", err)) - } - return ret -} - -// checkResultStatic is the container for the static, configuration-driven -// idea of "checkable object" -- a resource block with conditions, for example -- -// which ensures that we can always say _something_ about each checkable -// object in the configuration even if Terraform Core encountered an error -// before being able to determine the dynamic instances of the checkable object. -type checkResultStatic struct { - ExperimentalNote experimentalNote `json:"//"` - - // Address is the address of the checkable object this result relates to. - Address staticObjectAddr `json:"address"` - - // Status is the aggregate status for all of the dynamic objects belonging - // to this static object. - Status checkStatus `json:"status"` - - // Instances contains the results for each individual dynamic object that - // belongs to this static object. - Instances []checkResultDynamic `json:"instances,omitempty"` -} - -// checkResultDynamic describes the check result for a dynamic object, which -// results from Terraform Core evaluating the "expansion" (e.g. count or for_each) -// of the containing object or its own containing module(s). -type checkResultDynamic struct { - // Address augments the Address of the containing checkResultStatic with - // instance-specific extra properties or overridden properties. - Address dynamicObjectAddr `json:"address"` - - // Status is the status for this specific dynamic object. - Status checkStatus `json:"status"` - - // Problems describes some optional details associated with a failure - // status, describing what fails. - // - // This does not include the errors for status "error", because Terraform - // Core emits those separately as normal diagnostics. However, if a - // particular object has a mixture of conditions that failed and conditions - // that were invalid then status can be "error" while simultaneously - // returning problems in this property. 
- Problems []checkProblem `json:"problems,omitempty"` -} - -// checkProblem describes one of potentially several problems that led to -// a check being classified as status "fail". -type checkProblem struct { - // Message is the condition error message provided by the author. - Message string `json:"message"` - - // We don't currently have any other problem-related data, but this is - // intentionally an object to allow us to add other data over time, such - // as the source location where the failing condition was defined. -} - -type experimentalNote struct{} - -func (n experimentalNote) MarshalJSON() ([]byte, error) { - return []byte(`"EXPERIMENTAL: see docs for details"`), nil -} diff --git a/internal/command/jsonconfig/config.go b/internal/command/jsonconfig/config.go deleted file mode 100644 index f744c91086f8..000000000000 --- a/internal/command/jsonconfig/config.go +++ /dev/null @@ -1,565 +0,0 @@ -package jsonconfig - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/terraform" -) - -// Config represents the complete configuration source -type config struct { - ProviderConfigs map[string]providerConfig `json:"provider_config,omitempty"` - RootModule module `json:"root_module,omitempty"` -} - -// ProviderConfig describes all of the provider configurations throughout the -// configuration tree, flattened into a single map for convenience since -// provider configurations are the one concept in Terraform that can span across -// module boundaries. 
-type providerConfig struct { - Name string `json:"name,omitempty"` - FullName string `json:"full_name,omitempty"` - Alias string `json:"alias,omitempty"` - VersionConstraint string `json:"version_constraint,omitempty"` - ModuleAddress string `json:"module_address,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` - parentKey string -} - -type module struct { - Outputs map[string]output `json:"outputs,omitempty"` - // Resources are sorted in a user-friendly order that is undefined at this - // time, but consistent. - Resources []resource `json:"resources,omitempty"` - ModuleCalls map[string]moduleCall `json:"module_calls,omitempty"` - Variables variables `json:"variables,omitempty"` -} - -type moduleCall struct { - Source string `json:"source,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` - CountExpression *expression `json:"count_expression,omitempty"` - ForEachExpression *expression `json:"for_each_expression,omitempty"` - Module module `json:"module,omitempty"` - VersionConstraint string `json:"version_constraint,omitempty"` - DependsOn []string `json:"depends_on,omitempty"` -} - -// variables is the JSON representation of the variables provided to the current -// plan. -type variables map[string]*variable - -type variable struct { - Default json.RawMessage `json:"default,omitempty"` - Description string `json:"description,omitempty"` - Sensitive bool `json:"sensitive,omitempty"` -} - -// Resource is the representation of a resource in the config -type resource struct { - // Address is the absolute resource address - Address string `json:"address,omitempty"` - - // Mode can be "managed" or "data" - Mode string `json:"mode,omitempty"` - - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - - // ProviderConfigKey is the key into "provider_configs" (shown above) for - // the provider configuration that this resource is associated with. 
- // - // NOTE: If a given resource is in a ModuleCall, and the provider was - // configured outside of the module (in a higher level configuration file), - // the ProviderConfigKey will not match a key in the ProviderConfigs map. - ProviderConfigKey string `json:"provider_config_key,omitempty"` - - // Provisioners is an optional field which describes any provisioners. - // Connection info will not be included here. - Provisioners []provisioner `json:"provisioners,omitempty"` - - // Expressions" describes the resource-type-specific content of the - // configuration block. - Expressions map[string]interface{} `json:"expressions,omitempty"` - - // SchemaVersion indicates which version of the resource type schema the - // "values" property conforms to. - SchemaVersion uint64 `json:"schema_version"` - - // CountExpression and ForEachExpression describe the expressions given for - // the corresponding meta-arguments in the resource configuration block. - // These are omitted if the corresponding argument isn't set. - CountExpression *expression `json:"count_expression,omitempty"` - ForEachExpression *expression `json:"for_each_expression,omitempty"` - - DependsOn []string `json:"depends_on,omitempty"` -} - -type output struct { - Sensitive bool `json:"sensitive,omitempty"` - Expression expression `json:"expression,omitempty"` - DependsOn []string `json:"depends_on,omitempty"` - Description string `json:"description,omitempty"` -} - -type provisioner struct { - Type string `json:"type,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` -} - -// Marshal returns the json encoding of terraform configuration. 
-func Marshal(c *configs.Config, schemas *terraform.Schemas) ([]byte, error) { - var output config - - pcs := make(map[string]providerConfig) - marshalProviderConfigs(c, schemas, pcs) - - rootModule, err := marshalModule(c, schemas, "") - if err != nil { - return nil, err - } - output.RootModule = rootModule - - normalizeModuleProviderKeys(&rootModule, pcs) - - for name, pc := range pcs { - if pc.parentKey != "" { - delete(pcs, name) - } - } - output.ProviderConfigs = pcs - - ret, err := json.Marshal(output) - return ret, err -} - -func marshalProviderConfigs( - c *configs.Config, - schemas *terraform.Schemas, - m map[string]providerConfig, -) { - if c == nil { - return - } - - // We want to determine only the provider requirements from this module, - // ignoring any descendants. Disregard any diagnostics when determining - // requirements because we want this marshalling to succeed even if there - // are invalid constraints. - reqs, _ := c.ProviderRequirementsShallow() - - // Add an entry for each provider configuration block in the module. - for k, pc := range c.Module.ProviderConfigs { - providerFqn := c.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: pc.Name}) - schema := schemas.ProviderConfig(providerFqn) - - p := providerConfig{ - Name: pc.Name, - FullName: providerFqn.String(), - Alias: pc.Alias, - ModuleAddress: c.Path.String(), - Expressions: marshalExpressions(pc.Config, schema), - } - - // Store the fully resolved provider version constraint, rather than - // using the version argument in the configuration block. This is both - // future proof (for when we finish the deprecation of the provider config - // version argument) and more accurate (as it reflects the full set of - // constraints, in case there are multiple). 
- if vc, ok := reqs[providerFqn]; ok { - p.VersionConstraint = getproviders.VersionConstraintsString(vc) - } - - key := opaqueProviderKey(k, c.Path.String()) - - m[key] = p - } - - // Ensure that any required providers with no associated configuration - // block are included in the set. - for k, pr := range c.Module.ProviderRequirements.RequiredProviders { - // If a provider has aliases defined, process those first. - for _, alias := range pr.Aliases { - // If there exists a value for this provider, we have nothing to add - // to it, so skip. - key := opaqueProviderKey(alias.StringCompact(), c.Path.String()) - if _, exists := m[key]; exists { - continue - } - // Given no provider configuration block exists, the only fields we can - // fill here are the local name, FQN, module address, and version - // constraints. - p := providerConfig{ - Name: pr.Name, - FullName: pr.Type.String(), - ModuleAddress: c.Path.String(), - } - - if vc, ok := reqs[pr.Type]; ok { - p.VersionConstraint = getproviders.VersionConstraintsString(vc) - } - - m[key] = p - } - - // If there exists a value for this provider, we have nothing to add - // to it, so skip. - key := opaqueProviderKey(k, c.Path.String()) - if _, exists := m[key]; exists { - continue - } - - // Given no provider configuration block exists, the only fields we can - // fill here are the local name, module address, and version - // constraints. - p := providerConfig{ - Name: pr.Name, - FullName: pr.Type.String(), - ModuleAddress: c.Path.String(), - } - - if vc, ok := reqs[pr.Type]; ok { - p.VersionConstraint = getproviders.VersionConstraintsString(vc) - } - - if c.Parent != nil { - parentKey := opaqueProviderKey(pr.Name, c.Parent.Path.String()) - p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) - } - - m[key] = p - } - - // Providers could be implicitly created or inherited from the parent module - // when no requirements and configuration block defined. 
- for req := range reqs { - // Only default providers could implicitly exist, - // so the provider name must be same as the provider type. - key := opaqueProviderKey(req.Type, c.Path.String()) - if _, exists := m[key]; exists { - continue - } - - p := providerConfig{ - Name: req.Type, - FullName: req.String(), - ModuleAddress: c.Path.String(), - } - - // In child modules, providers defined in the parent module can be implicitly used. - if c.Parent != nil { - parentKey := opaqueProviderKey(req.Type, c.Parent.Path.String()) - p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) - } - - m[key] = p - } - - // Must also visit our child modules, recursively. - for name, mc := range c.Module.ModuleCalls { - // Keys in c.Children are guaranteed to match those in c.Module.ModuleCalls - cc := c.Children[name] - - // Add provider config map entries for passed provider configs, - // pointing at the passed configuration - for _, ppc := range mc.Providers { - // These provider names include aliases, if set - moduleProviderName := ppc.InChild.String() - parentProviderName := ppc.InParent.String() - - // Look up the provider FQN from the module context, using the non-aliased local name - providerFqn := cc.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: ppc.InChild.Name}) - - // The presence of passed provider configs means that we cannot have - // any configuration expressions or version constraints here - p := providerConfig{ - Name: moduleProviderName, - FullName: providerFqn.String(), - ModuleAddress: cc.Path.String(), - } - - key := opaqueProviderKey(moduleProviderName, cc.Path.String()) - parentKey := opaqueProviderKey(parentProviderName, cc.Parent.Path.String()) - p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) - - m[key] = p - } - - // Finally, marshal any other provider configs within the called module. - // It is safe to do this last because it is invalid to configure a - // provider which has passed provider configs in the module call. 
- marshalProviderConfigs(cc, schemas, m) - } -} - -func marshalModule(c *configs.Config, schemas *terraform.Schemas, addr string) (module, error) { - var module module - var rs []resource - - managedResources, err := marshalResources(c.Module.ManagedResources, schemas, addr) - if err != nil { - return module, err - } - dataResources, err := marshalResources(c.Module.DataResources, schemas, addr) - if err != nil { - return module, err - } - - rs = append(managedResources, dataResources...) - module.Resources = rs - - outputs := make(map[string]output) - for _, v := range c.Module.Outputs { - o := output{ - Sensitive: v.Sensitive, - Expression: marshalExpression(v.Expr), - } - if v.Description != "" { - o.Description = v.Description - } - if len(v.DependsOn) > 0 { - dependencies := make([]string, len(v.DependsOn)) - for i, d := range v.DependsOn { - ref, diags := addrs.ParseRef(d) - // we should not get an error here, because `terraform validate` - // would have complained well before this point, but if we do we'll - // silenty skip it. 
- if !diags.HasErrors() { - dependencies[i] = ref.Subject.String() - } - } - o.DependsOn = dependencies - } - - outputs[v.Name] = o - } - module.Outputs = outputs - - module.ModuleCalls = marshalModuleCalls(c, schemas) - - if len(c.Module.Variables) > 0 { - vars := make(variables, len(c.Module.Variables)) - for k, v := range c.Module.Variables { - var defaultValJSON []byte - if v.Default == cty.NilVal { - defaultValJSON = nil - } else { - defaultValJSON, err = ctyjson.Marshal(v.Default, v.Default.Type()) - if err != nil { - return module, err - } - } - vars[k] = &variable{ - Default: defaultValJSON, - Description: v.Description, - Sensitive: v.Sensitive, - } - } - module.Variables = vars - } - - return module, nil -} - -func marshalModuleCalls(c *configs.Config, schemas *terraform.Schemas) map[string]moduleCall { - ret := make(map[string]moduleCall) - - for name, mc := range c.Module.ModuleCalls { - mcConfig := c.Children[name] - ret[name] = marshalModuleCall(mcConfig, mc, schemas) - } - - return ret -} - -func marshalModuleCall(c *configs.Config, mc *configs.ModuleCall, schemas *terraform.Schemas) moduleCall { - // It is possible to have a module call with a nil config. - if c == nil { - return moduleCall{} - } - - ret := moduleCall{ - // We're intentionally echoing back exactly what the user entered - // here, rather than the normalized version in SourceAddr, because - // historically we only _had_ the raw address and thus it would be - // a (admittedly minor) breaking change to start normalizing them - // now, in case consumers of this data are expecting a particular - // non-normalized syntax. 
- Source: mc.SourceAddrRaw, - VersionConstraint: mc.Version.Required.String(), - } - cExp := marshalExpression(mc.Count) - if !cExp.Empty() { - ret.CountExpression = &cExp - } else { - fExp := marshalExpression(mc.ForEach) - if !fExp.Empty() { - ret.ForEachExpression = &fExp - } - } - - schema := &configschema.Block{} - schema.Attributes = make(map[string]*configschema.Attribute) - for _, variable := range c.Module.Variables { - schema.Attributes[variable.Name] = &configschema.Attribute{ - Required: variable.Default == cty.NilVal, - } - } - - ret.Expressions = marshalExpressions(mc.Config, schema) - - module, _ := marshalModule(c, schemas, c.Path.String()) - - ret.Module = module - - if len(mc.DependsOn) > 0 { - dependencies := make([]string, len(mc.DependsOn)) - for i, d := range mc.DependsOn { - ref, diags := addrs.ParseRef(d) - // we should not get an error here, because `terraform validate` - // would have complained well before this point, but if we do we'll - // silenty skip it. - if !diags.HasErrors() { - dependencies[i] = ref.Subject.String() - } - } - ret.DependsOn = dependencies - } - - return ret -} - -func marshalResources(resources map[string]*configs.Resource, schemas *terraform.Schemas, moduleAddr string) ([]resource, error) { - var rs []resource - for _, v := range resources { - providerConfigKey := opaqueProviderKey(v.ProviderConfigAddr().StringCompact(), moduleAddr) - r := resource{ - Address: v.Addr().String(), - Type: v.Type, - Name: v.Name, - ProviderConfigKey: providerConfigKey, - } - - switch v.Mode { - case addrs.ManagedResourceMode: - r.Mode = "managed" - case addrs.DataResourceMode: - r.Mode = "data" - default: - return rs, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, v.Mode.String()) - } - - cExp := marshalExpression(v.Count) - if !cExp.Empty() { - r.CountExpression = &cExp - } else { - fExp := marshalExpression(v.ForEach) - if !fExp.Empty() { - r.ForEachExpression = &fExp - } - } - - schema, schemaVer := 
schemas.ResourceTypeConfig( - v.Provider, - v.Mode, - v.Type, - ) - if schema == nil { - return nil, fmt.Errorf("no schema found for %s (in provider %s)", v.Addr().String(), v.Provider) - } - r.SchemaVersion = schemaVer - - r.Expressions = marshalExpressions(v.Config, schema) - - // Managed is populated only for Mode = addrs.ManagedResourceMode - if v.Managed != nil && len(v.Managed.Provisioners) > 0 { - var provisioners []provisioner - for _, p := range v.Managed.Provisioners { - schema := schemas.ProvisionerConfig(p.Type) - prov := provisioner{ - Type: p.Type, - Expressions: marshalExpressions(p.Config, schema), - } - provisioners = append(provisioners, prov) - } - r.Provisioners = provisioners - } - - if len(v.DependsOn) > 0 { - dependencies := make([]string, len(v.DependsOn)) - for i, d := range v.DependsOn { - ref, diags := addrs.ParseRef(d) - // we should not get an error here, because `terraform validate` - // would have complained well before this point, but if we do we'll - // silenty skip it. - if !diags.HasErrors() { - dependencies[i] = ref.Subject.String() - } - } - r.DependsOn = dependencies - } - - rs = append(rs, r) - } - sort.Slice(rs, func(i, j int) bool { - return rs[i].Address < rs[j].Address - }) - return rs, nil -} - -// Flatten all resource provider keys in a module and its descendents, such -// that any resources from providers using a configuration passed through the -// module call have a direct refernce to that provider configuration. 
-func normalizeModuleProviderKeys(m *module, pcs map[string]providerConfig) { - for i, r := range m.Resources { - if pc, exists := pcs[r.ProviderConfigKey]; exists { - if _, hasParent := pcs[pc.parentKey]; hasParent { - m.Resources[i].ProviderConfigKey = pc.parentKey - } - } - } - - for _, mc := range m.ModuleCalls { - normalizeModuleProviderKeys(&mc.Module, pcs) - } -} - -// opaqueProviderKey generates a unique absProviderConfig-like string from the module -// address and provider -func opaqueProviderKey(provider string, addr string) (key string) { - key = provider - if addr != "" { - key = fmt.Sprintf("%s:%s", addr, provider) - } - return key -} - -// Traverse up the module call tree until we find the provider -// configuration which has no linked parent config. This is then -// the source of the configuration used in this module call, so -// we link to it directly -func findSourceProviderKey(startKey string, fullName string, m map[string]providerConfig) string { - var parentKey string - - key := startKey - for key != "" { - parent, exists := m[key] - if !exists || parent.FullName != fullName { - break - } - - parentKey = key - key = parent.parentKey - } - - return parentKey -} diff --git a/internal/command/jsonformat/collections/action.go b/internal/command/jsonformat/collections/action.go deleted file mode 100644 index 97f7d7d7ab43..000000000000 --- a/internal/command/jsonformat/collections/action.go +++ /dev/null @@ -1,16 +0,0 @@ -package collections - -import "github.com/hashicorp/terraform/internal/plans" - -// CompareActions will compare current and next, and return plans.Update if they -// are different, and current if they are the same. 
-func CompareActions(current, next plans.Action) plans.Action { - if next == plans.NoOp { - return current - } - - if current != next { - return plans.Update - } - return current -} diff --git a/internal/command/jsonformat/collections/map.go b/internal/command/jsonformat/collections/map.go deleted file mode 100644 index ecc8ece92abe..000000000000 --- a/internal/command/jsonformat/collections/map.go +++ /dev/null @@ -1,26 +0,0 @@ -package collections - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -type ProcessKey func(key string) computed.Diff - -func TransformMap[Input any](before, after map[string]Input, keys []string, process ProcessKey) (map[string]computed.Diff, plans.Action) { - current := plans.NoOp - if before != nil && after == nil { - current = plans.Delete - } - if before == nil && after != nil { - current = plans.Create - } - - elements := make(map[string]computed.Diff) - for _, key := range keys { - elements[key] = process(key) - current = CompareActions(current, elements[key].Action) - } - - return elements, current -} diff --git a/internal/command/jsonformat/collections/slice.go b/internal/command/jsonformat/collections/slice.go deleted file mode 100644 index b584dbef7f44..000000000000 --- a/internal/command/jsonformat/collections/slice.go +++ /dev/null @@ -1,72 +0,0 @@ -package collections - -import ( - "reflect" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/objchange" -) - -type TransformIndices func(before, after int) computed.Diff -type ProcessIndices func(before, after int) -type IsObjType[Input any] func(input Input) bool - -func TransformSlice[Input any](before, after []Input, process TransformIndices, isObjType IsObjType[Input]) ([]computed.Diff, plans.Action) { - current := plans.NoOp - if before != nil && after == nil { - 
current = plans.Delete - } - if before == nil && after != nil { - current = plans.Create - } - - var elements []computed.Diff - ProcessSlice(before, after, func(before, after int) { - element := process(before, after) - elements = append(elements, element) - current = CompareActions(current, element.Action) - }, isObjType) - return elements, current -} - -func ProcessSlice[Input any](before, after []Input, process ProcessIndices, isObjType IsObjType[Input]) { - lcs := objchange.LongestCommonSubsequence(before, after, func(before, after Input) bool { - return reflect.DeepEqual(before, after) - }) - - var beforeIx, afterIx, lcsIx int - for beforeIx < len(before) || afterIx < len(after) || lcsIx < len(lcs) { - // Step through all the before values until we hit the next item in the - // longest common subsequence. We are going to just say that all of - // these have been deleted. - for beforeIx < len(before) && (lcsIx >= len(lcs) || !reflect.DeepEqual(before[beforeIx], lcs[lcsIx])) { - isObjectDiff := isObjType(before[beforeIx]) && afterIx < len(after) && isObjType(after[afterIx]) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) - if isObjectDiff { - process(beforeIx, afterIx) - beforeIx++ - afterIx++ - continue - } - - process(beforeIx, len(after)) - beforeIx++ - } - - // Now, step through all the after values until hit the next item in the - // LCS. We are going to say that all of these have been created. - for afterIx < len(after) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) { - process(len(before), afterIx) - afterIx++ - } - - // Finally, add the item in common as unchanged. 
- if lcsIx < len(lcs) { - process(beforeIx, afterIx) - beforeIx++ - afterIx++ - lcsIx++ - } - } -} diff --git a/internal/command/jsonformat/computed/diff.go b/internal/command/jsonformat/computed/diff.go deleted file mode 100644 index 6f6980c9f449..000000000000 --- a/internal/command/jsonformat/computed/diff.go +++ /dev/null @@ -1,120 +0,0 @@ -package computed - -import ( - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform/internal/plans" -) - -// Diff captures the computed diff for a single block, element or attribute. -// -// It essentially merges common functionality across all types of changes, -// namely the replace logic and the action / change type. Any remaining -// behaviour can be offloaded to the renderer which will be unique for the -// various change types (eg. maps, objects, lists, blocks, primitives, etc.). -type Diff struct { - // Renderer captures the uncommon functionality across the different kinds - // of changes. Each type of change (lists, blocks, sets, etc.) will have a - // unique renderer. - Renderer DiffRenderer - - // Action is the action described by this change (such as create, delete, - // update, etc.). - Action plans.Action - - // Replace tells the Change that it should add the `# forces replacement` - // suffix. - // - // Every single change could potentially add this suffix, so we embed it in - // the change as common functionality instead of in the specific renderers. - Replace bool -} - -// NewDiff creates a new Diff object with the provided renderer, action and -// replace context. -func NewDiff(renderer DiffRenderer, action plans.Action, replace bool) Diff { - return Diff{ - Renderer: renderer, - Action: action, - Replace: replace, - } -} - -// RenderHuman prints the Change into a human-readable string referencing the -// specified RenderOpts. -// -// If the returned string is a single line, then indent should be ignored. 
-// -// If the return string is multiple lines, then indent should be used to offset -// the beginning of all lines but the first by the specified amount. -func (diff Diff) RenderHuman(indent int, opts RenderHumanOpts) string { - return diff.Renderer.RenderHuman(diff, indent, opts) -} - -// WarningsHuman returns a list of strings that should be rendered as warnings -// before a given change is rendered. -// -// As with the RenderHuman function, the indent should only be applied on -// multiline warnings and on the second and following lines. -func (diff Diff) WarningsHuman(indent int, opts RenderHumanOpts) []string { - return diff.Renderer.WarningsHuman(diff, indent, opts) -} - -type DiffRenderer interface { - RenderHuman(diff Diff, indent int, opts RenderHumanOpts) string - WarningsHuman(diff Diff, indent int, opts RenderHumanOpts) []string -} - -// RenderHumanOpts contains options that can control how the human render -// function of the DiffRenderer will function. -type RenderHumanOpts struct { - Colorize *colorstring.Colorize - - // OverrideNullSuffix tells the Renderer not to display the `-> null` suffix - // that is normally displayed when an element, attribute, or block is - // deleted. - OverrideNullSuffix bool - - // OverrideForcesReplacement tells the Renderer to display the - // `# forces replacement` suffix, even if a diff doesn't have the Replace - // field set. - // - // Some renderers (like the Set renderer) don't display the suffix - // themselves but force their child diffs to display it instead. - OverrideForcesReplacement bool - - // ShowUnchangedChildren instructs the Renderer to render all children of a - // given complex change, instead of hiding unchanged items and compressing - // them into a single line. - ShowUnchangedChildren bool - - // HideDiffActionSymbols tells the renderer not to show the '+'/'-' symbols - // and to skip the places where the symbols would result in an offset. 
- HideDiffActionSymbols bool -} - -// NewRenderHumanOpts creates a new RenderHumanOpts struct with the required -// fields set. -func NewRenderHumanOpts(colorize *colorstring.Colorize) RenderHumanOpts { - return RenderHumanOpts{ - Colorize: colorize, - } -} - -// Clone returns a new RenderOpts object, that matches the original but can be -// edited without changing the original. -func (opts RenderHumanOpts) Clone() RenderHumanOpts { - return RenderHumanOpts{ - Colorize: opts.Colorize, - - OverrideNullSuffix: opts.OverrideNullSuffix, - ShowUnchangedChildren: opts.ShowUnchangedChildren, - HideDiffActionSymbols: opts.HideDiffActionSymbols, - - // OverrideForcesReplacement is a special case in that it doesn't - // cascade. So each diff should decide independently whether it's direct - // children should override their internal Replace logic, instead of - // an ancestor making the switch and affecting the entire tree. - OverrideForcesReplacement: false, - } -} diff --git a/internal/command/jsonformat/computed/renderers/block.go b/internal/command/jsonformat/computed/renderers/block.go deleted file mode 100644 index 17f2dbf1dca1..000000000000 --- a/internal/command/jsonformat/computed/renderers/block.go +++ /dev/null @@ -1,181 +0,0 @@ -package renderers - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - - "github.com/hashicorp/terraform/internal/plans" -) - -var ( - _ computed.DiffRenderer = (*blockRenderer)(nil) - - importantAttributes = []string{ - "id", - "name", - "tags", - } -) - -func importantAttribute(attr string) bool { - for _, attribute := range importantAttributes { - if attribute == attr { - return true - } - } - return false -} - -func Block(attributes map[string]computed.Diff, blocks Blocks) computed.DiffRenderer { - return &blockRenderer{ - attributes: attributes, - blocks: blocks, - } -} - -type blockRenderer struct { - NoWarningsRenderer - - attributes map[string]computed.Diff - blocks 
Blocks -} - -func (renderer blockRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - if len(renderer.attributes) == 0 && len(renderer.blocks.GetAllKeys()) == 0 { - return fmt.Sprintf("{}%s", forcesReplacement(diff.Replace, opts)) - } - - unchangedAttributes := 0 - unchangedBlocks := 0 - - maximumAttributeKeyLen := 0 - var attributeKeys []string - escapedAttributeKeys := make(map[string]string) - for key := range renderer.attributes { - attributeKeys = append(attributeKeys, key) - escapedKey := EnsureValidAttributeName(key) - escapedAttributeKeys[key] = escapedKey - if maximumAttributeKeyLen < len(escapedKey) { - maximumAttributeKeyLen = len(escapedKey) - } - } - sort.Strings(attributeKeys) - - importantAttributeOpts := opts.Clone() - importantAttributeOpts.ShowUnchangedChildren = true - - attributeOpts := opts.Clone() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts))) - for _, key := range attributeKeys { - attribute := renderer.attributes[key] - if importantAttribute(key) { - - // Always display the important attributes. 
- for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumAttributeKeyLen, key, attribute.RenderHuman(indent+1, importantAttributeOpts))) - continue - } - if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren { - unchangedAttributes++ - continue - } - - for _, warning := range attribute.WarningsHuman(indent+1, opts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumAttributeKeyLen, escapedAttributeKeys[key], attribute.RenderHuman(indent+1, attributeOpts))) - } - - if unchangedAttributes > 0 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts))) - } - - blockKeys := renderer.blocks.GetAllKeys() - for _, key := range blockKeys { - - foundChangedBlock := false - renderBlock := func(diff computed.Diff, mapKey string, opts computed.RenderHumanOpts) { - - creatingSensitiveValue := diff.Action == plans.Create && renderer.blocks.AfterSensitiveBlocks[key] - deletingSensitiveValue := diff.Action == plans.Delete && renderer.blocks.BeforeSensitiveBlocks[key] - modifyingSensitiveValue := (diff.Action == plans.Update || diff.Action == plans.NoOp) && (renderer.blocks.AfterSensitiveBlocks[key] || renderer.blocks.BeforeSensitiveBlocks[key]) - - if creatingSensitiveValue || deletingSensitiveValue || modifyingSensitiveValue { - // Intercept the renderer here if the sensitive data was set - // across all the blocks instead of individually. 
- action := diff.Action - if diff.Action == plans.NoOp && renderer.blocks.BeforeSensitiveBlocks[key] != renderer.blocks.AfterSensitiveBlocks[key] { - action = plans.Update - } - - diff = computed.NewDiff(SensitiveBlock(diff, renderer.blocks.BeforeSensitiveBlocks[key], renderer.blocks.AfterSensitiveBlocks[key]), action, diff.Replace) - } - - if diff.Action == plans.NoOp && !opts.ShowUnchangedChildren { - unchangedBlocks++ - return - } - - if !foundChangedBlock && len(renderer.attributes) > 0 { - // We always want to put an extra new line between the - // attributes and blocks, and between groups of blocks. - buf.WriteString("\n") - foundChangedBlock = true - } - - // If the force replacement metadata was set for every entry in the - // block we need to override that here. Our child blocks will only - // know about the replace function if it was set on them - // specifically, and not if it was set for all the blocks. - blockOpts := opts.Clone() - blockOpts.OverrideForcesReplacement = renderer.blocks.ReplaceBlocks[key] - - for _, warning := range diff.WarningsHuman(indent+1, blockOpts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%s%s %s\n", formatIndent(indent+1), writeDiffActionSymbol(diff.Action, blockOpts), EnsureValidAttributeName(key), mapKey, diff.RenderHuman(indent+1, blockOpts))) - - } - - switch { - case renderer.blocks.IsSingleBlock(key): - renderBlock(renderer.blocks.SingleBlocks[key], "", opts) - case renderer.blocks.IsMapBlock(key): - var keys []string - for key := range renderer.blocks.MapBlocks[key] { - keys = append(keys, key) - } - sort.Strings(keys) - - for _, innerKey := range keys { - renderBlock(renderer.blocks.MapBlocks[key][innerKey], fmt.Sprintf(" %q", innerKey), opts) - } - case renderer.blocks.IsSetBlock(key): - - setOpts := opts.Clone() - setOpts.OverrideForcesReplacement = diff.Replace - - for _, block := range renderer.blocks.SetBlocks[key] { - renderBlock(block, "", 
opts) - } - case renderer.blocks.IsListBlock(key): - for _, block := range renderer.blocks.ListBlocks[key] { - renderBlock(block, "", opts) - } - } - } - - if unchangedBlocks > 0 { - buf.WriteString(fmt.Sprintf("\n%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("block", unchangedBlocks, opts))) - } - - buf.WriteString(fmt.Sprintf("%s%s}", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts))) - return buf.String() -} diff --git a/internal/command/jsonformat/computed/renderers/list.go b/internal/command/jsonformat/computed/renderers/list.go deleted file mode 100644 index 0b79ce026bf7..000000000000 --- a/internal/command/jsonformat/computed/renderers/list.go +++ /dev/null @@ -1,124 +0,0 @@ -package renderers - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*listRenderer)(nil) - -func List(elements []computed.Diff) computed.DiffRenderer { - return &listRenderer{ - displayContext: true, - elements: elements, - } -} - -func NestedList(elements []computed.Diff) computed.DiffRenderer { - return &listRenderer{ - elements: elements, - } -} - -type listRenderer struct { - NoWarningsRenderer - - displayContext bool - elements []computed.Diff -} - -func (renderer listRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - if len(renderer.elements) == 0 { - return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) - } - - elementOpts := opts.Clone() - elementOpts.OverrideNullSuffix = true - - unchangedElementOpts := opts.Clone() - unchangedElementOpts.ShowUnchangedChildren = true - - var unchangedElements []computed.Diff - - // renderNext tells the renderer to print out the next element in the list - // whatever state it is in. 
So, even if a change is a NoOp we will still - // print it out if the last change we processed wants us to. - renderNext := false - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(diff.Replace, opts))) - for _, element := range renderer.elements { - if element.Action == plans.NoOp && !renderNext && !opts.ShowUnchangedChildren { - unchangedElements = append(unchangedElements, element) - continue - } - renderNext = false - - opts := elementOpts - - // If we want to display the context around this change, we want to - // render the change immediately before this change in the list, and the - // change immediately after in the list, even if both these changes are - // NoOps. This will give the user reading the diff some context as to - // where in the list these changes are being made, as order matters. - if renderer.displayContext { - // If our list of unchanged elements contains more than one entry - // we'll print out a count of the number of unchanged elements that - // we skipped. Note, this is the length of the unchanged elements - // minus 1 as the most recent unchanged element will be printed out - // in full. - if len(unchangedElements) > 1 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements)-1, opts))) - } - // If our list of unchanged elements contains at least one entry, - // we're going to print out the most recent change in full. That's - // what happens here. - if len(unchangedElements) > 0 { - lastElement := unchangedElements[len(unchangedElements)-1] - buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(lastElement.Action, unchangedElementOpts), lastElement.RenderHuman(indent+1, unchangedElementOpts))) - } - // We now reset the unchanged elements list, we've printed out a - // count of all the elements we skipped so we start counting from - // scratch again. 
This means that if we process a run of changed - // elements, they won't all start printing out summaries of every - // change that happened previously. - unchangedElements = nil - - if element.Action == plans.NoOp { - // If this is a NoOp action then we're going to render it below - // so we need to just override the opts we're going to use to - // make sure we use the unchanged opts. - opts = unchangedElementOpts - } else { - // As we also want to render the element immediately after any - // changes, we make a note here to say we should render the next - // change whatever it is. But, we only want to render the next - // change if the current change isn't a NoOp. If the current change - // is a NoOp then it was told to print by the last change and we - // don't want to cascade and print all changes from now on. - renderNext = true - } - } - - for _, warning := range element.WarningsHuman(indent+1, opts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, opts), element.RenderHuman(indent+1, opts))) - } - - // If we were not displaying any context alongside our changes then the - // unchangedElements list will contain every unchanged element, and we'll - // print that out as we do with every other collection. - // - // If we were displaying context, then this will contain any unchanged - // elements since our last change, so we should also print it out. 
- if len(unchangedElements) > 0 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements), opts))) - } - - buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) - return buf.String() -} diff --git a/internal/command/jsonformat/computed/renderers/map.go b/internal/command/jsonformat/computed/renderers/map.go deleted file mode 100644 index 10ad997464d5..000000000000 --- a/internal/command/jsonformat/computed/renderers/map.go +++ /dev/null @@ -1,107 +0,0 @@ -package renderers - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*mapRenderer)(nil) - -func Map(elements map[string]computed.Diff) computed.DiffRenderer { - return &mapRenderer{ - elements: elements, - alignKeys: true, - } -} - -func NestedMap(elements map[string]computed.Diff) computed.DiffRenderer { - return &mapRenderer{ - elements: elements, - overrideNullSuffix: true, - overrideForcesReplacement: true, - } -} - -type mapRenderer struct { - NoWarningsRenderer - - elements map[string]computed.Diff - - overrideNullSuffix bool - overrideForcesReplacement bool - alignKeys bool -} - -func (renderer mapRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - forcesReplacementSelf := diff.Replace && !renderer.overrideForcesReplacement - forcesReplacementChildren := diff.Replace && renderer.overrideForcesReplacement - - if len(renderer.elements) == 0 { - return fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(forcesReplacementSelf, opts)) - } - - // Sort the map elements by key, so we have a deterministic ordering in - // the output. - var keys []string - - // We need to make sure the keys are capable of rendering properly. 
- escapedKeys := make(map[string]string) - - maximumKeyLen := 0 - for key := range renderer.elements { - keys = append(keys, key) - - escapedKey := hclEscapeString(key) - escapedKeys[key] = escapedKey - if maximumKeyLen < len(escapedKey) { - maximumKeyLen = len(escapedKey) - } - } - sort.Strings(keys) - - unchangedElements := 0 - - elementOpts := opts.Clone() - elementOpts.OverrideNullSuffix = diff.Action == plans.Delete || renderer.overrideNullSuffix - elementOpts.OverrideForcesReplacement = forcesReplacementChildren - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(forcesReplacementSelf, opts))) - for _, key := range keys { - element := renderer.elements[key] - - if element.Action == plans.NoOp && !opts.ShowUnchangedChildren { - // Don't render NoOp operations when we are compact display. - unchangedElements++ - continue - } - - for _, warning := range element.WarningsHuman(indent+1, opts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - // Only show commas between elements for objects. 
- comma := "" - if _, ok := element.Renderer.(*objectRenderer); ok { - comma = "," - } - - if renderer.alignKeys { - buf.WriteString(fmt.Sprintf("%s%s%-*s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), maximumKeyLen, escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma)) - } else { - buf.WriteString(fmt.Sprintf("%s%s%s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma)) - } - - } - - if unchangedElements > 0 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, opts))) - } - - buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) - return buf.String() -} diff --git a/internal/command/jsonformat/computed/renderers/object.go b/internal/command/jsonformat/computed/renderers/object.go deleted file mode 100644 index 7cf9710fb550..000000000000 --- a/internal/command/jsonformat/computed/renderers/object.go +++ /dev/null @@ -1,95 +0,0 @@ -package renderers - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*objectRenderer)(nil) - -func Object(attributes map[string]computed.Diff) computed.DiffRenderer { - return &objectRenderer{ - attributes: attributes, - overrideNullSuffix: true, - } -} - -func NestedObject(attributes map[string]computed.Diff) computed.DiffRenderer { - return &objectRenderer{ - attributes: attributes, - overrideNullSuffix: false, - } -} - -type objectRenderer struct { - NoWarningsRenderer - - attributes map[string]computed.Diff - overrideNullSuffix bool -} - -func (renderer objectRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - if 
len(renderer.attributes) == 0 { - return fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) - } - - attributeOpts := opts.Clone() - attributeOpts.OverrideNullSuffix = renderer.overrideNullSuffix - - // We need to keep track of our keys in two ways. The first is the order in - // which we will display them. The second is a mapping to their safely - // escaped equivalent. - - maximumKeyLen := 0 - var keys []string - escapedKeys := make(map[string]string) - for key := range renderer.attributes { - keys = append(keys, key) - escapedKey := EnsureValidAttributeName(key) - escapedKeys[key] = escapedKey - if maximumKeyLen < len(escapedKey) { - maximumKeyLen = len(escapedKey) - } - } - sort.Strings(keys) - - unchangedAttributes := 0 - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts))) - for _, key := range keys { - attribute := renderer.attributes[key] - - if importantAttribute(key) { - importantAttributeOpts := attributeOpts.Clone() - importantAttributeOpts.ShowUnchangedChildren = true - - for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, importantAttributeOpts))) - continue - } - - if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren { - // Don't render NoOp operations when we are compact display. 
- unchangedAttributes++ - continue - } - - for _, warning := range attribute.WarningsHuman(indent+1, opts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, attributeOpts))) - } - - if unchangedAttributes > 0 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts))) - } - - buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) - return buf.String() -} diff --git a/internal/command/jsonformat/computed/renderers/primitive.go b/internal/command/jsonformat/computed/renderers/primitive.go deleted file mode 100644 index efc273692e91..000000000000 --- a/internal/command/jsonformat/computed/renderers/primitive.go +++ /dev/null @@ -1,239 +0,0 @@ -package renderers - -import ( - "fmt" - "math/big" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*primitiveRenderer)(nil) - -func Primitive(before, after interface{}, ctype cty.Type) computed.DiffRenderer { - return &primitiveRenderer{ - before: before, - after: after, - ctype: ctype, - } -} - -type primitiveRenderer struct { - NoWarningsRenderer - - before interface{} - after interface{} - ctype cty.Type -} - -func (renderer primitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - if 
renderer.ctype == cty.String { - return renderer.renderStringDiff(diff, indent, opts) - } - - beforeValue := renderPrimitiveValue(renderer.before, renderer.ctype, opts) - afterValue := renderPrimitiveValue(renderer.after, renderer.ctype, opts) - - switch diff.Action { - case plans.Create: - return fmt.Sprintf("%s%s", afterValue, forcesReplacement(diff.Replace, opts)) - case plans.Delete: - return fmt.Sprintf("%s%s%s", beforeValue, nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) - case plans.NoOp: - return fmt.Sprintf("%s%s", beforeValue, forcesReplacement(diff.Replace, opts)) - default: - return fmt.Sprintf("%s %s %s%s", beforeValue, opts.Colorize.Color("[yellow]->[reset]"), afterValue, forcesReplacement(diff.Replace, opts)) - } -} - -func renderPrimitiveValue(value interface{}, t cty.Type, opts computed.RenderHumanOpts) string { - if value == nil { - return opts.Colorize.Color("[dark_gray]null[reset]") - } - - switch { - case t == cty.Bool: - if value.(bool) { - return "true" - } - return "false" - case t == cty.Number: - bf := big.NewFloat(value.(float64)) - return bf.Text('f', -1) - default: - panic("unrecognized primitive type: " + t.FriendlyName()) - } -} - -func (renderer primitiveRenderer) renderStringDiff(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - - // We process multiline strings at the end of the switch statement. - var lines []string - - switch diff.Action { - case plans.Create, plans.NoOp: - str := evaluatePrimitiveString(renderer.after, opts) - - if str.Json != nil { - if diff.Action == plans.NoOp { - return renderer.renderStringDiffAsJson(diff, indent, opts, str, str) - } else { - return renderer.renderStringDiffAsJson(diff, indent, opts, evaluatedString{}, str) - } - } - - if !str.IsMultiline { - return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts)) - } - - // We are creating a single multiline string, so let's split by the new - // line character. 
While we are doing this, we are going to insert our - // indents and make sure each line is formatted correctly. - lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n") - - // We now just need to do the same for the first entry in lines, because - // we split on the new line characters which won't have been at the - // beginning of the first line. - lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0]) - case plans.Delete: - str := evaluatePrimitiveString(renderer.before, opts) - if str.IsNull { - // We don't put the null suffix (-> null) here because the final - // render or null -> null would look silly. - return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts)) - } - - if str.Json != nil { - return renderer.renderStringDiffAsJson(diff, indent, opts, str, evaluatedString{}) - } - - if !str.IsMultiline { - return fmt.Sprintf("%s%s%s", str.RenderSimple(), nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) - } - - // We are creating a single multiline string, so let's split by the new - // line character. While we are doing this, we are going to insert our - // indents and make sure each line is formatted correctly. - lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n") - - // We now just need to do the same for the first entry in lines, because - // we split on the new line characters which won't have been at the - // beginning of the first line. 
- lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0]) - default: - beforeString := evaluatePrimitiveString(renderer.before, opts) - afterString := evaluatePrimitiveString(renderer.after, opts) - - if beforeString.Json != nil && afterString.Json != nil { - return renderer.renderStringDiffAsJson(diff, indent, opts, beforeString, afterString) - } - - if beforeString.Json != nil || afterString.Json != nil { - // This means one of the strings is JSON and one isn't. We're going - // to be a little inefficient here, but we can just reuse another - // renderer for this so let's keep it simple. - return computed.NewDiff( - TypeChange( - computed.NewDiff(Primitive(renderer.before, nil, cty.String), plans.Delete, false), - computed.NewDiff(Primitive(nil, renderer.after, cty.String), plans.Create, false)), - diff.Action, - diff.Replace).RenderHuman(indent, opts) - } - - if !beforeString.IsMultiline && !afterString.IsMultiline { - return fmt.Sprintf("%s %s %s%s", beforeString.RenderSimple(), opts.Colorize.Color("[yellow]->[reset]"), afterString.RenderSimple(), forcesReplacement(diff.Replace, opts)) - } - - beforeLines := strings.Split(beforeString.String, "\n") - afterLines := strings.Split(afterString.String, "\n") - - processIndices := func(beforeIx, afterIx int) { - if beforeIx < 0 || beforeIx >= len(beforeLines) { - lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Create, opts), afterLines[afterIx])) - return - } - - if afterIx < 0 || afterIx >= len(afterLines) { - lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Delete, opts), beforeLines[beforeIx])) - return - } - - lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), beforeLines[beforeIx])) - } - isObjType := func(_ string) bool { - return false - } - - collections.ProcessSlice(beforeLines, afterLines, processIndices, 
isObjType) - } - - // We return early if we find non-multiline strings or JSON strings, so we - // know here that we just render the lines slice properly. - return fmt.Sprintf("<<-EOT%s\n%s\n%s%sEOT%s", - forcesReplacement(diff.Replace, opts), - strings.Join(lines, "\n"), - formatIndent(indent), - writeDiffActionSymbol(plans.NoOp, opts), - nullSuffix(diff.Action, opts)) -} - -func (renderer primitiveRenderer) renderStringDiffAsJson(diff computed.Diff, indent int, opts computed.RenderHumanOpts, before evaluatedString, after evaluatedString) string { - jsonDiff := RendererJsonOpts().Transform(structured.Change{ - BeforeExplicit: diff.Action != plans.Create, - AfterExplicit: diff.Action != plans.Delete, - Before: before.Json, - After: after.Json, - Unknown: false, - BeforeSensitive: false, - AfterSensitive: false, - ReplacePaths: attribute_path.Empty(false), - RelevantAttributes: attribute_path.AlwaysMatcher(), - }) - - action := diff.Action - - jsonOpts := opts.Clone() - jsonOpts.OverrideNullSuffix = true - - var whitespace, replace string - if jsonDiff.Action == plans.NoOp && diff.Action == plans.Update { - // Then this means we are rendering a whitespace only change. The JSON - // differ will have ignored the whitespace changes so that makes the - // diff we are about to print out very confusing without extra - // explanation. - if diff.Replace { - whitespace = " # whitespace changes force replacement" - } else { - whitespace = " # whitespace changes" - } - - // Because we'd be showing no changes otherwise: - jsonOpts.ShowUnchangedChildren = true - - // Whitespace changes should not appear as if edited. - action = plans.NoOp - } else { - // We only show the replace suffix if we didn't print something out - // about whitespace changes. 
- replace = forcesReplacement(diff.Replace, opts) - } - - renderedJsonDiff := jsonDiff.RenderHuman(indent+1, jsonOpts) - - if diff.Action == plans.Create || diff.Action == plans.Delete { - // We don't display the '+' or '-' symbols on the JSON diffs, we should - // still display the '~' for an update action though. - action = plans.NoOp - } - - if strings.Contains(renderedJsonDiff, "\n") { - return fmt.Sprintf("jsonencode(%s\n%s%s%s%s\n%s%s)%s", whitespace, formatIndent(indent+1), writeDiffActionSymbol(action, opts), renderedJsonDiff, replace, formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts)) - } - return fmt.Sprintf("jsonencode(%s)%s%s", renderedJsonDiff, whitespace, replace) -} diff --git a/internal/command/jsonformat/computed/renderers/sensitive.go b/internal/command/jsonformat/computed/renderers/sensitive.go deleted file mode 100644 index cb0b9e7f1dc0..000000000000 --- a/internal/command/jsonformat/computed/renderers/sensitive.go +++ /dev/null @@ -1,50 +0,0 @@ -package renderers - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*sensitiveRenderer)(nil) - -func Sensitive(change computed.Diff, beforeSensitive, afterSensitive bool) computed.DiffRenderer { - return &sensitiveRenderer{ - inner: change, - beforeSensitive: beforeSensitive, - afterSensitive: afterSensitive, - } -} - -type sensitiveRenderer struct { - inner computed.Diff - - beforeSensitive bool - afterSensitive bool -} - -func (renderer sensitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - return fmt.Sprintf("(sensitive value)%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) -} - -func (renderer sensitiveRenderer) WarningsHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) []string { - if (renderer.beforeSensitive == 
renderer.afterSensitive) || renderer.inner.Action == plans.Create || renderer.inner.Action == plans.Delete { - // Only display warnings for sensitive values if they are changing from - // being sensitive or to being sensitive and if they are not being - // destroyed or created. - return []string{} - } - - var warning string - if renderer.beforeSensitive { - warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will no longer be marked as sensitive\n%s # after applying this change.", formatIndent(indent))) - } else { - warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will be marked as sensitive and will not\n%s # display in UI output after applying this change.", formatIndent(indent))) - } - - if renderer.inner.Action == plans.NoOp { - return []string{fmt.Sprintf("%s The value is unchanged.", warning)} - } - return []string{warning} -} diff --git a/internal/command/jsonformat/computed/renderers/set.go b/internal/command/jsonformat/computed/renderers/set.go deleted file mode 100644 index 8848fbc15ada..000000000000 --- a/internal/command/jsonformat/computed/renderers/set.go +++ /dev/null @@ -1,72 +0,0 @@ -package renderers - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*setRenderer)(nil) - -func Set(elements []computed.Diff) computed.DiffRenderer { - return &setRenderer{ - elements: elements, - } -} - -func NestedSet(elements []computed.Diff) computed.DiffRenderer { - return &setRenderer{ - elements: elements, - overrideForcesReplacement: true, - } -} - -type setRenderer struct { - NoWarningsRenderer - - elements []computed.Diff - - overrideForcesReplacement bool -} - -func (renderer setRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - // Sets are a bit finicky, nested sets don't render the forces 
replacement - // suffix themselves, but push it onto their children. So if we are - // overriding the forces replacement setting, we set it to true for children - // and false for ourselves. - displayForcesReplacementInSelf := diff.Replace && !renderer.overrideForcesReplacement - displayForcesReplacementInChildren := diff.Replace && renderer.overrideForcesReplacement - - if len(renderer.elements) == 0 { - return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(displayForcesReplacementInSelf, opts)) - } - - elementOpts := opts.Clone() - elementOpts.OverrideNullSuffix = true - elementOpts.OverrideForcesReplacement = displayForcesReplacementInChildren - - unchangedElements := 0 - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(displayForcesReplacementInSelf, opts))) - for _, element := range renderer.elements { - if element.Action == plans.NoOp && !opts.ShowUnchangedChildren { - unchangedElements++ - continue - } - - for _, warning := range element.WarningsHuman(indent+1, opts) { - buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) - } - buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), element.RenderHuman(indent+1, elementOpts))) - } - - if unchangedElements > 0 { - buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, opts))) - } - - buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) - return buf.String() -} diff --git a/internal/command/jsonformat/computed/renderers/testing.go b/internal/command/jsonformat/computed/renderers/testing.go deleted file mode 100644 index 4adbcc2ee7c2..000000000000 --- a/internal/command/jsonformat/computed/renderers/testing.go +++ /dev/null @@ -1,318 +0,0 @@ -package renderers - -import ( - "sort" - "testing" - - 
"github.com/google/go-cmp/cmp" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -type ValidateDiffFunction func(t *testing.T, diff computed.Diff) - -func validateDiff(t *testing.T, diff computed.Diff, expectedAction plans.Action, expectedReplace bool) { - if diff.Replace != expectedReplace || diff.Action != expectedAction { - t.Errorf("\nreplace:\n\texpected:%t\n\tactual:%t\naction:\n\texpected:%s\n\tactual:%s", expectedReplace, diff.Replace, expectedAction, diff.Action) - } -} - -func ValidatePrimitive(before, after interface{}, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - primitive, ok := diff.Renderer.(*primitiveRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - beforeDiff := cmp.Diff(primitive.before, before) - afterDiff := cmp.Diff(primitive.after, after) - - if len(beforeDiff) > 0 || len(afterDiff) > 0 { - t.Errorf("before diff: (%s), after diff: (%s)", beforeDiff, afterDiff) - } - } -} - -func ValidateObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - object, ok := diff.Renderer.(*objectRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if !object.overrideNullSuffix { - t.Errorf("created the wrong type of object renderer") - } - - validateMapType(t, object.attributes, attributes) - } -} - -func ValidateNestedObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - object, ok := diff.Renderer.(*objectRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if 
object.overrideNullSuffix { - t.Errorf("created the wrong type of object renderer") - } - - validateMapType(t, object.attributes, attributes) - } -} - -func ValidateMap(elements map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - m, ok := diff.Renderer.(*mapRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - validateMapType(t, m.elements, elements) - } -} - -func validateMapType(t *testing.T, actual map[string]computed.Diff, expected map[string]ValidateDiffFunction) { - validateKeys(t, actual, expected) - - for key, expected := range expected { - if actual, ok := actual[key]; ok { - expected(t, actual) - } - } -} - -func validateKeys[C, V any](t *testing.T, actual map[string]C, expected map[string]V) { - if len(actual) != len(expected) { - - var actualAttributes []string - var expectedAttributes []string - - for key := range actual { - actualAttributes = append(actualAttributes, key) - } - for key := range expected { - expectedAttributes = append(expectedAttributes, key) - } - - sort.Strings(actualAttributes) - sort.Strings(expectedAttributes) - - if diff := cmp.Diff(actualAttributes, expectedAttributes); len(diff) > 0 { - t.Errorf("actual and expected attributes did not match: %s", diff) - } - } -} - -func ValidateList(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - list, ok := diff.Renderer.(*listRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if !list.displayContext { - t.Errorf("created the wrong type of list renderer") - } - - validateSliceType(t, list.elements, elements) - } -} - -func ValidateNestedList(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return 
func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - list, ok := diff.Renderer.(*listRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if list.displayContext { - t.Errorf("created the wrong type of list renderer") - } - - validateSliceType(t, list.elements, elements) - } -} - -func ValidateSet(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - set, ok := diff.Renderer.(*setRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - validateSliceType(t, set.elements, elements) - } -} - -func validateSliceType(t *testing.T, actual []computed.Diff, expected []ValidateDiffFunction) { - if len(actual) != len(expected) { - t.Errorf("expected %d elements but found %d elements", len(expected), len(actual)) - return - } - - for ix := 0; ix < len(expected); ix++ { - expected[ix](t, actual[ix]) - } -} - -func ValidateBlock( - attributes map[string]ValidateDiffFunction, - singleBlocks map[string]ValidateDiffFunction, - listBlocks map[string][]ValidateDiffFunction, - mapBlocks map[string]map[string]ValidateDiffFunction, - setBlocks map[string][]ValidateDiffFunction, - action plans.Action, - replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - block, ok := diff.Renderer.(*blockRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - validateKeys(t, block.attributes, attributes) - validateKeys(t, block.blocks.SingleBlocks, singleBlocks) - validateKeys(t, block.blocks.ListBlocks, listBlocks) - validateKeys(t, block.blocks.MapBlocks, mapBlocks) - validateKeys(t, block.blocks.SetBlocks, setBlocks) - - for key, expected := range attributes { - if actual, ok := block.attributes[key]; ok { - expected(t, actual) - } - } - - for key, 
expected := range singleBlocks { - expected(t, block.blocks.SingleBlocks[key]) - } - - for key, expected := range listBlocks { - if actual, ok := block.blocks.ListBlocks[key]; ok { - if len(actual) != len(expected) { - t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) - } - for ix := range expected { - expected[ix](t, actual[ix]) - } - } - } - - for key, expected := range setBlocks { - if actual, ok := block.blocks.SetBlocks[key]; ok { - if len(actual) != len(expected) { - t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) - } - for ix := range expected { - expected[ix](t, actual[ix]) - } - } - } - - for key, expected := range setBlocks { - if actual, ok := block.blocks.SetBlocks[key]; ok { - if len(actual) != len(expected) { - t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) - } - for ix := range expected { - expected[ix](t, actual[ix]) - } - } - } - - for key, expected := range mapBlocks { - if actual, ok := block.blocks.MapBlocks[key]; ok { - if len(actual) != len(expected) { - t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) - } - for dKey := range expected { - expected[dKey](t, actual[dKey]) - } - } - } - } -} - -func ValidateTypeChange(before, after ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - typeChange, ok := diff.Renderer.(*typeChangeRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - before(t, typeChange.before) - after(t, typeChange.after) - } -} - -func ValidateSensitive(inner ValidateDiffFunction, beforeSensitive, afterSensitive bool, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - sensitive, ok := 
diff.Renderer.(*sensitiveRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if beforeSensitive != sensitive.beforeSensitive || afterSensitive != sensitive.afterSensitive { - t.Errorf("before or after sensitive values don't match:\n\texpected; before: %t after: %t\n\tactual; before: %t, after: %t", beforeSensitive, afterSensitive, sensitive.beforeSensitive, sensitive.afterSensitive) - } - - inner(t, sensitive.inner) - } -} - -func ValidateUnknown(before ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { - return func(t *testing.T, diff computed.Diff) { - validateDiff(t, diff, action, replace) - - unknown, ok := diff.Renderer.(*unknownRenderer) - if !ok { - t.Errorf("invalid renderer type: %T", diff.Renderer) - return - } - - if before == nil { - if unknown.before.Renderer != nil { - t.Errorf("did not expect a before renderer, but found one") - } - return - } - - if unknown.before.Renderer == nil { - t.Errorf("expected a before renderer, but found none") - } - - before(t, unknown.before) - } -} diff --git a/internal/command/jsonformat/computed/renderers/unknown.go b/internal/command/jsonformat/computed/renderers/unknown.go deleted file mode 100644 index f24b836a032e..000000000000 --- a/internal/command/jsonformat/computed/renderers/unknown.go +++ /dev/null @@ -1,33 +0,0 @@ -package renderers - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - - "github.com/hashicorp/terraform/internal/plans" -) - -var _ computed.DiffRenderer = (*unknownRenderer)(nil) - -func Unknown(before computed.Diff) computed.DiffRenderer { - return &unknownRenderer{ - before: before, - } -} - -type unknownRenderer struct { - NoWarningsRenderer - - before computed.Diff -} - -func (renderer unknownRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { - if diff.Action == plans.Create { - return fmt.Sprintf("(known after apply)%s", 
forcesReplacement(diff.Replace, opts)) - } - - // Never render null suffix for children of unknown changes. - opts.OverrideNullSuffix = true - return fmt.Sprintf("%s -> (known after apply)%s", renderer.before.RenderHuman(indent, opts), forcesReplacement(diff.Replace, opts)) -} diff --git a/internal/command/jsonformat/computed/renderers/util.go b/internal/command/jsonformat/computed/renderers/util.go deleted file mode 100644 index bb29acccbbc4..000000000000 --- a/internal/command/jsonformat/computed/renderers/util.go +++ /dev/null @@ -1,90 +0,0 @@ -package renderers - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/command/format" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -// NoWarningsRenderer defines a Warnings function that returns an empty list of -// warnings. This can be used by other renderers to ensure we don't see lots of -// repeats of this empty function. -type NoWarningsRenderer struct{} - -// WarningsHuman returns an empty slice, as the name NoWarningsRenderer suggests. -func (render NoWarningsRenderer) WarningsHuman(_ computed.Diff, _ int, _ computed.RenderHumanOpts) []string { - return nil -} - -// nullSuffix returns the `-> null` suffix if the change is a delete action, and -// it has not been overridden. -func nullSuffix(action plans.Action, opts computed.RenderHumanOpts) string { - if !opts.OverrideNullSuffix && action == plans.Delete { - return opts.Colorize.Color(" [dark_gray]-> null[reset]") - } - return "" -} - -// forcesReplacement returns the `# forces replacement` suffix if this change is -// driving the entire resource to be replaced. 
-func forcesReplacement(replace bool, opts computed.RenderHumanOpts) string { - if replace || opts.OverrideForcesReplacement { - return opts.Colorize.Color(" [red]# forces replacement[reset]") - } - return "" -} - -// indent returns whitespace that is the required length for the specified -// indent. -func formatIndent(indent int) string { - return strings.Repeat(" ", indent) -} - -// unchanged prints out a description saying how many of 'keyword' have been -// hidden because they are unchanged or noop actions. -func unchanged(keyword string, count int, opts computed.RenderHumanOpts) string { - if count == 1 { - return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %s hidden)[reset]", count, keyword)) - } - return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %ss hidden)[reset]", count, keyword)) -} - -// EnsureValidAttributeName checks if `name` contains any HCL syntax and calls -// and returns hclEscapeString. -func EnsureValidAttributeName(name string) string { - if !hclsyntax.ValidIdentifier(name) { - return hclEscapeString(name) - } - return name -} - -// hclEscapeString formats the input string into a format that is safe for -// rendering within HCL. -// -// Note, this function doesn't actually do a very good job of this currently. We -// need to expose some internal functions from HCL in a future version and call -// them from here. For now, just use "%q" formatting. -func hclEscapeString(str string) string { - // TODO: Replace this with more complete HCL logic instead of the simple - // go workaround. - return fmt.Sprintf("%q", str) -} - -// writeDiffActionSymbol writes out the symbols for the associated action, and -// handles localized colorization of the symbol as well as indenting the symbol -// to be 4 spaces wide. -// -// If the opts has HideDiffActionSymbols set then this function returns an empty -// string. 
-func writeDiffActionSymbol(action plans.Action, opts computed.RenderHumanOpts) string { - if opts.HideDiffActionSymbols { - return "" - } - return fmt.Sprintf("%s ", opts.Colorize.Color(format.DiffActionSymbol(action))) -} diff --git a/internal/command/jsonformat/diff.go b/internal/command/jsonformat/diff.go deleted file mode 100644 index fc8b60009001..000000000000 --- a/internal/command/jsonformat/diff.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsonformat - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/differ" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/plans" -) - -func precomputeDiffs(plan Plan, mode plans.Mode) diffs { - diffs := diffs{ - outputs: make(map[string]computed.Diff), - } - - for _, drift := range plan.ResourceDrift { - - var relevantAttrs attribute_path.Matcher - if mode == plans.RefreshOnlyMode { - // For a refresh only plan, we show all the drift. - relevantAttrs = attribute_path.AlwaysMatcher() - } else { - matcher := attribute_path.Empty(true) - - // Otherwise we only want to show the drift changes that are - // relevant. - for _, attr := range plan.RelevantAttributes { - if len(attr.Resource) == 0 || attr.Resource == drift.Address { - matcher = attribute_path.AppendSingle(matcher, attr.Attr) - } - } - - if len(matcher.Paths) > 0 { - relevantAttrs = matcher - } - } - - if relevantAttrs == nil { - // If we couldn't build a relevant attribute matcher, then we are - // not going to show anything for this drift. 
- continue - } - - schema := plan.getSchema(drift) - change := structured.FromJsonChange(drift.Change, relevantAttrs) - diffs.drift = append(diffs.drift, diff{ - change: drift, - diff: differ.ComputeDiffForBlock(change, schema.Block), - }) - } - - for _, change := range plan.ResourceChanges { - schema := plan.getSchema(change) - structuredChange := structured.FromJsonChange(change.Change, attribute_path.AlwaysMatcher()) - diffs.changes = append(diffs.changes, diff{ - change: change, - diff: differ.ComputeDiffForBlock(structuredChange, schema.Block), - }) - } - - for key, output := range plan.OutputChanges { - change := structured.FromJsonChange(output, attribute_path.AlwaysMatcher()) - diffs.outputs[key] = differ.ComputeDiffForOutput(change) - } - - return diffs -} - -type diffs struct { - drift []diff - changes []diff - outputs map[string]computed.Diff -} - -func (d diffs) Empty() bool { - for _, change := range d.changes { - if change.diff.Action != plans.NoOp || change.Moved() { - return false - } - } - - for _, output := range d.outputs { - if output.Action != plans.NoOp { - return false - } - } - - return true -} - -type diff struct { - change jsonplan.ResourceChange - diff computed.Diff -} - -func (d diff) Moved() bool { - return len(d.change.PreviousAddress) > 0 && d.change.PreviousAddress != d.change.Address -} diff --git a/internal/command/jsonformat/differ/attribute.go b/internal/command/jsonformat/differ/attribute.go deleted file mode 100644 index e1c913ddc769..000000000000 --- a/internal/command/jsonformat/differ/attribute.go +++ /dev/null @@ -1,84 +0,0 @@ -package differ - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - - "github.com/hashicorp/terraform/internal/command/jsonprovider" -) - -func ComputeDiffForAttribute(change structured.Change, attribute 
*jsonprovider.Attribute) computed.Diff { - if attribute.AttributeNestedType != nil { - return computeDiffForNestedAttribute(change, attribute.AttributeNestedType) - } - return ComputeDiffForType(change, unmarshalAttribute(attribute)) -} - -func computeDiffForNestedAttribute(change structured.Change, nested *jsonprovider.NestedType) computed.Diff { - if sensitive, ok := checkForSensitiveNestedAttribute(change, nested); ok { - return sensitive - } - - if computed, ok := checkForUnknownNestedAttribute(change, nested); ok { - return computed - } - - switch NestingMode(nested.NestingMode) { - case nestingModeSingle, nestingModeGroup: - return computeAttributeDiffAsNestedObject(change, nested.Attributes) - case nestingModeMap: - return computeAttributeDiffAsNestedMap(change, nested.Attributes) - case nestingModeList: - return computeAttributeDiffAsNestedList(change, nested.Attributes) - case nestingModeSet: - return computeAttributeDiffAsNestedSet(change, nested.Attributes) - default: - panic("unrecognized nesting mode: " + nested.NestingMode) - } -} - -func ComputeDiffForType(change structured.Change, ctype cty.Type) computed.Diff { - if sensitive, ok := checkForSensitiveType(change, ctype); ok { - return sensitive - } - - if computed, ok := checkForUnknownType(change, ctype); ok { - return computed - } - - switch { - case ctype == cty.NilType, ctype == cty.DynamicPseudoType: - // Forward nil or dynamic types over to be processed as outputs. - // There is nothing particularly special about the way outputs are - // processed that make this unsafe, we could just as easily call this - // function computeChangeForDynamicValues(), but external callers will - // only be in this situation when processing outputs so this function - // is named for their benefit. 
- return ComputeDiffForOutput(change) - case ctype.IsPrimitiveType(): - return computeAttributeDiffAsPrimitive(change, ctype) - case ctype.IsObjectType(): - return computeAttributeDiffAsObject(change, ctype.AttributeTypes()) - case ctype.IsMapType(): - return computeAttributeDiffAsMap(change, ctype.ElementType()) - case ctype.IsListType(): - return computeAttributeDiffAsList(change, ctype.ElementType()) - case ctype.IsTupleType(): - return computeAttributeDiffAsTuple(change, ctype.TupleElementTypes()) - case ctype.IsSetType(): - return computeAttributeDiffAsSet(change, ctype.ElementType()) - default: - panic("unrecognized type: " + ctype.FriendlyName()) - } -} - -func unmarshalAttribute(attribute *jsonprovider.Attribute) cty.Type { - ctyType, err := ctyjson.UnmarshalType(attribute.AttributeType) - if err != nil { - panic("could not unmarshal attribute type: " + err.Error()) - } - return ctyType -} diff --git a/internal/command/jsonformat/differ/block.go b/internal/command/jsonformat/differ/block.go deleted file mode 100644 index 9b2375c76fb7..000000000000 --- a/internal/command/jsonformat/differ/block.go +++ /dev/null @@ -1,118 +0,0 @@ -package differ - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -func ComputeDiffForBlock(change structured.Change, block *jsonprovider.Block) computed.Diff { - if sensitive, ok := checkForSensitiveBlock(change, block); ok { - return sensitive - } - - if unknown, ok := checkForUnknownBlock(change, block); ok { - return unknown - } - - current := change.GetDefaultActionForIteration() - - blockValue := change.AsMap() - - attributes := 
make(map[string]computed.Diff) - for key, attr := range block.Attributes { - childValue := blockValue.GetChild(key) - - if !childValue.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. - childValue = childValue.AsNoOp() - } - - // Empty strings in blocks should be considered null for legacy reasons. - // The SDK doesn't support null strings yet, so we work around this now. - if before, ok := childValue.Before.(string); ok && len(before) == 0 { - childValue.Before = nil - } - if after, ok := childValue.After.(string); ok && len(after) == 0 { - childValue.After = nil - } - - // Always treat changes to blocks as implicit. - childValue.BeforeExplicit = false - childValue.AfterExplicit = false - - childChange := ComputeDiffForAttribute(childValue, attr) - if childChange.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { - // Don't record nil values at all in blocks. - continue - } - - attributes[key] = childChange - current = collections.CompareActions(current, childChange.Action) - } - - blocks := renderers.Blocks{ - ReplaceBlocks: make(map[string]bool), - BeforeSensitiveBlocks: make(map[string]bool), - AfterSensitiveBlocks: make(map[string]bool), - SingleBlocks: make(map[string]computed.Diff), - ListBlocks: make(map[string][]computed.Diff), - SetBlocks: make(map[string][]computed.Diff), - MapBlocks: make(map[string]map[string]computed.Diff), - } - - for key, blockType := range block.BlockTypes { - childValue := blockValue.GetChild(key) - - if !childValue.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. 
- childValue = childValue.AsNoOp() - } - - beforeSensitive := childValue.IsBeforeSensitive() - afterSensitive := childValue.IsAfterSensitive() - forcesReplacement := childValue.ReplacePaths.Matches() - - switch NestingMode(blockType.NestingMode) { - case nestingModeSet: - diffs, action := computeBlockDiffsAsSet(childValue, blockType.Block) - if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { - // Don't record nil values in blocks. - continue - } - blocks.AddAllSetBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) - current = collections.CompareActions(current, action) - case nestingModeList: - diffs, action := computeBlockDiffsAsList(childValue, blockType.Block) - if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { - // Don't record nil values in blocks. - continue - } - blocks.AddAllListBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) - current = collections.CompareActions(current, action) - case nestingModeMap: - diffs, action := computeBlockDiffsAsMap(childValue, blockType.Block) - if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { - // Don't record nil values in blocks. - continue - } - blocks.AddAllMapBlocks(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) - current = collections.CompareActions(current, action) - case nestingModeSingle, nestingModeGroup: - diff := ComputeDiffForBlock(childValue, blockType.Block) - if diff.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { - // Don't record nil values in blocks. 
- continue - } - blocks.AddSingleBlock(key, diff, forcesReplacement, beforeSensitive, afterSensitive) - current = collections.CompareActions(current, diff.Action) - default: - panic("unrecognized nesting mode: " + blockType.NestingMode) - } - } - - return computed.NewDiff(renderers.Block(attributes, blocks), current, change.ReplacePaths.Matches()) -} diff --git a/internal/command/jsonformat/differ/differ.go b/internal/command/jsonformat/differ/differ.go deleted file mode 100644 index 3ca584e8d4ca..000000000000 --- a/internal/command/jsonformat/differ/differ.go +++ /dev/null @@ -1,12 +0,0 @@ -package differ - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" -) - -// asDiff is a helper function to abstract away some simple and common -// functionality when converting a renderer into a concrete diff. -func asDiff(change structured.Change, renderer computed.DiffRenderer) computed.Diff { - return computed.NewDiff(renderer, change.CalculateAction(), change.ReplacePaths.Matches()) -} diff --git a/internal/command/jsonformat/differ/list.go b/internal/command/jsonformat/differ/list.go deleted file mode 100644 index 0ce937df46e8..000000000000 --- a/internal/command/jsonformat/differ/list.go +++ /dev/null @@ -1,87 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -func computeAttributeDiffAsList(change structured.Change, elementType cty.Type) 
computed.Diff { - sliceValue := change.AsSlice() - - processIndices := func(beforeIx, afterIx int) computed.Diff { - value := sliceValue.GetChild(beforeIx, afterIx) - - // It's actually really difficult to render the diffs when some indices - // within a slice are relevant and others aren't. To make this simpler - // we just treat all children of a relevant list or set as also - // relevant. - // - // Interestingly the terraform plan builder also agrees with this, and - // never sets relevant attributes beneath lists or sets. We're just - // going to enforce this logic here as well. If the collection is - // relevant (decided elsewhere), then every element in the collection is - // also relevant. To be clear, in practice even if we didn't do the - // following explicitly the effect would be the same. It's just nicer - // for us to be clear about the behaviour we expect. - // - // What makes this difficult is the fact that the beforeIx and afterIx - // can be different, and it's quite difficult to work out which one is - // the relevant one. For nested lists, block lists, and tuples it's much - // easier because we always process the same indices in the before and - // after. 
- value.RelevantAttributes = attribute_path.AlwaysMatcher() - - return ComputeDiffForType(value, elementType) - } - - isObjType := func(_ interface{}) bool { - return elementType.IsObjectType() - } - - elements, current := collections.TransformSlice(sliceValue.Before, sliceValue.After, processIndices, isObjType) - return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) -} - -func computeAttributeDiffAsNestedList(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - processNestedList(change, func(value structured.Change) { - element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ - Attributes: attributes, - NestingMode: "single", - }) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - }) - return computed.NewDiff(renderers.NestedList(elements), current, change.ReplacePaths.Matches()) -} - -func computeBlockDiffsAsList(change structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - processNestedList(change, func(value structured.Change) { - element := ComputeDiffForBlock(value, block) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - }) - return elements, current -} - -func processNestedList(change structured.Change, process func(value structured.Change)) { - sliceValue := change.AsSlice() - for ix := 0; ix < len(sliceValue.Before) || ix < len(sliceValue.After); ix++ { - value := sliceValue.GetChild(ix, ix) - if !value.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. 
- value = value.AsNoOp() - } - process(value) - } -} diff --git a/internal/command/jsonformat/differ/map.go b/internal/command/jsonformat/differ/map.go deleted file mode 100644 index e9f09542045e..000000000000 --- a/internal/command/jsonformat/differ/map.go +++ /dev/null @@ -1,53 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -func computeAttributeDiffAsMap(change structured.Change, elementType cty.Type) computed.Diff { - mapValue := change.AsMap() - elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.AllKeys(), func(key string) computed.Diff { - value := mapValue.GetChild(key) - if !value.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. - value = value.AsNoOp() - } - return ComputeDiffForType(value, elementType) - }) - return computed.NewDiff(renderers.Map(elements), current, change.ReplacePaths.Matches()) -} - -func computeAttributeDiffAsNestedMap(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { - mapValue := change.AsMap() - elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { - value := mapValue.GetChild(key) - if !value.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. 
- value = value.AsNoOp() - } - return computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ - Attributes: attributes, - NestingMode: "single", - }) - }) - return computed.NewDiff(renderers.NestedMap(elements), current, change.ReplacePaths.Matches()) -} - -func computeBlockDiffsAsMap(change structured.Change, block *jsonprovider.Block) (map[string]computed.Diff, plans.Action) { - mapValue := change.AsMap() - return collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { - value := mapValue.GetChild(key) - if !value.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. - value = value.AsNoOp() - } - return ComputeDiffForBlock(value, block) - }) -} diff --git a/internal/command/jsonformat/differ/object.go b/internal/command/jsonformat/differ/object.go deleted file mode 100644 index 657089af97ab..000000000000 --- a/internal/command/jsonformat/differ/object.go +++ /dev/null @@ -1,67 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -func computeAttributeDiffAsObject(change structured.Change, attributes map[string]cty.Type) computed.Diff { - attributeDiffs, action := processObject(change, attributes, func(value structured.Change, ctype cty.Type) computed.Diff { - return ComputeDiffForType(value, ctype) - }) - return computed.NewDiff(renderers.Object(attributeDiffs), action, change.ReplacePaths.Matches()) -} - -func computeAttributeDiffAsNestedObject(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff 
{ - attributeDiffs, action := processObject(change, attributes, func(value structured.Change, attribute *jsonprovider.Attribute) computed.Diff { - return ComputeDiffForAttribute(value, attribute) - }) - return computed.NewDiff(renderers.NestedObject(attributeDiffs), action, change.ReplacePaths.Matches()) -} - -// processObject steps through the children of value as if it is an object and -// calls out to the provided computeDiff function once it has collated the -// diffs for each child attribute. -// -// We have to make this generic as attributes and nested objects process either -// cty.Type or jsonprovider.Attribute children respectively. And we want to -// reuse as much code as possible. -// -// Also, as it generic we cannot make this function a method on Change as you -// can't create generic methods on structs. Instead, we make this a generic -// function that receives the value as an argument. -func processObject[T any](v structured.Change, attributes map[string]T, computeDiff func(structured.Change, T) computed.Diff) (map[string]computed.Diff, plans.Action) { - attributeDiffs := make(map[string]computed.Diff) - mapValue := v.AsMap() - - currentAction := v.GetDefaultActionForIteration() - for key, attribute := range attributes { - attributeValue := mapValue.GetChild(key) - - if !attributeValue.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. - attributeValue = attributeValue.AsNoOp() - } - - // We always assume changes to object are implicit. - attributeValue.BeforeExplicit = false - attributeValue.AfterExplicit = false - - attributeDiff := computeDiff(attributeValue, attribute) - if attributeDiff.Action == plans.NoOp && attributeValue.Before == nil && attributeValue.After == nil { - // We skip attributes of objects that are null both before and - // after. We don't even count these as unchanged attributes. 
- continue - } - attributeDiffs[key] = attributeDiff - currentAction = collections.CompareActions(currentAction, attributeDiff.Action) - } - - return attributeDiffs, currentAction -} diff --git a/internal/command/jsonformat/differ/output.go b/internal/command/jsonformat/differ/output.go deleted file mode 100644 index 5ab4bf411641..000000000000 --- a/internal/command/jsonformat/differ/output.go +++ /dev/null @@ -1,22 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" -) - -func ComputeDiffForOutput(change structured.Change) computed.Diff { - if sensitive, ok := checkForSensitiveType(change, cty.DynamicPseudoType); ok { - return sensitive - } - - if unknown, ok := checkForUnknownType(change, cty.DynamicPseudoType); ok { - return unknown - } - - jsonOpts := renderers.RendererJsonOpts() - return jsonOpts.Transform(change) -} diff --git a/internal/command/jsonformat/differ/primitive.go b/internal/command/jsonformat/differ/primitive.go deleted file mode 100644 index a1dcc6ec21cf..000000000000 --- a/internal/command/jsonformat/differ/primitive.go +++ /dev/null @@ -1,13 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" -) - -func computeAttributeDiffAsPrimitive(change structured.Change, ctype cty.Type) computed.Diff { - return asDiff(change, renderers.Primitive(change.Before, change.After, ctype)) -} diff --git a/internal/command/jsonformat/differ/sensitive.go b/internal/command/jsonformat/differ/sensitive.go deleted file mode 100644 index 90a7a2886e5f..000000000000 --- 
a/internal/command/jsonformat/differ/sensitive.go +++ /dev/null @@ -1,43 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -type CreateSensitiveRenderer func(computed.Diff, bool, bool) computed.DiffRenderer - -func checkForSensitiveType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { - return change.CheckForSensitive( - func(value structured.Change) computed.Diff { - return ComputeDiffForType(value, ctype) - }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { - return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) - }, - ) -} - -func checkForSensitiveNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { - return change.CheckForSensitive( - func(value structured.Change) computed.Diff { - return computeDiffForNestedAttribute(value, attribute) - }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { - return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) - }, - ) -} - -func checkForSensitiveBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { - return change.CheckForSensitive( - func(value structured.Change) computed.Diff { - return ComputeDiffForBlock(value, block) - }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { - return computed.NewDiff(renderers.SensitiveBlock(inner, beforeSensitive, afterSensitive), action, 
change.ReplacePaths.Matches()) - }, - ) -} diff --git a/internal/command/jsonformat/differ/set.go b/internal/command/jsonformat/differ/set.go deleted file mode 100644 index 8e0bccbbd4b6..000000000000 --- a/internal/command/jsonformat/differ/set.go +++ /dev/null @@ -1,132 +0,0 @@ -package differ - -import ( - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/plans" -) - -func computeAttributeDiffAsSet(change structured.Change, elementType cty.Type) computed.Diff { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - processSet(change, func(value structured.Change) { - element := ComputeDiffForType(value, elementType) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - }) - return computed.NewDiff(renderers.Set(elements), current, change.ReplacePaths.Matches()) -} - -func computeAttributeDiffAsNestedSet(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - processSet(change, func(value structured.Change) { - element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ - Attributes: attributes, - NestingMode: "single", - }) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - }) - return computed.NewDiff(renderers.NestedSet(elements), current, change.ReplacePaths.Matches()) -} - -func computeBlockDiffsAsSet(change 
structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - processSet(change, func(value structured.Change) { - element := ComputeDiffForBlock(value, block) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - }) - return elements, current -} - -func processSet(change structured.Change, process func(value structured.Change)) { - sliceValue := change.AsSlice() - - foundInBefore := make(map[int]int) - foundInAfter := make(map[int]int) - - // O(n^2) operation here to find matching pairs in the set, so we can make - // the display look pretty. There might be a better way to do this, so look - // here for potential optimisations. - - for ix := 0; ix < len(sliceValue.Before); ix++ { - matched := false - for jx := 0; jx < len(sliceValue.After); jx++ { - if _, ok := foundInAfter[jx]; ok { - // We've already found a match for this after value. - continue - } - - child := sliceValue.GetChild(ix, jx) - if reflect.DeepEqual(child.Before, child.After) && child.IsBeforeSensitive() == child.IsAfterSensitive() && !child.IsUnknown() { - matched = true - foundInBefore[ix] = jx - foundInAfter[jx] = ix - } - } - - if !matched { - foundInBefore[ix] = -1 - } - } - - clearRelevantStatus := func(change structured.Change) structured.Change { - // It's actually really difficult to render the diffs when some indices - // within a slice are relevant and others aren't. To make this simpler - // we just treat all children of a relevant list or set as also - // relevant. - // - // Interestingly the terraform plan builder also agrees with this, and - // never sets relevant attributes beneath lists or sets. We're just - // going to enforce this logic here as well. If the collection is - // relevant (decided elsewhere), then every element in the collection is - // also relevant. 
To be clear, in practice even if we didn't do the - // following explicitly the effect would be the same. It's just nicer - // for us to be clear about the behaviour we expect. - // - // What makes this difficult is the fact that the beforeIx and afterIx - // can be different, and it's quite difficult to work out which one is - // the relevant one. For nested lists, block lists, and tuples it's much - // easier because we always process the same indices in the before and - // after. - change.RelevantAttributes = attribute_path.AlwaysMatcher() - return change - } - - // Now everything in before should be a key in foundInBefore and a value - // in foundInAfter. If a key is mapped to -1 in foundInBefore it means it - // does not have an equivalent in foundInAfter and so has been deleted. - // Everything in foundInAfter has a matching value in foundInBefore, but - // some values in after may not be in foundInAfter. This means these values - // are newly created. - - for ix := 0; ix < len(sliceValue.Before); ix++ { - if jx := foundInBefore[ix]; jx >= 0 { - child := clearRelevantStatus(sliceValue.GetChild(ix, jx)) - process(child) - continue - } - child := clearRelevantStatus(sliceValue.GetChild(ix, len(sliceValue.After))) - process(child) - } - - for jx := 0; jx < len(sliceValue.After); jx++ { - if _, ok := foundInAfter[jx]; ok { - // Then this value was handled in the previous for loop. 
- continue - } - child := clearRelevantStatus(sliceValue.GetChild(len(sliceValue.Before), jx)) - process(child) - } -} diff --git a/internal/command/jsonformat/differ/tuple.go b/internal/command/jsonformat/differ/tuple.go deleted file mode 100644 index f45023b0f67e..000000000000 --- a/internal/command/jsonformat/differ/tuple.go +++ /dev/null @@ -1,27 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" -) - -func computeAttributeDiffAsTuple(change structured.Change, elementTypes []cty.Type) computed.Diff { - var elements []computed.Diff - current := change.GetDefaultActionForIteration() - sliceValue := change.AsSlice() - for ix, elementType := range elementTypes { - childValue := sliceValue.GetChild(ix, ix) - if !childValue.RelevantAttributes.MatchesPartial() { - // Mark non-relevant attributes as unchanged. 
- childValue = childValue.AsNoOp() - } - element := ComputeDiffForType(childValue, elementType) - elements = append(elements, element) - current = collections.CompareActions(current, element.Action) - } - return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) -} diff --git a/internal/command/jsonformat/differ/unknown.go b/internal/command/jsonformat/differ/unknown.go deleted file mode 100644 index 2a0bfa5e2152..000000000000 --- a/internal/command/jsonformat/differ/unknown.go +++ /dev/null @@ -1,63 +0,0 @@ -package differ - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" -) - -func checkForUnknownType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { - return change.CheckForUnknown( - false, - processUnknown, - createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { - return ComputeDiffForType(value, ctype) - })) -} - -func checkForUnknownNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { - - // We want our child attributes to show up as computed instead of deleted. - // Let's populate that here. - childUnknown := make(map[string]interface{}) - for key := range attribute.Attributes { - childUnknown[key] = true - } - - return change.CheckForUnknown( - childUnknown, - processUnknown, - createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { - return computeDiffForNestedAttribute(value, attribute) - })) -} - -func checkForUnknownBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { - - // We want our child attributes to show up as computed instead of deleted. - // Let's populate that here. 
- childUnknown := make(map[string]interface{}) - for key := range block.Attributes { - childUnknown[key] = true - } - - return change.CheckForUnknown( - childUnknown, - processUnknown, - createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { - return ComputeDiffForBlock(value, block) - })) -} - -func processUnknown(current structured.Change) computed.Diff { - return asDiff(current, renderers.Unknown(computed.Diff{})) -} - -func createProcessUnknownWithBefore(computeDiff func(value structured.Change) computed.Diff) structured.ProcessUnknownWithBefore { - return func(current structured.Change, before structured.Change) computed.Diff { - return asDiff(current, renderers.Unknown(computeDiff(before))) - } -} diff --git a/internal/command/jsonformat/jsondiff/diff.go b/internal/command/jsonformat/jsondiff/diff.go deleted file mode 100644 index 99bc4137c8ec..000000000000 --- a/internal/command/jsonformat/jsondiff/diff.go +++ /dev/null @@ -1,148 +0,0 @@ -package jsondiff - -import ( - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/command/jsonformat/collections" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/plans" -) - -type TransformPrimitiveJson func(before, after interface{}, ctype cty.Type, action plans.Action) computed.Diff -type TransformObjectJson func(map[string]computed.Diff, plans.Action) computed.Diff -type TransformArrayJson func([]computed.Diff, plans.Action) computed.Diff -type TransformUnknownJson func(computed.Diff, plans.Action) computed.Diff -type TransformSensitiveJson func(computed.Diff, bool, bool, plans.Action) computed.Diff -type TransformTypeChangeJson func(before, after computed.Diff, action plans.Action) computed.Diff - -// JsonOpts defines the external callback functions that callers should -// implement to process the supplied diffs. 
-type JsonOpts struct { - Primitive TransformPrimitiveJson - Object TransformObjectJson - Array TransformArrayJson - Unknown TransformUnknownJson - Sensitive TransformSensitiveJson - TypeChange TransformTypeChangeJson -} - -// Transform accepts a generic before and after value that is assumed to be JSON -// formatted and transforms it into a computed.Diff, using the callbacks -// supplied in the JsonOpts class. -func (opts JsonOpts) Transform(change structured.Change) computed.Diff { - if sensitive, ok := opts.processSensitive(change); ok { - return sensitive - } - - if unknown, ok := opts.processUnknown(change); ok { - return unknown - } - - beforeType := GetType(change.Before) - afterType := GetType(change.After) - - deleted := afterType == Null && !change.AfterExplicit - created := beforeType == Null && !change.BeforeExplicit - - if beforeType == afterType || (created || deleted) { - targetType := beforeType - if targetType == Null { - targetType = afterType - } - return opts.processUpdate(change, targetType) - } - - b := opts.processUpdate(change.AsDelete(), beforeType) - a := opts.processUpdate(change.AsCreate(), afterType) - return opts.TypeChange(b, a, plans.Update) -} - -func (opts JsonOpts) processUpdate(change structured.Change, jtype Type) computed.Diff { - switch jtype { - case Null: - return opts.processPrimitive(change, cty.NilType) - case Bool: - return opts.processPrimitive(change, cty.Bool) - case String: - return opts.processPrimitive(change, cty.String) - case Number: - return opts.processPrimitive(change, cty.Number) - case Object: - return opts.processObject(change.AsMap()) - case Array: - return opts.processArray(change.AsSlice()) - default: - panic("unrecognized json type: " + jtype) - } -} - -func (opts JsonOpts) processPrimitive(change structured.Change, ctype cty.Type) computed.Diff { - beforeMissing := change.Before == nil && !change.BeforeExplicit - afterMissing := change.After == nil && !change.AfterExplicit - - var action plans.Action 
- switch { - case beforeMissing && !afterMissing: - action = plans.Create - case !beforeMissing && afterMissing: - action = plans.Delete - case reflect.DeepEqual(change.Before, change.After): - action = plans.NoOp - default: - action = plans.Update - } - - return opts.Primitive(change.Before, change.After, ctype, action) -} - -func (opts JsonOpts) processArray(change structured.ChangeSlice) computed.Diff { - processIndices := func(beforeIx, afterIx int) computed.Diff { - // It's actually really difficult to render the diffs when some indices - // within a list are relevant and others aren't. To make this simpler - // we just treat all children of a relevant list as also relevant, so we - // ignore the relevant attributes field. - // - // Interestingly the terraform plan builder also agrees with this, and - // never sets relevant attributes beneath lists or sets. We're just - // going to enforce this logic here as well. If the list is relevant - // (decided elsewhere), then every element in the list is also relevant. 
- return opts.Transform(change.GetChild(beforeIx, afterIx)) - } - - isObjType := func(value interface{}) bool { - return GetType(value) == Object - } - - return opts.Array(collections.TransformSlice(change.Before, change.After, processIndices, isObjType)) -} - -func (opts JsonOpts) processObject(change structured.ChangeMap) computed.Diff { - return opts.Object(collections.TransformMap(change.Before, change.After, change.AllKeys(), func(key string) computed.Diff { - child := change.GetChild(key) - if !child.RelevantAttributes.MatchesPartial() { - child = child.AsNoOp() - } - - return opts.Transform(child) - })) -} - -func (opts JsonOpts) processUnknown(change structured.Change) (computed.Diff, bool) { - return change.CheckForUnknown( - false, - func(current structured.Change) computed.Diff { - return opts.Unknown(computed.Diff{}, plans.Create) - }, func(current structured.Change, before structured.Change) computed.Diff { - return opts.Unknown(opts.Transform(before), plans.Update) - }, - ) -} - -func (opts JsonOpts) processSensitive(change structured.Change) (computed.Diff, bool) { - return change.CheckForSensitive(opts.Transform, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { - return opts.Sensitive(inner, beforeSensitive, afterSensitive, action) - }) -} diff --git a/internal/command/jsonformat/plan.go b/internal/command/jsonformat/plan.go deleted file mode 100644 index 5deaeca54467..000000000000 --- a/internal/command/jsonformat/plan.go +++ /dev/null @@ -1,481 +0,0 @@ -package jsonformat - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/computed/renderers" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - 
"github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/plans" -) - -type PlanRendererOpt int - -const ( - detectedDrift string = "drift" - proposedChange string = "change" - - Errored PlanRendererOpt = iota - CanNotApply -) - -type Plan struct { - PlanFormatVersion string `json:"plan_format_version"` - OutputChanges map[string]jsonplan.Change `json:"output_changes"` - ResourceChanges []jsonplan.ResourceChange `json:"resource_changes"` - ResourceDrift []jsonplan.ResourceChange `json:"resource_drift"` - RelevantAttributes []jsonplan.ResourceAttr `json:"relevant_attributes"` - - ProviderFormatVersion string `json:"provider_format_version"` - ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` -} - -func (plan Plan) getSchema(change jsonplan.ResourceChange) *jsonprovider.Schema { - switch change.Mode { - case jsonstate.ManagedResourceMode: - return plan.ProviderSchemas[change.ProviderName].ResourceSchemas[change.Type] - case jsonstate.DataResourceMode: - return plan.ProviderSchemas[change.ProviderName].DataSourceSchemas[change.Type] - default: - panic("found unrecognized resource mode: " + change.Mode) - } -} - -func (plan Plan) renderHuman(renderer Renderer, mode plans.Mode, opts ...PlanRendererOpt) { - checkOpts := func(target PlanRendererOpt) bool { - for _, opt := range opts { - if opt == target { - return true - } - } - return false - } - - diffs := precomputeDiffs(plan, mode) - haveRefreshChanges := renderHumanDiffDrift(renderer, diffs, mode) - - willPrintResourceChanges := false - counts := make(map[plans.Action]int) - var changes []diff - for _, diff := range diffs.changes { - action := jsonplan.UnmarshalActions(diff.change.Change.Actions) - if action == plans.NoOp && !diff.Moved() { - // Don't show anything for NoOp changes. - continue - } - if action == plans.Delete && diff.change.Mode != jsonstate.ManagedResourceMode { - // Don't render anything for deleted data sources. 
- continue - } - - changes = append(changes, diff) - - // Don't count move-only changes - if action != plans.NoOp { - willPrintResourceChanges = true - counts[action]++ - } - } - - // Precompute the outputs early, so we can make a decision about whether we - // display the "there are no changes messages". - outputs := renderHumanDiffOutputs(renderer, diffs.outputs) - - if len(changes) == 0 && len(outputs) == 0 { - // If we didn't find any changes to report at all then this is a - // "No changes" plan. How we'll present this depends on whether - // the plan is "applyable" and, if so, whether it had refresh changes - // that we already would've presented above. - - if checkOpts(Errored) { - if haveRefreshChanges { - renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) - renderer.Streams.Println() - } - renderer.Streams.Print( - renderer.Colorize.Color("\n[reset][bold][red]Planning failed.[reset][bold] Terraform encountered an error while generating this plan.[reset]\n\n"), - ) - } else { - switch mode { - case plans.RefreshOnlyMode: - if haveRefreshChanges { - // We already generated a sufficient prompt about what will - // happen if applying this change above, so we don't need to - // say anything more. 
- return - } - - renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure still matches the configuration.[reset]\n\n")) - renderer.Streams.Println(format.WordWrap( - "Terraform has checked that the real remote objects still match the result of your most recent changes, and found no differences.", - renderer.Streams.Stdout.Columns())) - case plans.DestroyMode: - if haveRefreshChanges { - renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) - fmt.Fprintln(renderer.Streams.Stdout.File) - } - renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] No objects need to be destroyed.[reset]\n\n")) - renderer.Streams.Println(format.WordWrap( - "Either you have not created any objects yet or the existing objects were already deleted outside of Terraform.", - renderer.Streams.Stdout.Columns())) - default: - if haveRefreshChanges { - renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) - renderer.Streams.Println("") - } - renderer.Streams.Print( - renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure matches the configuration.[reset]\n\n"), - ) - - if haveRefreshChanges { - if !checkOpts(CanNotApply) { - // In this case, applying this plan will not change any - // remote objects but _will_ update the state to match what - // we detected during refresh, so we'll reassure the user - // about that. - renderer.Streams.Println(format.WordWrap( - "Your configuration already matches the changes detected above, so applying this plan will only update the state to include the changes detected above and won't change any real infrastructure.", - renderer.Streams.Stdout.Columns(), - )) - } else { - // In this case we detected changes during refresh but this isn't - // a planning mode where we consider those to be applyable. 
The - // user must re-run in refresh-only mode in order to update the - // state to match the upstream changes. - suggestion := "." - if !renderer.RunningInAutomation { - // The normal message includes a specific command line to run. - suggestion = ":\n terraform apply -refresh-only" - } - renderer.Streams.Println(format.WordWrap( - "Your configuration already matches the changes detected above. If you'd like to update the Terraform state to match, create and apply a refresh-only plan"+suggestion, - renderer.Streams.Stdout.Columns(), - )) - } - return - } - - // If we get down here then we're just in the simple situation where - // the plan isn't applyable at all. - renderer.Streams.Println(format.WordWrap( - "Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed.", - renderer.Streams.Stdout.Columns(), - )) - } - } - } - - if haveRefreshChanges { - renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) - renderer.Streams.Println() - } - - if willPrintResourceChanges { - renderer.Streams.Println(format.WordWrap( - "\nTerraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols:", - renderer.Streams.Stdout.Columns())) - if counts[plans.Create] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Create))) - } - if counts[plans.Update] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Update))) - } - if counts[plans.Delete] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Delete))) - } - if counts[plans.DeleteThenCreate] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.DeleteThenCreate))) - } - if counts[plans.CreateThenDelete] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.CreateThenDelete))) - } - if counts[plans.Read] > 0 { - renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Read))) - } - } - - if len(changes) > 0 { - if checkOpts(Errored) { - renderer.Streams.Printf("\nTerraform planned the following actions, but then encountered a problem:\n") - } else { - renderer.Streams.Printf("\nTerraform will perform the following actions:\n") - } - - for _, change := range changes { - diff, render := renderHumanDiff(renderer, change, proposedChange) - if render { - fmt.Fprintln(renderer.Streams.Stdout.File) - renderer.Streams.Println(diff) - } - } - - renderer.Streams.Printf( - renderer.Colorize.Color("\n[bold]Plan:[reset] %d to add, %d to change, %d to destroy.\n"), - counts[plans.Create]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete], - counts[plans.Update], - counts[plans.Delete]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete]) - } - - if len(outputs) > 0 { - renderer.Streams.Print("\nChanges to Outputs:\n") - renderer.Streams.Printf("%s\n", outputs) - - if len(counts) == 0 { - // If we have output changes but not resource changes then we - // won't have output any indication about the changes at all yet, - // so we need some extra context about what it would mean to - // apply a 
change that _only_ includes output changes. - renderer.Streams.Println(format.WordWrap( - "\nYou can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure.", - renderer.Streams.Stdout.Columns())) - } - } -} - -func renderHumanDiffOutputs(renderer Renderer, outputs map[string]computed.Diff) string { - var rendered []string - - var keys []string - escapedKeys := make(map[string]string) - var escapedKeyMaxLen int - for key := range outputs { - escapedKey := renderers.EnsureValidAttributeName(key) - keys = append(keys, key) - escapedKeys[key] = escapedKey - if len(escapedKey) > escapedKeyMaxLen { - escapedKeyMaxLen = len(escapedKey) - } - } - sort.Strings(keys) - - for _, key := range keys { - output := outputs[key] - if output.Action != plans.NoOp { - rendered = append(rendered, fmt.Sprintf("%s %-*s = %s", renderer.Colorize.Color(format.DiffActionSymbol(output.Action)), escapedKeyMaxLen, escapedKeys[key], output.RenderHuman(0, computed.NewRenderHumanOpts(renderer.Colorize)))) - } - } - return strings.Join(rendered, "\n") -} - -func renderHumanDiffDrift(renderer Renderer, diffs diffs, mode plans.Mode) bool { - var drs []diff - - // In refresh-only mode, we show all resources marked as drifted, - // including those which have moved without other changes. In other plan - // modes, move-only changes will be rendered in the planned changes, so - // we skip them here. - - if mode == plans.RefreshOnlyMode { - drs = diffs.drift - } else { - for _, dr := range diffs.drift { - if dr.diff.Action != plans.NoOp { - drs = append(drs, dr) - } - } - } - - if len(drs) == 0 { - return false - } - - // If the overall plan is empty, and it's not a refresh only plan then we - // won't show any drift changes. 
- if diffs.Empty() && mode != plans.RefreshOnlyMode { - return false - } - - renderer.Streams.Print(renderer.Colorize.Color("\n[bold][cyan]Note:[reset][bold] Objects have changed outside of Terraform\n")) - renderer.Streams.Println() - renderer.Streams.Print(format.WordWrap( - "Terraform detected the following changes made outside of Terraform since the last \"terraform apply\" which may have affected this plan:\n", - renderer.Streams.Stdout.Columns())) - - for _, drift := range drs { - diff, render := renderHumanDiff(renderer, drift, detectedDrift) - if render { - renderer.Streams.Println() - renderer.Streams.Println(diff) - } - } - - switch mode { - case plans.RefreshOnlyMode: - renderer.Streams.Println(format.WordWrap( - "\n\nThis is a refresh-only plan, so Terraform will not take any actions to undo these. If you were expecting these changes then you can apply this plan to record the updated values in the Terraform state without changing any remote objects.", - renderer.Streams.Stdout.Columns(), - )) - default: - renderer.Streams.Println(format.WordWrap( - "\n\nUnless you have made equivalent changes to your configuration, or ignored the relevant attributes using ignore_changes, the following plan may include actions to undo or respond to these changes.", - renderer.Streams.Stdout.Columns(), - )) - } - - return true -} - -func renderHumanDiff(renderer Renderer, diff diff, cause string) (string, bool) { - - // Internally, our computed diffs can't tell the difference between a - // replace action (eg. CreateThenDestroy, DestroyThenCreate) and a simple - // update action. So, at the top most level we rely on the action provided - // by the plan itself instead of what we compute. Nested attributes and - // blocks however don't have the replace type of actions, so we can trust - // the computed actions of these. 
- - action := jsonplan.UnmarshalActions(diff.change.Change.Actions) - if action == plans.NoOp && (len(diff.change.PreviousAddress) == 0 || diff.change.PreviousAddress == diff.change.Address) { - // Skip resource changes that have nothing interesting to say. - return "", false - } - - var buf bytes.Buffer - buf.WriteString(renderer.Colorize.Color(resourceChangeComment(diff.change, action, cause))) - buf.WriteString(fmt.Sprintf("%s %s %s", renderer.Colorize.Color(format.DiffActionSymbol(action)), resourceChangeHeader(diff.change), diff.diff.RenderHuman(0, computed.NewRenderHumanOpts(renderer.Colorize)))) - return buf.String(), true -} - -func resourceChangeComment(resource jsonplan.ResourceChange, action plans.Action, changeCause string) string { - var buf bytes.Buffer - - dispAddr := resource.Address - if len(resource.Deposed) != 0 { - dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, resource.Deposed) - } - - switch action { - case plans.Create: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr)) - case plans.Read: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be read during apply", dispAddr)) - switch resource.ActionReason { - case jsonplan.ResourceInstanceReadBecauseConfigUnknown: - buf.WriteString("\n # (config refers to values not yet known)") - case jsonplan.ResourceInstanceReadBecauseDependencyPending: - buf.WriteString("\n # (depends on a resource or a module with changes pending)") - } - case plans.Update: - switch changeCause { - case proposedChange: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr)) - case detectedDrift: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has changed", dispAddr)) - default: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] update (unknown reason %s)", dispAddr, changeCause)) - } - case plans.CreateThenDelete, plans.DeleteThenCreate: - switch resource.ActionReason { - case jsonplan.ResourceInstanceReplaceBecauseTainted: - 
buf.WriteString(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced[reset]", dispAddr)) - case jsonplan.ResourceInstanceReplaceByRequest: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset], as requested", dispAddr)) - case jsonplan.ResourceInstanceReplaceByTriggers: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset] due to changes in replace_triggered_by", dispAddr)) - default: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced[reset]", dispAddr)) - } - case plans.Delete: - switch changeCause { - case proposedChange: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed[reset]", dispAddr)) - case detectedDrift: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has been deleted", dispAddr)) - default: - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] delete (unknown reason %s)", dispAddr, changeCause)) - } - // We can sometimes give some additional detail about why we're - // proposing to delete. We show this as additional notes, rather than - // as additional wording in the main action statement, in an attempt - // to make the "will be destroyed" message prominent and consistent - // in all cases, for easier scanning of this often-risky action. 
- switch resource.ActionReason { - case jsonplan.ResourceInstanceDeleteBecauseNoResourceConfig: - buf.WriteString(fmt.Sprintf("\n # (because %s.%s is not in configuration)", resource.Type, resource.Name)) - case jsonplan.ResourceInstanceDeleteBecauseNoMoveTarget: - buf.WriteString(fmt.Sprintf("\n # (because %s was moved to %s, which is not in configuration)", resource.PreviousAddress, resource.Address)) - case jsonplan.ResourceInstanceDeleteBecauseNoModule: - // FIXME: Ideally we'd truncate addr.Module to reflect the earliest - // step that doesn't exist, so it's clearer which call this refers - // to, but we don't have enough information out here in the UI layer - // to decide that; only the "expander" in Terraform Core knows - // which module instance keys are actually declared. - buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", resource.ModuleAddress)) - case jsonplan.ResourceInstanceDeleteBecauseWrongRepetition: - var index interface{} - if resource.Index != nil { - if err := json.Unmarshal(resource.Index, &index); err != nil { - panic(err) - } - } - - // We have some different variations of this one - switch index.(type) { - case nil: - buf.WriteString("\n # (because resource uses count or for_each)") - case float64: - buf.WriteString("\n # (because resource does not use count)") - case string: - buf.WriteString("\n # (because resource does not use for_each)") - } - case jsonplan.ResourceInstanceDeleteBecauseCountIndex: - buf.WriteString(fmt.Sprintf("\n # (because index [%s] is out of range for count)", resource.Index)) - case jsonplan.ResourceInstanceDeleteBecauseEachKey: - buf.WriteString(fmt.Sprintf("\n # (because key [%s] is not in for_each map)", resource.Index)) - } - if len(resource.Deposed) != 0 { - // Some extra context about this unusual situation. 
- buf.WriteString("\n # (left over from a partially-failed replacement of this instance)") - } - case plans.NoOp: - if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address { - buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has moved to [bold]%s[reset]", resource.PreviousAddress, dispAddr)) - break - } - fallthrough - default: - // should never happen, since the above is exhaustive - buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) - } - buf.WriteString("\n") - - if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address && action != plans.NoOp { - buf.WriteString(fmt.Sprintf(" # [reset](moved from %s)\n", resource.PreviousAddress)) - } - - return buf.String() -} - -func resourceChangeHeader(change jsonplan.ResourceChange) string { - mode := "resource" - if change.Mode != jsonstate.ManagedResourceMode { - mode = "data" - } - return fmt.Sprintf("%s \"%s\" \"%s\"", mode, change.Type, change.Name) -} - -func actionDescription(action plans.Action) string { - switch action { - case plans.Create: - return " [green]+[reset] create" - case plans.Delete: - return " [red]-[reset] destroy" - case plans.Update: - return " [yellow]~[reset] update in-place" - case plans.CreateThenDelete: - return "[green]+[reset]/[red]-[reset] create replacement and then destroy" - case plans.DeleteThenCreate: - return "[red]-[reset]/[green]+[reset] destroy and then create replacement" - case plans.Read: - return " [cyan]<=[reset] read (data resources)" - default: - panic(fmt.Sprintf("unrecognized change type: %s", action.String())) - } -} diff --git a/internal/command/jsonformat/plan_test.go b/internal/command/jsonformat/plan_test.go deleted file mode 100644 index 077d29afa073..000000000000 --- a/internal/command/jsonformat/plan_test.go +++ /dev/null @@ -1,6956 +0,0 @@ -package jsonformat - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - 
"github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/jsonformat/differ" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestRenderHuman_EmptyPlan(t *testing.T) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - streams, done := terminal.StreamsForTesting(t) - - plan := Plan{} - - renderer := Renderer{Colorize: color, Streams: streams} - plan.renderHuman(renderer, plans.NormalMode) - - want := ` -No changes. Your infrastructure matches the configuration. - -Terraform has compared your real infrastructure against your configuration -and found no differences, so no changes are needed. 
-` - - got := done(t).Stdout() - if diff := cmp.Diff(want, got); len(diff) > 0 { - t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) - } -} - -func TestRenderHuman_EmptyOutputs(t *testing.T) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - streams, done := terminal.StreamsForTesting(t) - - outputVal, _ := json.Marshal("some-text") - plan := Plan{ - OutputChanges: map[string]jsonplan.Change{ - "a_string": { - Actions: []string{"no-op"}, - Before: outputVal, - After: outputVal, - }, - }, - } - - renderer := Renderer{Colorize: color, Streams: streams} - plan.renderHuman(renderer, plans.NormalMode) - - want := ` -No changes. Your infrastructure matches the configuration. - -Terraform has compared your real infrastructure against your configuration -and found no differences, so no changes are needed. -` - - got := done(t).Stdout() - if diff := cmp.Diff(want, got); len(diff) > 0 { - t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) - } -} - -func TestResourceChange_primitiveTypes(t *testing.T) { - testCases := map[string]testCase{ - "creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - }`, - }, - "creation (null string)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("null"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": {Type: cty.String, Optional: true}, - 
}, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + string = "null" - }`, - }, - "creation (null string with extra whitespace)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("null "), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + string = "null " - }`, - }, - "creation (object with quoted keys)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "unquoted": cty.StringVal("value"), - "quoted:key": cty.StringVal("some-value"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "object": {Type: cty.Object(map[string]cty.Type{ - "unquoted": cty.String, - "quoted:key": cty.String, - }), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + object = { - + "quoted:key" = "some-value" - + unquoted = "value" - } - }`, - }, - "deletion": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - id = 
"i-02ae66f368e8518a9" -> null - }`, - }, - "deletion of deposed object": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - DeposedKey: states.DeposedKey("byebye"), - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example (deposed object byebye) will be destroyed - # (left over from a partially-failed replacement of this instance) - - resource "test_instance" "example" { - - id = "i-02ae66f368e8518a9" -> null - }`, - }, - "deletion (empty string)": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "intentionally_long": cty.StringVal(""), - }), - After: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "intentionally_long": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - id = "i-02ae66f368e8518a9" -> null - }`, - }, - "string in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` 
# test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - }`, - }, - "update with quoted key": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "saml:aud": cty.StringVal("https://example.com/saml"), - "zeta": cty.StringVal("alpha"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "saml:aud": cty.StringVal("https://saml.example.com"), - "zeta": cty.StringVal("alpha"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "saml:aud": {Type: cty.String, Optional: true}, - "zeta": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ "saml:aud" = "https://example.com/saml" -> "https://saml.example.com" - # (1 unchanged attribute hidden) - }`, - }, - "string force-new update": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "ami"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ 
ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement - id = "i-02ae66f368e8518a9" - }`, - }, - "string in-place update (null values)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "unchanged": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "unchanged": cty.NullVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "unchanged": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update of multi-line string field": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": cty.StringVal(`original -long -multi-line -string -field`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -extremely long -multi-line -string -field`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ more_lines = <<-EOT - original - - long - + extremely long - multi-line - string - field - EOT - }`, - }, - 
"addition of multi-line string field": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -new line`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + more_lines = <<-EOT - original - new line - EOT - }`, - }, - "force-new update of multi-line string field": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "more_lines": cty.StringVal(`original`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "more_lines": cty.StringVal(`original -new line`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "more_lines": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "more_lines"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ more_lines = <<-EOT # forces replacement - original - + new line - EOT - }`, - }, - - // Sensitive - - "creation with sensitive field": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.UnknownVal(cty.String), - "password": cty.StringVal("top-secret"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("top-secret"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - "conn_info": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSingle, - Attributes: map[string]*configschema.Attribute{ - "user": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - }, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + conn_info = { - + password = (sensitive value) - + user = "not-secret" - } - + id = (known after apply) - + password = (sensitive value) - }`, - }, - "update with equal sensitive field": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("blah"), - "str": cty.StringVal("before"), - "password": cty.StringVal("top-secret"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "str": cty.StringVal("after"), - "password": cty.StringVal("top-secret"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "str": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "blah" -> (known after apply) - ~ str = "before" -> "after" - # (1 unchanged attribute hidden) - }`, - }, - - // tainted objects - "replace tainted resource": { - Action: plans.DeleteThenCreate, 
- ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "ami"}, - }), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - }`, - }, - "force replacement with empty before value": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.NullVal(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.StringVal("example"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "forced": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "forced"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - + forced = "example" # forces replacement - name = "name" - }`, - }, - "force replacement with empty before value legacy": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": 
cty.StringVal("name"), - "forced": cty.StringVal(""), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - "forced": cty.StringVal("example"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "forced": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "forced"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - + forced = "example" # forces replacement - name = "name" - }`, - }, - "read during apply because of unknown configuration": { - Action: plans.Read, - ActionReason: plans.ResourceInstanceReadBecauseConfigUnknown, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - # (config refers to values not yet known) - <= data "test_instance" "example" { - name = "name" - }`, - }, - "read during apply because of pending changes to upstream dependency": { - Action: plans.Read, - ActionReason: plans.ResourceInstanceReadBecauseDependencyPending, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - # (depends on a resource or a module with changes pending) - <= data "test_instance" "example" { - name = "name" - }`, - }, - 
"read during apply for unspecified reason": { - Action: plans.Read, - Mode: addrs.DataResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("name"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - }, - }, - ExpectedOutput: ` # data.test_instance.example will be read during apply - <= data "test_instance" "example" { - name = "name" - }`, - }, - "show all identifying attributes even if unchanged": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "bar": cty.StringVal("bar"), - "foo": cty.StringVal("foo"), - "name": cty.StringVal("alice"), - "tags": cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("bob"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "bar": cty.StringVal("bar"), - "foo": cty.StringVal("foo"), - "name": cty.StringVal("alice"), - "tags": cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("bob"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Optional: true}, - "name": {Type: cty.String, Optional: true}, - "tags": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - name = "alice" - tags = { - "name" = "bob" - } - # (2 unchanged attributes hidden) - }`, - }, - } - - 
runTestCases(t, testCases) -} - -func TestResourceChange_JSON(t *testing.T) { - testCases := map[string]testCase{ - "creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{ - "str": "value", - "list":["a","b", 234, true], - "obj": {"key": "val"} - }`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - + json_field = jsonencode( - { - + list = [ - + "a", - + "b", - + 234, - + true, - ] - + obj = { - + key = "val" - } - + str = "value" - } - ) - }`, - }, - "in-place update of object": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value","ccc": 5}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + bbb = "new_value" - - ccc = 5 - # (1 unchanged attribute hidden) - } - ) - }`, - }, - "in-place update of object with quoted keys": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, 
- Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "c:c": "old_value"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": "value", "b:bb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + "b:bb" = "new_value" - - "c:c" = "old_value" - # (1 unchanged attribute hidden) - } - ) - }`, - }, - "in-place update (from empty tuple)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": []}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": ["value"]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - + "value", - ] - } - ) - }`, - }, - "in-place update (to empty tuple)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": ["value"]}`), - }), - 
After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": []}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - - "value", - ] - } - ) - }`, - }, - "in-place update (tuple of different types)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ aaa = [ - 42, - ~ { - ~ foo = "bar" -> "baz" - }, - "value", - ] - } - ) - }`, - }, - "force-new update": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": 
cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "json_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + bbb = "new_value" - # (1 unchanged attribute hidden) - } # forces replacement - ) - }`, - }, - "in-place update (whitespace change)": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa":"value", - "bbb":"another"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( # whitespace changes - { - aaa = "value" - bbb = "another" - } - ) - }`, - }, - "force-new update (whitespace change)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"aaa":"value", - "bbb":"another"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "json_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( # whitespace changes force replacement - { - aaa = "value" - bbb = "another" - } - ) - }`, - }, - "creation (empty)": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + id = (known after apply) - + json_field = jsonencode({}) - }`, - }, - "JSON list item removal": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`["first","second","third"]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["first","second"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # 
test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - # (1 unchanged element hidden) - "second", - - "third", - ] - ) - }`, - }, - "JSON list item addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`["first","second"]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["first","second","third"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - # (1 unchanged element hidden) - "second", - + "third", - ] - ) - }`, - }, - "JSON list object addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"first":"111"}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"first":"111","second":"222"}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - + 
second = "222" - # (1 unchanged attribute hidden) - } - ) - }`, - }, - "JSON object with nested list": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{ - "Statement": ["first"] - }`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{ - "Statement": ["first", "second"] - }`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ Statement = [ - "first", - + "second", - ] - } - ) - }`, - }, - "JSON list of objects - adding item": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`[{"one": "111"}]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - { - one = "111" - }, - + { - + two = "222" - }, - ] - ) - }`, - }, - "JSON list of objects - removing item": { - Action: plans.Update, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - { - one = "111" - }, - - { - - two = "222" - }, - { - three = "333" - }, - ] - ) - }`, - }, - "JSON object with list of objects": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ parent = [ - { - one = "111" - }, - + { - + two = "222" - }, - ] - } - ) - }`, - }, - "JSON object double nested lists": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - 
"id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - ~ parent = [ - ~ { - ~ another_list = [ - "111", - + "222", - ] - }, - ] - } - ) - }`, - }, - "in-place update from object to tuple": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "json_field": cty.StringVal(`["aaa", 42, "something"]`), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "json_field": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - - aaa = [ - - 42, - - { - - foo = "bar" - }, - - "value", - ] - } -> [ - + "aaa", - + 42, - + "something", - ] - ) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_listObject(t *testing.T) { - testCases := map[string]testCase{ - // https://github.com/hashicorp/terraform/issues/30641 - 
"updating non-identifying attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "accounts": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("1"), - "name": cty.StringVal("production"), - "status": cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("2"), - "name": cty.StringVal("staging"), - "status": cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("3"), - "name": cty.StringVal("disaster-recovery"), - "status": cty.StringVal("ACTIVE"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "accounts": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("1"), - "name": cty.StringVal("production"), - "status": cty.StringVal("ACTIVE"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("2"), - "name": cty.StringVal("staging"), - "status": cty.StringVal("EXPLODED"), - }), - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("3"), - "name": cty.StringVal("disaster-recovery"), - "status": cty.StringVal("ACTIVE"), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "accounts": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "id": cty.String, - "name": cty.String, - "status": cty.String, - })), - }, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ accounts = [ - { - id = "1" - name = "production" - status = "ACTIVE" - }, - ~ { - id = "2" - name = "staging" - ~ status = "ACTIVE" -> "EXPLODED" - }, - { - id = "3" - name = "disaster-recovery" - status = "ACTIVE" - }, - ] - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - 
}`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_primitiveList(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.NullVal(cty.List(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - first addition": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will 
be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - # (1 unchanged element hidden) - "bbbb", - + "cccc", - "dddd", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "list_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ # forces replacement - "aaaa", - + "bbbb", - "cccc", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - 
"aaaa", - "bbbb", - - "cccc", - "dddd", - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - "creation - empty list": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + list_field = [] - }`, - }, - "in-place update - full to empty": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - "aaaa", - - "bbbb", - - "cccc", - ] - # (1 unchanged attribute hidden) - }`, - }, - 
"in-place update - null to empty": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.NullVal(cty.List(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [] - # (1 unchanged attribute hidden) - }`, - }, - "update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = 
"i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - "cccc", - ] - # (1 unchanged attribute hidden) - }`, - }, - "update - two new unknown elements": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - cty.StringVal("cccc"), - cty.StringVal("dddd"), - cty.StringVal("eeee"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - + (known after apply), - "cccc", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_primitiveTuple(t *testing.T) { - testCases := map[string]testCase{ - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "tuple_field": cty.TupleVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("dddd"), - 
cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "tuple_field": cty.TupleVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - cty.StringVal("eeee"), - cty.StringVal("ffff"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Required: true}, - "tuple_field": {Type: cty.Tuple([]cty.Type{cty.String, cty.String, cty.String, cty.String, cty.String}), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ tuple_field = [ - # (1 unchanged element hidden) - "bbbb", - ~ "dddd" -> "cccc", - "eeee", - # (1 unchanged element hidden) - ] - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_primitiveSet(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.NullVal(cty.Set(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [ - + 
"new-element", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("new-element"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - + "new-element", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example 
will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - + "bbbb", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "set_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ # forces replacement - + "bbbb", - # (2 unchanged elements hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": 
cty.SetVal([]cty.Value{ - cty.StringVal("bbbb"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "cccc", - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - "creation - empty set": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + set_field = [] - }`, - }, - "in-place update - full to empty set": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - 
"id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - null to empty set": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.NullVal(cty.Set(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [] - # (1 unchanged attribute hidden) - }`, - }, - "in-place update to unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.UnknownVal(cty.Set(cty.String)), - }), - Schema: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] -> (known after apply) - # (1 unchanged attribute hidden) - }`, - }, - "in-place update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.StringVal("bbbb"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "set_field": cty.SetVal([]cty.Value{ - cty.StringVal("aaaa"), - cty.UnknownVal(cty.String), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "set_field": {Type: cty.Set(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "bbbb", - + (known after apply), - # (1 unchanged element hidden) - ] - # (1 unchanged attribute hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_map(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": 
cty.StringVal("ami-STATIC"), - "map_field": cty.NullVal(cty.Map(cty.String)), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "new-key": cty.StringVal("new-element"), - "be:ep": cty.StringVal("boop"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + map_field = { - + "be:ep" = "boop" - + "new-key" = "new-element" - } - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapValEmpty(cty.String), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "new-key": cty.StringVal("new-element"), - "be:ep": cty.StringVal("boop"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - + "be:ep" = "boop" - + "new-key" = "new-element" - } - # (1 unchanged 
attribute hidden) - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "b:b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - + "b" = "bbbb" - + "b:b" = "bbbb" - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - }`, - }, - "force-new update - insertion": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - 
Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "map_field"}, - }), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { # forces replacement - + "b" = "bbbb" - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "b": cty.StringVal("bbbb"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - - "a" = "aaaa" -> null - - "c" = "cccc" -> null - # (1 unchanged element hidden) - } - # (1 unchanged attribute hidden) - }`, - }, - "creation - empty": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapValEmpty(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-STATIC" - + id = (known after apply) - + map_field = {} - }`, - }, - "update to unknown element": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.StringVal("bbbb"), - "c": cty.StringVal("cccc"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("ami-STATIC"), - "map_field": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("aaaa"), - "b": cty.UnknownVal(cty.String), - "c": cty.StringVal("cccc"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_field": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - ~ "b" = "bbbb" -> (known after apply) - # (2 unchanged elements hidden) - } - # (1 unchanged attribute hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedList(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - equal": { - 
Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - }`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": 
cty.StringVal("50GB"), - })}), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device {} - }`, - }, - "in-place update - first insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - + size = "50GB" - # (1 unchanged attribute hidden) - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - + new_field = "new_value" - # (1 unchanged attribute hidden) - } - }`, - }, - "force-new update (inside blocks)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.IndexStep{Key: cty.NumberIntVal(0)}, - cty.GetAttrStep{Name: "volume_type"}, - }, - cty.Path{ - cty.GetAttrStep{Name: "disks"}, - cty.IndexStep{Key: cty.NumberIntVal(0)}, - cty.GetAttrStep{Name: "mount_point"}, - }, - ), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement - # (1 unchanged attribute hidden) - }, - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - ~ volume_type = "gp2" -> "different" # forces replacement - } - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ 
- cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ # forces replacement - ~ { - ~ mount_point = "/var/diska" -> "/var/diskb" - # (1 unchanged attribute hidden) - }, - ] - id = "i-02ae66f368e8518a9" - - ~ root_block_device { # forces replacement - ~ volume_type = "gp2" -> "different" - } - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": 
cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - }`, - }, - "with dynamically-typed attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "block": cty.EmptyTupleVal, - }), - After: cty.ObjectVal(map[string]cty.Value{ - "block": cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - cty.ObjectVal(map[string]cty.Value{ - "attr": cty.True, - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + block { - + attr = "foo" - } - + block { - + attr = true - } - }`, - }, - "in-place sequence update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}), - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), - cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: 
&configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": { - Type: cty.String, - Required: true, - }, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ list { - ~ attr = "x" -> "y" - } - ~ list { - ~ attr = "y" -> "z" - } - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - "in-place update - modification": { - Action: plans.Update, - Mode: 
addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskc"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("75GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskc"), - "size": cty.StringVal("25GB"), - }), - }), - "root_block_device": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - ~ { - ~ size = "50GB" -> "75GB" - # (1 unchanged attribute hidden) - }, - ~ { - ~ size = "50GB" -> "25GB" - # (1 unchanged attribute hidden) - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - } - 
runTestCases(t, testCases) -} - -func TestResourceChange_nestedSet(t *testing.T) { - testCases := map[string]testCase{ - "creation from null - sensitive set": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.Object(map[string]cty.Type{ - "id": cty.String, - "ami": cty.String, - "disks": cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.Set(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - })), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - }`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": 
cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - + { - + mount_point = "/var/diska" - }, - ] - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - }`, - }, - "in-place update - creation - sensitive set": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> 
"ami-AFTER" - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - - + root_block_device { - + volume_type = "gp2" - } - }`, - }, - "in-place update - marking set sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. The value is unchanged. 
- ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - }, - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - # (1 unchanged element hidden) - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - + root_block_device { - + new_field = "new_value" - + volume_type = "gp2" - } - }`, - }, - "force-new update (whole block)": { - Action: 
plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { # forces replacement - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - + { # forces replacement - + mount_point = "/var/diskb" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { # forces replacement - - volume_type = "gp2" -> null - } - + root_block_device { # forces replacement - + volume_type = "different" - } - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.SetVal([]cty.Value{ - 
cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - "new_field": cty.String, - })), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - new_field = "new_value" -> null - - volume_type = "gp2" -> null - } - }`, - }, - "in-place update - empty nested sets": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - RequiredReplace: 
cty.NewPathSet(), - Schema: testSchema(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disks = [] - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - null insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disks = [ - + { - + mount_point = "/var/diska" - + size = "50GB" - }, - ] - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - + root_block_device { - + new_field = "new_value" - + volume_type = "gp2" - } - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - 
"disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = [ - - { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - ] -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedMap(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - 
"mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = { - + "disk_a" = { - + mount_point = "/var/diska" - }, - } - + id = "i-02ae66f368e8518a9" - - + root_block_device "a" { - + volume_type = "gp2" - } - }`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - + "disk_a" = { - + mount_point = "/var/diska" - }, - } - id = "i-02ae66f368e8518a9" - - + root_block_device "a" { - + volume_type = "gp2" - } - }`, - }, - "in-place update - change attr": { - 
Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - ~ "disk_a" = { - + size = "50GB" - # (1 unchanged attribute hidden) - }, - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { - + new_field = "new_value" - # (1 unchanged attribute hidden) - } - }`, - }, - "in-place update - insertion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": 
cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "disk_2": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/disk2"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.NullVal(cty.String), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - + "disk_2" = { - + mount_point = "/var/disk2" - + size = "50GB" - }, - # (1 unchanged element hidden) - } - id = "i-02ae66f368e8518a9" - - + root_block_device "b" { - + new_field = "new_value" - + volume_type = "gp2" - } - - # (1 unchanged block hidden) - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": 
cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("standard"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("standard"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.IndexStep{Key: cty.StringVal("a")}, - }, - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - ~ "disk_a" = { # forces replacement - ~ size = "50GB" -> "100GB" - # (1 unchanged attribute hidden) - }, - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { # forces replacement - ~ volume_type = "gp2" -> "different" - } - - # (1 unchanged block hidden) - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": 
cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - "new_field": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - - "disk_a" = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - } - id = "i-02ae66f368e8518a9" - - - root_block_device "a" { - - new_field = "new_value" -> null - - volume_type = "gp2" -> null - } - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - 
}), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - - "disk_a" = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - }, - } -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - "in-place update - insertion sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "disks"}, - cty.IndexStep{Key: cty.StringVal("disk_a")}, - cty.GetAttrStep{Name: "mount_point"}, - }, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = { - + 
"disk_a" = { - + mount_point = (sensitive value) - + size = "50GB" - }, - } - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - "in-place update - multiple unchanged blocks": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (2 unchanged blocks hidden) - }`, - }, - "in-place update - multiple blocks first changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ 
- "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - }`, - }, - "in-place update - multiple blocks second changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), 
- }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - }`, - }, - "in-place update - multiple blocks changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": 
cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - }`, - }, - "in-place update - multiple different unchanged blocks": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - 
}), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (2 unchanged blocks hidden) - }`, - }, - "in-place update - multiple different blocks first changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = 
"i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - }`, - }, - "in-place update - multiple different blocks second changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - - # (1 unchanged block hidden) - }`, - }, - "in-place update - multiple different blocks 
changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - ~ root_block_device "a" { - ~ volume_type = "gp2" -> "gp3" - } - }`, - }, - "in-place update - mixed blocks unchanged": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), 
- "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (4 unchanged blocks hidden) - }`, - }, - "in-place update - mixed blocks changed": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": 
cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - "root_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - "leaf_block_device": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp3"), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaMultipleBlocks(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - ~ leaf_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - ~ root_block_device "b" { - ~ volume_type = "gp2" -> "gp3" - } - - # (2 unchanged 
blocks hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedSingle(t *testing.T) { - testCases := map[string]testCase{ - "in-place update - equal": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - # (1 unchanged attribute hidden) - - # (1 unchanged block hidden) - }`, - }, - "in-place update - creation": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disk": cty.NullVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - 
"size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.NullVal(cty.String), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - + disk = { - + mount_point = "/var/diska" - + size = "50GB" - } - id = "i-02ae66f368e8518a9" - - + root_block_device {} - }`, - }, - "force-new update (inside blocks)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{ - cty.GetAttrStep{Name: "root_block_device"}, - cty.GetAttrStep{Name: "volume_type"}, - }, - cty.Path{ - cty.GetAttrStep{Name: "disk"}, - cty.GetAttrStep{Name: "mount_point"}, - }, - ), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement - # (1 unchanged attribute hidden) - } - id = 
"i-02ae66f368e8518a9" - - ~ root_block_device { - ~ volume_type = "gp2" -> "different" # forces replacement - } - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diskb"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("different"), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, - cty.Path{cty.GetAttrStep{Name: "disk"}}, - ), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { # forces replacement - ~ mount_point = "/var/diska" -> "/var/diskb" - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - - ~ root_block_device { # forces replacement - ~ volume_type = "gp2" -> "different" - } - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": 
cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ - "volume_type": cty.String, - })), - "disk": cty.NullVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchema(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disk = { - - mount_point = "/var/diska" -> null - - size = "50GB" -> null - } -> null - id = "i-02ae66f368e8518a9" - - - root_block_device { - - volume_type = "gp2" -> null - } - }`, - }, - "with dynamically-typed attribute": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "block": cty.NullVal(cty.Object(map[string]cty.Type{ - "attr": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "block": cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + block { - + attr = "foo" - } - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - 
"mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.UnknownVal(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ mount_point = "/var/diska" -> (known after apply) - ~ size = "50GB" -> (known after apply) - } -> (known after apply) - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - "in-place update - modification": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disk": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("25GB"), - }), - "root_block_device": cty.ObjectVal(map[string]cty.Value{ - "volume_type": cty.StringVal("gp2"), - "new_field": cty.StringVal("new_value"), - }), - 
}), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaPlus(configschema.NestingSingle), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disk = { - ~ size = "50GB" -> "25GB" - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - - # (1 unchanged block hidden) - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedMapSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": 
cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) -> null - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.MapVal(map[string]cty.Value{ - "disk_a": cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingMap), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedListSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ 
- "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - 
"mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) -> null - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.ListVal([]cty.Value{ - 
cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingList), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_nestedSetSensitiveSchema(t *testing.T) { - testCases := map[string]testCase{ - "creation from null": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.NullVal(cty.String), - "ami": cty.NullVal(cty.String), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - + ami = "ami-AFTER" - + disks = (sensitive value) - + id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": 
cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - })), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.NullVal(cty.String), - }), - }), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - "force-new update (whole block)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("100GB"), - }), - }), - }), - RequiredReplace: cty.NewPathSet( - cty.Path{cty.GetAttrStep{Name: "disks"}}, - ), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - deletion": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - - disks = (sensitive value) -> null - id = "i-02ae66f368e8518a9" - }`, - }, - "in-place update - unknown": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "disks": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "mount_point": cty.StringVal("/var/diska"), - "size": cty.StringVal("50GB"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ - "mount_point": cty.String, - "size": cty.String, - }))), - }), - RequiredReplace: cty.NewPathSet(), - Schema: testSchemaSensitive(configschema.NestingSet), - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - ~ disks = (sensitive value) - id = "i-02ae66f368e8518a9" - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_actionReason(t *testing.T) { - emptySchema := &configschema.Block{} - nullVal := cty.NullVal(cty.EmptyObject) 
- emptyVal := cty.EmptyObjectVal - - testCases := map[string]testCase{ - "delete for no particular reason": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" {}`, - }, - "delete because of wrong repetition mode (NoKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.NoKey, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be destroyed - # (because resource uses count or for_each) - - resource "test_instance" "example" {}`, - }, - "delete because of wrong repetition mode (IntKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.IntKey(1), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example[1] will be destroyed - # (because resource does not use count) - - resource "test_instance" "example" {}`, - }, - "delete because of wrong repetition mode (StringKey)": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.StringKey("a"), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example["a"] will be destroyed - # (because resource does not use for_each) - - resource "test_instance" "example" {}`, - }, - "delete because no resource configuration": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, - 
ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # module.foo.test_instance.example will be destroyed - # (because test_instance.example is not in configuration) - - resource "test_instance" "example" {}`, - }, - "delete because no module": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, - ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # module.foo[1].test_instance.example will be destroyed - # (because module.foo[1] is not in configuration) - - resource "test_instance" "example" {}`, - }, - "delete because out of range for count": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseCountIndex, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.IntKey(1), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example[1] will be destroyed - # (because index [1] is out of range for count) - - resource "test_instance" "example" {}`, - }, - "delete because out of range for for_each": { - Action: plans.Delete, - ActionReason: plans.ResourceInstanceDeleteBecauseEachKey, - Mode: addrs.ManagedResourceMode, - InstanceKey: addrs.StringKey("boop"), - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example["boop"] will be destroyed - # (because key ["boop"] is not in for_each map) - - resource "test_instance" "example" {}`, - }, - "replace for no particular reason (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: 
emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" {}`, - }, - "replace for no particular reason (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceChangeNoReason, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example must be replaced -+/- resource "test_instance" "example" {}`, - }, - "replace by request (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceByRequest, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be replaced, as requested --/+ resource "test_instance" "example" {}`, - }, - "replace by request (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceReplaceByRequest, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be replaced, as requested -+/- resource "test_instance" "example" {}`, - }, - "replace because tainted (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced --/+ resource "test_instance" "example" {}`, - }, - "replace because tainted (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceReplaceBecauseTainted, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: 
emptySchema, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example is tainted, so must be replaced -+/- resource "test_instance" "example" {}`, - }, - "replace because cannot update (delete first)": { - Action: plans.DeleteThenCreate, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - // This one has no special message, because the fuller explanation - // typically appears inline as a "# forces replacement" comment. - // (not shown here) - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" {}`, - }, - "replace because cannot update (create first)": { - Action: plans.CreateThenDelete, - ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, - Mode: addrs.ManagedResourceMode, - Before: emptyVal, - After: nullVal, - Schema: emptySchema, - RequiredReplace: cty.NewPathSet(), - // This one has no special message, because the fuller explanation - // typically appears inline as a "# forces replacement" comment. 
- // (not shown here) - ExpectedOutput: ` # test_instance.example must be replaced -+/- resource "test_instance" "example" {}`, - }, - } - - runTestCases(t, testCases) -} - -func TestResourceChange_sensitiveVariable(t *testing.T) { - testCases := map[string]testCase{ - "creation": { - Action: plans.Create, - Mode: addrs.ManagedResourceMode, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-123"), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "nested_block_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - "another": cty.StringVal("not secret"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - "another": cty.StringVal("not secret"), - }), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - // Nested blocks/sets will mark the whole set/block as sensitive - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_list"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: 
cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be created - + resource "test_instance" "example" { - + ami = (sensitive value) - + id = "i-02ae66f368e8518a9" - + list_field = [ - + "hello", - + (sensitive value), - + "!", - ] - + map_key = { - + "breakfast" = 800 - + "dinner" = (sensitive value) - } - + map_whole = (sensitive value) - - + nested_block_list { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - + nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - }`, - }, - "in-place update - before sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "special": cty.BoolVal(false), - "some_number": cty.NumberIntVal(2), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("."), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(1900), - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: 
cty.Path{cty.GetAttrStep{Name: "special"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "special": {Type: cty.Bool, Optional: true}, - "some_number": {Type: cty.Number, Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - # Warning: this attribute value will no longer be marked as sensitive 
- # after applying this change. - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - # (1 unchanged element hidden) - "friends", - - (sensitive value), - + ".", - ] - ~ map_key = { - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ map_whole = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ some_number = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. - ~ special = (sensitive value) - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - - nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - + nested_block_set { - + an_attr = "changed" - } - }`, - }, - "in-place update - after sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_single": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("original"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("goodbye"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(700), - "dinner": cty.NumberIntVal(2100), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_single": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("address")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_single"}}, - Marks: 
cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_single": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ list_field = [ - - "hello", - + (sensitive value), - "friends", - ] - ~ map_key = { - ~ "breakfast" = 800 -> 700 - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. - ~ "dinner" = (sensitive value) - } - # Warning: this attribute value will be marked as sensitive and will not - # display in UI output after applying this change. - ~ map_whole = (sensitive value) - - # Warning: this block will be marked as sensitive and will not - # display in UI output after applying this change. - ~ nested_block_single { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - }`, - }, - "in-place update - both sensitive": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_map": cty.MapVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("original"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("goodbye"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(1800), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("cereal"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block_map": cty.MapVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.UnknownVal(cty.String), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_map": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingMap, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - - (sensitive value), - + (sensitive value), - "friends", - ] - ~ map_key = { - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - ~ map_whole = (sensitive value) - - ~ nested_block_map "foo" { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - }`, - }, - "in-place update - value unchanged, sensitivity changes": { - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "special": cty.BoolVal(true), - "some_number": cty.NumberIntVal(1), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - cty.StringVal("!"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secretval"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: 
cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "special"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "list_field": {Type: cty.List(cty.String), Optional: true}, - "special": {Type: cty.Bool, Optional: true}, - "some_number": {Type: cty.Number, Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - # Warning: this 
attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ ami = (sensitive value) - id = "i-02ae66f368e8518a9" - ~ list_field = [ - # (1 unchanged element hidden) - "friends", - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ (sensitive value), - ] - ~ map_key = { - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ "dinner" = (sensitive value) - # (1 unchanged element hidden) - } - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ map_whole = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ some_number = (sensitive value) - # Warning: this attribute value will no longer be marked as sensitive - # after applying this change. The value is unchanged. - ~ special = (sensitive value) - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - - # Warning: this block will no longer be marked as sensitive - # after applying this change. - ~ nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - }`, - }, - "deletion": { - Action: plans.Delete, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "list_field": cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friends"), - }), - "map_key": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.NumberIntVal(800), - "dinner": cty.NumberIntVal(2000), // sensitive key - }), - "map_whole": cty.MapVal(map[string]cty.Value{ - "breakfast": cty.StringVal("pizza"), - "dinner": cty.StringVal("pizza"), - }), - "nested_block": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - "another": cty.StringVal("not secret"), - }), - }), - "nested_block_set": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - "another": cty.StringVal("not secret"), - }), - }), - }), - After: cty.NullVal(cty.EmptyObject), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - RequiredReplace: cty.NewPathSet(), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - 
"list_field": {Type: cty.List(cty.String), Optional: true}, - "map_key": {Type: cty.Map(cty.Number), Optional: true}, - "map_whole": {Type: cty.Map(cty.String), Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Optional: true}, - "another": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - ExpectedOutput: ` # test_instance.example will be destroyed - - resource "test_instance" "example" { - - ami = (sensitive value) -> null - - id = "i-02ae66f368e8518a9" -> null - - list_field = [ - - "hello", - - (sensitive value), - ] -> null - - map_key = { - - "breakfast" = 800 - - "dinner" = (sensitive value) - } -> null - - map_whole = (sensitive value) -> null - - - nested_block_set { - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - }`, - }, - "update with sensitive value forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - "nested_block_set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("secret"), - }), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - "nested_block_set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "an_attr": cty.StringVal("changed"), - }), - }), - }), - BeforeValMarks: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("ami"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.GetAttrPath("nested_block_set"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - AfterValMarks: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("ami"), - Marks: 
cty.NewValueMarks(marks.Sensitive), - }, - { - Path: cty.GetAttrPath("nested_block_set"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested_block_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "an_attr": {Type: cty.String, Required: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - RequiredReplace: cty.NewPathSet( - cty.GetAttrPath("ami"), - cty.GetAttrPath("nested_block_set"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - - - nested_block_set { # forces replacement - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. - } - + nested_block_set { # forces replacement - # At least one attribute in this block is (or was) sensitive, - # so its contents will not be displayed. 
- } - }`, - }, - "update with sensitive attribute forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-BEFORE"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "ami": cty.StringVal("ami-AFTER"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, - }, - }, - RequiredReplace: cty.NewPathSet( - cty.GetAttrPath("ami"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = (sensitive value) # forces replacement - id = "i-02ae66f368e8518a9" - }`, - }, - "update with sensitive nested type attribute forcing replacement": { - Action: plans.DeleteThenCreate, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("top-secret"), - }), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-02ae66f368e8518a9"), - "conn_info": cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("not-secret"), - "password": cty.StringVal("new-secret"), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "conn_info": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSingle, - Attributes: map[string]*configschema.Attribute{ - "user": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - }, - }, - }, - RequiredReplace: cty.NewPathSet( - 
cty.GetAttrPath("conn_info"), - cty.GetAttrPath("password"), - ), - ExpectedOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ conn_info = { # forces replacement - ~ password = (sensitive value) - # (1 unchanged attribute hidden) - } - id = "i-02ae66f368e8518a9" - }`, - }, - } - runTestCases(t, testCases) -} - -func TestResourceChange_moved(t *testing.T) { - prevRunAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "previous", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - testCases := map[string]testCase{ - "moved and updated": { - PrevRunAddr: prevRunAddr, - Action: plans.Update, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("boop"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.example will be updated in-place - # (moved from test_instance.previous) - ~ resource "test_instance" "example" { - ~ bar = "baz" -> "boop" - id = "12345" - # (1 unchanged attribute hidden) - }`, - }, - "moved without changes": { - PrevRunAddr: prevRunAddr, - Action: plans.NoOp, - Mode: addrs.ManagedResourceMode, - Before: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("12345"), - "foo": cty.StringVal("hello"), - "bar": cty.StringVal("baz"), - }), - Schema: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - RequiredReplace: cty.NewPathSet(), - ExpectedOutput: ` # test_instance.previous has moved to test_instance.example - resource "test_instance" "example" { - id = "12345" - # (2 unchanged attributes hidden) - }`, - }, - } - - runTestCases(t, testCases) -} - -type testCase struct { - Action plans.Action - ActionReason plans.ResourceInstanceChangeActionReason - ModuleInst addrs.ModuleInstance - Mode addrs.ResourceMode - InstanceKey addrs.InstanceKey - DeposedKey states.DeposedKey - Before cty.Value - BeforeValMarks []cty.PathValueMarks - AfterValMarks []cty.PathValueMarks - After cty.Value - Schema *configschema.Block - RequiredReplace cty.PathSet - ExpectedOutput string - PrevRunAddr addrs.AbsResourceInstance -} - -func runTestCases(t *testing.T, testCases map[string]testCase) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - ty := tc.Schema.ImpliedType() - - beforeVal := tc.Before - switch { // Some fixups to make the test cases a little easier to write - case beforeVal.IsNull(): - beforeVal = cty.NullVal(ty) // allow mistyped nulls - case !beforeVal.IsKnown(): - beforeVal = cty.UnknownVal(ty) // allow mistyped unknowns - } - - afterVal := tc.After - switch { // Some fixups to make the test cases a little easier to write - case afterVal.IsNull(): - afterVal = cty.NullVal(ty) // allow mistyped nulls - case !afterVal.IsKnown(): - afterVal = cty.UnknownVal(ty) // allow mistyped unknowns - } - - addr := addrs.Resource{ - Mode: tc.Mode, - Type: "test_instance", - Name: "example", - }.Instance(tc.InstanceKey).Absolute(tc.ModuleInst) - - prevRunAddr := tc.PrevRunAddr - // If no previous run address is given, reuse the current address - // to make initialization easier - if 
prevRunAddr.Resource.Resource.Type == "" { - prevRunAddr = addr - } - - beforeDynamicValue, err := plans.NewDynamicValue(beforeVal, ty) - if err != nil { - t.Fatalf("failed to create dynamic before value: " + err.Error()) - } - - afterDynamicValue, err := plans.NewDynamicValue(afterVal, ty) - if err != nil { - t.Fatalf("failed to create dynamic after value: " + err.Error()) - } - - src := &plans.ResourceInstanceChangeSrc{ - ChangeSrc: plans.ChangeSrc{ - Action: tc.Action, - Before: beforeDynamicValue, - BeforeValMarks: tc.BeforeValMarks, - After: afterDynamicValue, - AfterValMarks: tc.AfterValMarks, - }, - - Addr: addr, - PrevRunAddr: prevRunAddr, - DeposedKey: tc.DeposedKey, - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ActionReason: tc.ActionReason, - RequiredReplace: tc.RequiredReplace, - } - - tfschemas := &terraform.Schemas{ - Providers: map[addrs.Provider]*providers.Schemas{ - src.ProviderAddr.Provider: { - ResourceTypes: map[string]*configschema.Block{ - src.Addr.Resource.Resource.Type: tc.Schema, - }, - DataSources: map[string]*configschema.Block{ - src.Addr.Resource.Resource.Type: tc.Schema, - }, - }, - }, - } - jsonchanges, err := jsonplan.MarshalResourceChanges([]*plans.ResourceInstanceChangeSrc{src}, tfschemas) - if err != nil { - t.Errorf("failed to marshal resource changes: " + err.Error()) - return - } - - jsonschemas := jsonprovider.MarshalForRenderer(tfschemas) - change := structured.FromJsonChange(jsonchanges[0].Change, attribute_path.AlwaysMatcher()) - renderer := Renderer{Colorize: color} - diff := diff{ - change: jsonchanges[0], - diff: differ.ComputeDiffForBlock(change, jsonschemas[jsonchanges[0].ProviderName].ResourceSchemas[jsonchanges[0].Type].Block), - } - output, _ := renderHumanDiff(renderer, diff, proposedChange) - if diff := cmp.Diff(output, tc.ExpectedOutput); diff != "" { - t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tc.ExpectedOutput, 
output, diff) - } - }) - } -} - -func TestOutputChanges(t *testing.T) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - - testCases := map[string]struct { - changes []*plans.OutputChangeSrc - output string - }{ - "new output value": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.NullVal(cty.DynamicPseudoType), - cty.StringVal("bar"), - false, - ), - }, - ` + foo = "bar"`, - }, - "removed output": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.StringVal("bar"), - cty.NullVal(cty.DynamicPseudoType), - false, - ), - }, - ` - foo = "bar" -> null`, - }, - "single string change": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.StringVal("bar"), - cty.StringVal("baz"), - false, - ), - }, - ` ~ foo = "bar" -> "baz"`, - }, - "element added to list": { - []*plans.OutputChangeSrc{ - outputChange( - "foo", - cty.ListVal([]cty.Value{ - cty.StringVal("alpha"), - cty.StringVal("beta"), - cty.StringVal("delta"), - cty.StringVal("epsilon"), - }), - cty.ListVal([]cty.Value{ - cty.StringVal("alpha"), - cty.StringVal("beta"), - cty.StringVal("gamma"), - cty.StringVal("delta"), - cty.StringVal("epsilon"), - }), - false, - ), - }, - ` ~ foo = [ - # (1 unchanged element hidden) - "beta", - + "gamma", - "delta", - # (1 unchanged element hidden) - ]`, - }, - "multiple outputs changed, one sensitive": { - []*plans.OutputChangeSrc{ - outputChange( - "a", - cty.NumberIntVal(1), - cty.NumberIntVal(2), - false, - ), - outputChange( - "b", - cty.StringVal("hunter2"), - cty.StringVal("correct-horse-battery-staple"), - true, - ), - outputChange( - "c", - cty.BoolVal(false), - cty.BoolVal(true), - false, - ), - }, - ` ~ a = 1 -> 2 - ~ b = (sensitive value) - ~ c = false -> true`, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - changes := &plans.Changes{ - Outputs: tc.changes, - } - - outputs, err := jsonplan.MarshalOutputChanges(changes) - if err != nil { - t.Fatalf("failed to marshal 
output changes") - } - - renderer := Renderer{Colorize: color} - diffs := precomputeDiffs(Plan{ - OutputChanges: outputs, - }, plans.NormalMode) - - output := renderHumanDiffOutputs(renderer, diffs.outputs) - if output != tc.output { - t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) - } - }) - } -} - -func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc { - addr := addrs.AbsOutputValue{ - OutputValue: addrs.OutputValue{Name: name}, - } - - change := &plans.OutputChange{ - Addr: addr, Change: plans.Change{ - Before: before, - After: after, - }, - Sensitive: sensitive, - } - - changeSrc, err := change.Encode() - if err != nil { - panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err)) - } - - return changeSrc -} - -// A basic test schema using a configurable NestingMode for one (NestedType) attribute and one block -func testSchema(nesting configschema.NestingMode) *configschema.Block { - var diskKey = "disks" - if nesting == configschema.NestingSingle { - diskKey = "disk" - } - - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - diskKey: { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} - -// A basic test schema using a configurable NestingMode for one (NestedType) -// attribute marked sensitive. 
-func testSchemaSensitive(nesting configschema.NestingMode) *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "disks": { - Sensitive: true, - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - } -} - -func testSchemaMultipleBlocks(nesting configschema.NestingMode) *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "disks": { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - "leaf_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} - -// similar to testSchema with the addition of a "new_field" block -func testSchemaPlus(nesting configschema.NestingMode) *configschema.Block { - var diskKey = "disks" - if nesting == configschema.NestingSingle { - diskKey = "disk" - } - - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - diskKey: { - NestedType: 
&configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "mount_point": {Type: cty.String, Optional: true}, - "size": {Type: cty.String, Optional: true}, - }, - Nesting: nesting, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "root_block_device": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "volume_type": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "new_field": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - Nesting: nesting, - }, - }, - } -} diff --git a/internal/command/jsonformat/state.go b/internal/command/jsonformat/state.go deleted file mode 100644 index a86838ff16c8..000000000000 --- a/internal/command/jsonformat/state.go +++ /dev/null @@ -1,108 +0,0 @@ -package jsonformat - -import ( - "sort" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/command/jsonformat/differ" - "github.com/hashicorp/terraform/internal/command/jsonformat/structured" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/command/jsonstate" -) - -type State struct { - StateFormatVersion string `json:"state_format_version"` - RootModule jsonstate.Module `json:"root"` - RootModuleOutputs map[string]jsonstate.Output `json:"root_module_outputs"` - - ProviderFormatVersion string `json:"provider_format_version"` - ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` -} - -func (state State) Empty() bool { - return len(state.RootModuleOutputs) == 0 && len(state.RootModule.Resources) == 0 && len(state.RootModule.ChildModules) == 0 -} - -func (state State) GetSchema(resource jsonstate.Resource) *jsonprovider.Schema { - switch resource.Mode { - case jsonstate.ManagedResourceMode: - return state.ProviderSchemas[resource.ProviderName].ResourceSchemas[resource.Type] - case 
jsonstate.DataResourceMode: - return state.ProviderSchemas[resource.ProviderName].DataSourceSchemas[resource.Type] - default: - panic("found unrecognized resource mode: " + resource.Mode) - } -} - -func (state State) renderHumanStateModule(renderer Renderer, module jsonstate.Module, opts computed.RenderHumanOpts, first bool) { - if len(module.Resources) > 0 && !first { - renderer.Streams.Println() - } - - for _, resource := range module.Resources { - - if !first { - renderer.Streams.Println() - } - - if first { - first = false - } - - if len(resource.DeposedKey) > 0 { - renderer.Streams.Printf("# %s: (deposed object %s)", resource.Address, resource.DeposedKey) - } else if resource.Tainted { - renderer.Streams.Printf("# %s: (tainted)", resource.Address) - } else { - renderer.Streams.Printf("# %s:", resource.Address) - } - - renderer.Streams.Println() - - schema := state.GetSchema(resource) - switch resource.Mode { - case jsonstate.ManagedResourceMode: - change := structured.FromJsonResource(resource) - renderer.Streams.Printf("resource %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) - case jsonstate.DataResourceMode: - change := structured.FromJsonResource(resource) - renderer.Streams.Printf("data %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) - default: - panic("found unrecognized resource mode: " + resource.Mode) - } - - renderer.Streams.Println() - } - - for _, child := range module.ChildModules { - state.renderHumanStateModule(renderer, child, opts, first) - } -} - -func (state State) renderHumanStateOutputs(renderer Renderer, opts computed.RenderHumanOpts) { - - if len(state.RootModuleOutputs) > 0 { - renderer.Streams.Printf("\n\nOutputs:\n\n") - - var keys []string - for key := range state.RootModuleOutputs { - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - output := state.RootModuleOutputs[key] - 
change := structured.FromJsonOutput(output) - ctype, err := ctyjson.UnmarshalType(output.Type) - if err != nil { - // We can actually do this without the type, so even if we fail - // to work out the type let's just render this anyway. - renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForOutput(change).RenderHuman(0, opts)) - } else { - renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForType(change, ctype).RenderHuman(0, opts)) - } - } - } -} diff --git a/internal/command/jsonformat/state_test.go b/internal/command/jsonformat/state_test.go deleted file mode 100644 index 1728f63a5a0a..000000000000 --- a/internal/command/jsonformat/state_test.go +++ /dev/null @@ -1,437 +0,0 @@ -package jsonformat - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terminal" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestState(t *testing.T) { - color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} - - tests := []struct { - State *format.StateOpts - Want string - }{ - { - &format.StateOpts{ - State: &states.State{}, - Color: color, - Schemas: &terraform.Schemas{}, - }, - "The state file is empty. 
No resources are represented.\n", - }, - { - &format.StateOpts{ - State: basicState(t), - Color: color, - Schemas: testSchemas(), - }, - basicStateOutput, - }, - { - &format.StateOpts{ - State: nestedState(t), - Color: color, - Schemas: testSchemas(), - }, - nestedStateOutput, - }, - { - &format.StateOpts{ - State: deposedState(t), - Color: color, - Schemas: testSchemas(), - }, - deposedNestedStateOutput, - }, - { - &format.StateOpts{ - State: onlyDeposedState(t), - Color: color, - Schemas: testSchemas(), - }, - onlyDeposedOutput, - }, - { - &format.StateOpts{ - State: stateWithMoreOutputs(t), - Color: color, - Schemas: testSchemas(), - }, - stateWithMoreOutputsOutput, - }, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - - root, outputs, err := jsonstate.MarshalForRenderer(&statefile.File{ - State: tt.State.State, - }, tt.State.Schemas) - - if err != nil { - t.Errorf("found err: %v", err) - return - } - - streams, done := terminal.StreamsForTesting(t) - renderer := Renderer{ - Colorize: color, - Streams: streams, - } - - renderer.RenderHumanState(State{ - StateFormatVersion: jsonstate.FormatVersion, - RootModule: root, - RootModuleOutputs: outputs, - ProviderFormatVersion: jsonprovider.FormatVersion, - ProviderSchemas: jsonprovider.MarshalForRenderer(tt.State.Schemas), - }) - - result := done(t).All() - if diff := cmp.Diff(result, tt.Want); diff != "" { - t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tt.Want, result, diff) - } - }) - } -} - -func testProvider() *terraform.MockProvider { - p := new(terraform.MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - - p.GetProviderSchemaResponse = testProviderSchema() - - return p -} - -func testProviderSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: 
&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_resource": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "woozles": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } -} - -func testSchemas() *terraform.Schemas { - provider := testProvider() - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.ProviderSchema(), - }, - } -} - -const basicStateOutput = `# data.test_data_source.data: -data "test_data_source" "data" { - compute = "sure" -} - -# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" -} - - -Outputs: - -bar = "bar value" -` - -const nestedStateOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} -` - -const deposedNestedStateOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} - -# test_resource.baz[0]: (deposed object 1234) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} -` - -const onlyDeposedOutput = `# test_resource.baz[0]: (deposed 
object 1234) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} - -# test_resource.baz[0]: (deposed object 5678) -resource "test_resource" "baz" { - woozles = "confuzles" - - nested { - value = "42" - } -} -` - -const stateWithMoreOutputsOutput = `# test_resource.baz[0]: -resource "test_resource" "baz" { - woozles = "confuzles" -} - - -Outputs: - -bool_var = true -int_var = 42 -map_var = { - "first" = "foo" - "second" = "bar" -} -sensitive_var = (sensitive value) -string_var = "string value" -` - -func basicState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetLocalValue("foo", cty.StringVal("foo value")) - rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_source", - Name: "data", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"compute":"sure"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func stateWithMoreOutputs(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetOutputValue("string_var", cty.StringVal("string value"), false) 
- rootModule.SetOutputValue("int_var", cty.NumberIntVal(42), false) - rootModule.SetOutputValue("bool_var", cty.BoolVal(true), false) - rootModule.SetOutputValue("sensitive_var", cty.StringVal("secret!!!"), true) - rootModule.SetOutputValue("map_var", cty.MapVal(map[string]cty.Value{ - "first": cty.StringVal("foo"), - "second": cty.StringVal("bar"), - }), false) - - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func nestedState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -func deposedState(t *testing.T) *states.State { - state := nestedState(t) - rootModule := state.RootModule() - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("1234"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: 
addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} - -// replicate a corrupt resource where only a deposed exists -func onlyDeposedState(t *testing.T) *states.State { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("1234"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "baz", - }.Instance(addrs.IntKey(0)), - states.DeposedKey("5678"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 0, - AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - return state -} diff --git a/internal/command/jsonformat/structured/change.go b/internal/command/jsonformat/structured/change.go deleted file mode 100644 index 3f06301f928e..000000000000 --- a/internal/command/jsonformat/structured/change.go +++ /dev/null @@ -1,277 +0,0 @@ -package structured - -import ( - "encoding/json" - "reflect" - - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonstate" - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/plans" -) - -// Change contains 
the unmarshalled generic interface{} types that are output by -// the JSON functions in the various json packages (such as jsonplan and -// jsonprovider). -// -// A Change can be converted into a computed.Diff, ready for rendering, with the -// ComputeDiffForAttribute, ComputeDiffForOutput, and ComputeDiffForBlock -// functions. -// -// The Before and After fields are actually go-cty values, but we cannot convert -// them directly because of the Terraform Cloud redacted endpoint. The redacted -// endpoint turns sensitive values into strings regardless of their types. -// Because of this, we cannot just do a direct conversion using the ctyjson -// package. We would have to iterate through the schema first, find the -// sensitive values and their mapped types, update the types inside the schema -// to strings, and then go back and do the overall conversion. This isn't -// including any of the more complicated parts around what happens if something -// was sensitive before and isn't sensitive after or vice versa. This would mean -// the type would need to change between the before and after value. It is in -// fact just easier to iterate through the values as generic JSON interfaces. -type Change struct { - - // BeforeExplicit matches AfterExplicit except references the Before value. - BeforeExplicit bool - - // AfterExplicit refers to whether the After value is explicit or - // implicit. It is explicit if it has been specified by the user, and - // implicit if it has been set as a consequence of other changes. - // - // For example, explicitly setting a value to null in a list should result - // in After being null and AfterExplicit being true. In comparison, - // removing an element from a list should also result in After being null - // and AfterExplicit being false. Without the explicit information our - // functions would not be able to tell the difference between these two - // cases. 
- AfterExplicit bool - - // Before contains the value before the proposed change. - // - // The type of the value should be informed by the schema and cast - // appropriately when needed. - Before interface{} - - // After contains the value after the proposed change. - // - // The type of the value should be informed by the schema and cast - // appropriately when needed. - After interface{} - - // Unknown describes whether the After value is known or unknown at the time - // of the plan. In practice, this means the after value should be rendered - // simply as `(known after apply)`. - // - // The concrete value could be a boolean describing whether the entirety of - // the After value is unknown, or it could be a list or a map depending on - // the schema describing whether specific elements or attributes within the - // value are unknown. - Unknown interface{} - - // BeforeSensitive matches Unknown, but references whether the Before value - // is sensitive. - BeforeSensitive interface{} - - // AfterSensitive matches Unknown, but references whether the After value is - // sensitive. - AfterSensitive interface{} - - // ReplacePaths contains a set of paths that point to attributes/elements - // that are causing the overall resource to be replaced rather than simply - // updated. - ReplacePaths attribute_path.Matcher - - // RelevantAttributes contains a set of paths that point attributes/elements - // that we should display. Any element/attribute not matched by this Matcher - // should be skipped. - RelevantAttributes attribute_path.Matcher -} - -// FromJsonChange unmarshals the raw []byte values in the jsonplan.Change -// structs into generic interface{} types that can be reasoned about. 
-func FromJsonChange(change jsonplan.Change, relevantAttributes attribute_path.Matcher) Change { - return Change{ - Before: unmarshalGeneric(change.Before), - After: unmarshalGeneric(change.After), - Unknown: unmarshalGeneric(change.AfterUnknown), - BeforeSensitive: unmarshalGeneric(change.BeforeSensitive), - AfterSensitive: unmarshalGeneric(change.AfterSensitive), - ReplacePaths: attribute_path.Parse(change.ReplacePaths, false), - RelevantAttributes: relevantAttributes, - } -} - -// FromJsonResource unmarshals the raw values in the jsonstate.Resource structs -// into generic interface{} types that can be reasoned about. -func FromJsonResource(resource jsonstate.Resource) Change { - return Change{ - // We model resource formatting as NoOps. - Before: unwrapAttributeValues(resource.AttributeValues), - After: unwrapAttributeValues(resource.AttributeValues), - - // We have some sensitive values, but we don't have any unknown values. - Unknown: false, - BeforeSensitive: unmarshalGeneric(resource.SensitiveValues), - AfterSensitive: unmarshalGeneric(resource.SensitiveValues), - - // We don't display replacement data for resources, and all attributes - // are relevant. - ReplacePaths: attribute_path.Empty(false), - RelevantAttributes: attribute_path.AlwaysMatcher(), - } -} - -// FromJsonOutput unmarshals the raw values in the jsonstate.Output structs into -// generic interface{} types that can be reasoned about. -func FromJsonOutput(output jsonstate.Output) Change { - return Change{ - // We model resource formatting as NoOps. - Before: unmarshalGeneric(output.Value), - After: unmarshalGeneric(output.Value), - - // We have some sensitive values, but we don't have any unknown values. - Unknown: false, - BeforeSensitive: output.Sensitive, - AfterSensitive: output.Sensitive, - - // We don't display replacement data for resources, and all attributes - // are relevant. 
- ReplacePaths: attribute_path.Empty(false), - RelevantAttributes: attribute_path.AlwaysMatcher(), - } -} - -// FromJsonViewsOutput unmarshals the raw values in the viewsjson.Output structs into -// generic interface{} types that can be reasoned about. -func FromJsonViewsOutput(output viewsjson.Output) Change { - return Change{ - // We model resource formatting as NoOps. - Before: unmarshalGeneric(output.Value), - After: unmarshalGeneric(output.Value), - - // We have some sensitive values, but we don't have any unknown values. - Unknown: false, - BeforeSensitive: output.Sensitive, - AfterSensitive: output.Sensitive, - - // We don't display replacement data for resources, and all attributes - // are relevant. - ReplacePaths: attribute_path.Empty(false), - RelevantAttributes: attribute_path.AlwaysMatcher(), - } -} - -// CalculateAction does a very simple analysis to make the best guess at the -// action this change describes. For complex types such as objects, maps, lists, -// or sets it is likely more efficient to work out the action directly instead -// of relying on this function. -func (change Change) CalculateAction() plans.Action { - if (change.Before == nil && !change.BeforeExplicit) && (change.After != nil || change.AfterExplicit) { - return plans.Create - } - if (change.After == nil && !change.AfterExplicit) && (change.Before != nil || change.BeforeExplicit) { - return plans.Delete - } - - if reflect.DeepEqual(change.Before, change.After) && change.AfterExplicit == change.BeforeExplicit && change.IsAfterSensitive() == change.IsBeforeSensitive() { - return plans.NoOp - } - - return plans.Update -} - -// GetDefaultActionForIteration is used to guess what the change could be for -// complex attributes (collections and objects) and blocks. -// -// You can't really tell the difference between a NoOp and an Update just by -// looking at the attribute itself as you need to inspect the children. 
-// -// This function returns a Delete or a Create action if the before or after -// values were null, and returns a NoOp for all other cases. It should be used -// in conjunction with compareActions to calculate the actual action based on -// the actions of the children. -func (change Change) GetDefaultActionForIteration() plans.Action { - if change.Before == nil && change.After == nil { - return plans.NoOp - } - - if change.Before == nil { - return plans.Create - } - if change.After == nil { - return plans.Delete - } - return plans.NoOp -} - -// AsNoOp returns the current change as if it is a NoOp operation. -// -// Basically it replaces all the after values with the before values. -func (change Change) AsNoOp() Change { - return Change{ - BeforeExplicit: change.BeforeExplicit, - AfterExplicit: change.BeforeExplicit, - Before: change.Before, - After: change.Before, - Unknown: false, - BeforeSensitive: change.BeforeSensitive, - AfterSensitive: change.BeforeSensitive, - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } -} - -// AsDelete returns the current change as if it is a Delete operation. -// -// Basically it replaces all the after values with nil or false. -func (change Change) AsDelete() Change { - return Change{ - BeforeExplicit: change.BeforeExplicit, - AfterExplicit: false, - Before: change.Before, - After: nil, - Unknown: nil, - BeforeSensitive: change.BeforeSensitive, - AfterSensitive: nil, - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } -} - -// AsCreate returns the current change as if it is a Create operation. -// -// Basically it replaces all the before values with nil or false. 
-func (change Change) AsCreate() Change { - return Change{ - BeforeExplicit: false, - AfterExplicit: change.AfterExplicit, - Before: nil, - After: change.After, - Unknown: change.Unknown, - BeforeSensitive: nil, - AfterSensitive: change.AfterSensitive, - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } -} - -func unmarshalGeneric(raw json.RawMessage) interface{} { - if raw == nil { - return nil - } - - var out interface{} - if err := json.Unmarshal(raw, &out); err != nil { - panic("unrecognized json type: " + err.Error()) - } - return out -} - -func unwrapAttributeValues(values jsonstate.AttributeValues) map[string]interface{} { - out := make(map[string]interface{}) - for key, value := range values { - out[key] = unmarshalGeneric(value) - } - return out -} diff --git a/internal/command/jsonformat/structured/map.go b/internal/command/jsonformat/structured/map.go deleted file mode 100644 index 0c8268ad6beb..000000000000 --- a/internal/command/jsonformat/structured/map.go +++ /dev/null @@ -1,160 +0,0 @@ -package structured - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" -) - -// ChangeMap is a Change that represents a Map or an Object type, and has -// converted the relevant interfaces into maps for easier access. -type ChangeMap struct { - // Before contains the value before the proposed change. - Before map[string]interface{} - - // After contains the value after the proposed change. - After map[string]interface{} - - // Unknown contains the unknown status of any elements/attributes of this - // map/object. - Unknown map[string]interface{} - - // BeforeSensitive contains the before sensitive status of any - // elements/attributes of this map/object. - BeforeSensitive map[string]interface{} - - // AfterSensitive contains the after sensitive status of any - // elements/attributes of this map/object. 
- AfterSensitive map[string]interface{} - - // ReplacePaths matches the same attributes in Change exactly. - ReplacePaths attribute_path.Matcher - - // RelevantAttributes matches the same attributes in Change exactly. - RelevantAttributes attribute_path.Matcher -} - -// AsMap converts the Change into an object or map representation by converting -// the internal Before, After, Unknown, BeforeSensitive, and AfterSensitive -// data structures into generic maps. -func (change Change) AsMap() ChangeMap { - return ChangeMap{ - Before: genericToMap(change.Before), - After: genericToMap(change.After), - Unknown: genericToMap(change.Unknown), - BeforeSensitive: genericToMap(change.BeforeSensitive), - AfterSensitive: genericToMap(change.AfterSensitive), - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } -} - -// GetChild safely packages up a Change object for the given child, handling -// all the cases where the data might be null or a static boolean. -func (m ChangeMap) GetChild(key string) Change { - before, beforeExplicit := getFromGenericMap(m.Before, key) - after, afterExplicit := getFromGenericMap(m.After, key) - unknown, _ := getFromGenericMap(m.Unknown, key) - beforeSensitive, _ := getFromGenericMap(m.BeforeSensitive, key) - afterSensitive, _ := getFromGenericMap(m.AfterSensitive, key) - - return Change{ - BeforeExplicit: beforeExplicit, - AfterExplicit: afterExplicit, - Before: before, - After: after, - Unknown: unknown, - BeforeSensitive: beforeSensitive, - AfterSensitive: afterSensitive, - ReplacePaths: m.ReplacePaths.GetChildWithKey(key), - RelevantAttributes: m.RelevantAttributes.GetChildWithKey(key), - } -} - -// ExplicitKeys returns the keys in the Before and After, as opposed to AllKeys -// which also includes keys from the additional meta structures (like the -// sensitive and unknown values). 
-// -// This function is useful for processing nested attributes and repeated blocks -// where the unknown and sensitive structs contain information about the actual -// attributes, while the before and after structs hold the actual nested values. -func (m ChangeMap) ExplicitKeys() []string { - keys := make(map[string]bool) - for before := range m.Before { - if _, ok := keys[before]; ok { - continue - } - keys[before] = true - } - for after := range m.After { - if _, ok := keys[after]; ok { - continue - } - keys[after] = true - } - - var dedupedKeys []string - for key := range keys { - dedupedKeys = append(dedupedKeys, key) - } - return dedupedKeys -} - -// AllKeys returns all the possible keys for this map. The keys for the map are -// potentially hidden and spread across multiple internal data structures and -// so this function conveniently packages them up. -func (m ChangeMap) AllKeys() []string { - keys := make(map[string]bool) - for before := range m.Before { - if _, ok := keys[before]; ok { - continue - } - keys[before] = true - } - for after := range m.After { - if _, ok := keys[after]; ok { - continue - } - keys[after] = true - } - for unknown := range m.Unknown { - if _, ok := keys[unknown]; ok { - continue - } - keys[unknown] = true - } - for sensitive := range m.AfterSensitive { - if _, ok := keys[sensitive]; ok { - continue - } - keys[sensitive] = true - } - for sensitive := range m.BeforeSensitive { - if _, ok := keys[sensitive]; ok { - continue - } - keys[sensitive] = true - } - - var dedupedKeys []string - for key := range keys { - dedupedKeys = append(dedupedKeys, key) - } - return dedupedKeys -} - -func getFromGenericMap(generic map[string]interface{}, key string) (interface{}, bool) { - if generic == nil { - return nil, false - } - - if child, ok := generic[key]; ok { - return child, ok - } - return nil, false -} - -func genericToMap(generic interface{}) map[string]interface{} { - if concrete, ok := generic.(map[string]interface{}); ok { - return 
concrete - } - return nil -} diff --git a/internal/command/jsonformat/structured/sensitive.go b/internal/command/jsonformat/structured/sensitive.go deleted file mode 100644 index 57e2ff7cf7e8..000000000000 --- a/internal/command/jsonformat/structured/sensitive.go +++ /dev/null @@ -1,89 +0,0 @@ -package structured - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" - "github.com/hashicorp/terraform/internal/plans" -) - -type ProcessSensitiveInner func(change Change) computed.Diff -type CreateSensitiveDiff func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff - -func (change Change) IsBeforeSensitive() bool { - if sensitive, ok := change.BeforeSensitive.(bool); ok { - return sensitive - } - return false -} - -func (change Change) IsAfterSensitive() bool { - if sensitive, ok := change.AfterSensitive.(bool); ok { - return sensitive - } - return false -} - -// CheckForSensitive is a helper function that handles all common functionality -// for processing a sensitive value. -// -// It returns the computed sensitive diff and true if this value was sensitive -// and needs to be rendered as such, otherwise it returns the second return -// value as false and the first value can be discarded. -// -// The actual processing of sensitive values happens within the -// ProcessSensitiveInner and CreateSensitiveDiff functions. Callers should -// implement these functions as appropriate when using this function. -// -// The ProcessSensitiveInner function should simply return a computed.Diff for -// the provided Change. The provided Change will be the same as the original -// change but with the sensitive metadata removed. The new inner diff is then -// passed into the actual CreateSensitiveDiff function which should return the -// actual sensitive diff. 
-// -// We include the inner change into the sensitive diff as a way to let the -// sensitive renderer have as much information as possible, while still letting -// it do the actual rendering. -func (change Change) CheckForSensitive(processInner ProcessSensitiveInner, createDiff CreateSensitiveDiff) (computed.Diff, bool) { - beforeSensitive := change.IsBeforeSensitive() - afterSensitive := change.IsAfterSensitive() - - if !beforeSensitive && !afterSensitive { - return computed.Diff{}, false - } - - // We are still going to give the change the contents of the actual change. - // So we create a new Change with everything matching the current value, - // except for the sensitivity. - // - // The change can choose what to do with this information, in most cases - // it will just be ignored in favour of printing `(sensitive value)`. - - value := Change{ - BeforeExplicit: change.BeforeExplicit, - AfterExplicit: change.AfterExplicit, - Before: change.Before, - After: change.After, - Unknown: change.Unknown, - BeforeSensitive: false, - AfterSensitive: false, - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } - - inner := processInner(value) - - action := inner.Action - sensitiveStatusChanged := beforeSensitive != afterSensitive - - // nullNoOp is a stronger NoOp, where not only is there no change happening - // but the before and after values are not explicitly set and are both - // null. This will override even the sensitive state changing. - nullNoOp := change.Before == nil && !change.BeforeExplicit && change.After == nil && !change.AfterExplicit - - if action == plans.NoOp && sensitiveStatusChanged && !nullNoOp { - // Let's override this, since it means the sensitive status has changed - // rather than the actual content of the value. 
- action = plans.Update - } - - return createDiff(inner, beforeSensitive, afterSensitive, action), true -} diff --git a/internal/command/jsonformat/structured/slice.go b/internal/command/jsonformat/structured/slice.go deleted file mode 100644 index ae7467d60423..000000000000 --- a/internal/command/jsonformat/structured/slice.go +++ /dev/null @@ -1,91 +0,0 @@ -package structured - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/structured/attribute_path" -) - -// ChangeSlice is a Change that represents a Tuple, Set, or List type, and has -// converted the relevant interfaces into slices for easier access. -type ChangeSlice struct { - // Before contains the value before the proposed change. - Before []interface{} - - // After contains the value after the proposed change. - After []interface{} - - // Unknown contains the unknown status of any elements of this list/set. - Unknown []interface{} - - // BeforeSensitive contains the before sensitive status of any elements of - //this list/set. - BeforeSensitive []interface{} - - // AfterSensitive contains the after sensitive status of any elements of - //this list/set. - AfterSensitive []interface{} - - // ReplacePaths matches the same attributes in Change exactly. - ReplacePaths attribute_path.Matcher - - // RelevantAttributes matches the same attributes in Change exactly. - RelevantAttributes attribute_path.Matcher -} - -// AsSlice converts the Change into a slice representation by converting the -// internal Before, After, Unknown, BeforeSensitive, and AfterSensitive data -// structures into generic slices. 
-func (change Change) AsSlice() ChangeSlice { - return ChangeSlice{ - Before: genericToSlice(change.Before), - After: genericToSlice(change.After), - Unknown: genericToSlice(change.Unknown), - BeforeSensitive: genericToSlice(change.BeforeSensitive), - AfterSensitive: genericToSlice(change.AfterSensitive), - ReplacePaths: change.ReplacePaths, - RelevantAttributes: change.RelevantAttributes, - } -} - -// GetChild safely packages up a Change object for the given child, handling -// all the cases where the data might be null or a static boolean. -func (s ChangeSlice) GetChild(beforeIx, afterIx int) Change { - before, beforeExplicit := getFromGenericSlice(s.Before, beforeIx) - after, afterExplicit := getFromGenericSlice(s.After, afterIx) - unknown, _ := getFromGenericSlice(s.Unknown, afterIx) - beforeSensitive, _ := getFromGenericSlice(s.BeforeSensitive, beforeIx) - afterSensitive, _ := getFromGenericSlice(s.AfterSensitive, afterIx) - - mostRelevantIx := beforeIx - if beforeIx < 0 || beforeIx >= len(s.Before) { - mostRelevantIx = afterIx - } - - return Change{ - BeforeExplicit: beforeExplicit, - AfterExplicit: afterExplicit, - Before: before, - After: after, - Unknown: unknown, - BeforeSensitive: beforeSensitive, - AfterSensitive: afterSensitive, - ReplacePaths: s.ReplacePaths.GetChildWithIndex(mostRelevantIx), - RelevantAttributes: s.RelevantAttributes.GetChildWithIndex(mostRelevantIx), - } -} - -func getFromGenericSlice(generic []interface{}, ix int) (interface{}, bool) { - if generic == nil { - return nil, false - } - if ix < 0 || ix >= len(generic) { - return nil, false - } - return generic[ix], true -} - -func genericToSlice(generic interface{}) []interface{} { - if concrete, ok := generic.([]interface{}); ok { - return concrete - } - return nil -} diff --git a/internal/command/jsonformat/structured/unknown.go b/internal/command/jsonformat/structured/unknown.go deleted file mode 100644 index 8a3ad386bc6e..000000000000 --- 
a/internal/command/jsonformat/structured/unknown.go +++ /dev/null @@ -1,62 +0,0 @@ -package structured - -import ( - "github.com/hashicorp/terraform/internal/command/jsonformat/computed" -) - -type ProcessUnknown func(current Change) computed.Diff -type ProcessUnknownWithBefore func(current Change, before Change) computed.Diff - -func (change Change) IsUnknown() bool { - if unknown, ok := change.Unknown.(bool); ok { - return unknown - } - return false -} - -// CheckForUnknown is a helper function that handles all common functionality -// for processing an unknown value. -// -// It returns the computed unknown diff and true if this value was unknown and -// needs to be rendered as such, otherwise it returns the second return value as -// false and the first return value should be discarded. -// -// The actual processing of unknown values happens in the ProcessUnknown and -// ProcessUnknownWithBefore functions. If a value is unknown and is being -// created, the ProcessUnknown function is called and the caller should decide -// how to create the unknown value. If a value is being updated the -// ProcessUnknownWithBefore function is called and the function provides the -// before value as if it is being deleted for the caller to handle. Note that -// values being deleted will never be marked as unknown so this case isn't -// handled. -// -// The childUnknown argument is meant to allow callers with extra information -// about the type being processed to provide a list of known children that might -// not be present in the before or after values. These values will be propagated -// as the unknown values in the before value should it be needed. -func (change Change) CheckForUnknown(childUnknown interface{}, process ProcessUnknown, processBefore ProcessUnknownWithBefore) (computed.Diff, bool) { - unknown := change.IsUnknown() - - if !unknown { - return computed.Diff{}, false - } - - // No matter what we do here, we want to treat the after value as explicit. 
- // This is because it is going to be null in the value, and we don't want - // the functions in this package to assume this means it has been deleted. - change.AfterExplicit = true - - if change.Before == nil { - return process(change), true - } - - // If we get here, then we have a before value. We're going to model a - // delete operation and our renderer later can render the overall change - // accurately. - before := change.AsDelete() - - // We also let our callers override the unknown values in any before, this - // is the renderers can display them as being computed instead of deleted. - before.Unknown = childUnknown - return processBefore(change, before), true -} diff --git a/internal/command/jsonplan/plan.go b/internal/command/jsonplan/plan.go deleted file mode 100644 index fca01a1d28a7..000000000000 --- a/internal/command/jsonplan/plan.go +++ /dev/null @@ -1,864 +0,0 @@ -package jsonplan - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/jsonchecks" - "github.com/hashicorp/terraform/internal/command/jsonconfig" - "github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/version" -) - -// FormatVersion represents the version of the json format and will be -// incremented for any change to this format that requires changes to a -// consuming parser. 
-const ( - FormatVersion = "1.1" - - ResourceInstanceReplaceBecauseCannotUpdate = "replace_because_cannot_update" - ResourceInstanceReplaceBecauseTainted = "replace_because_tainted" - ResourceInstanceReplaceByRequest = "replace_by_request" - ResourceInstanceReplaceByTriggers = "replace_by_triggers" - ResourceInstanceDeleteBecauseNoResourceConfig = "delete_because_no_resource_config" - ResourceInstanceDeleteBecauseWrongRepetition = "delete_because_wrong_repetition" - ResourceInstanceDeleteBecauseCountIndex = "delete_because_count_index" - ResourceInstanceDeleteBecauseEachKey = "delete_because_each_key" - ResourceInstanceDeleteBecauseNoModule = "delete_because_no_module" - ResourceInstanceDeleteBecauseNoMoveTarget = "delete_because_no_move_target" - ResourceInstanceReadBecauseConfigUnknown = "read_because_config_unknown" - ResourceInstanceReadBecauseDependencyPending = "read_because_dependency_pending" -) - -// Plan is the top-level representation of the json format of a plan. It includes -// the complete config and current state. -type plan struct { - FormatVersion string `json:"format_version,omitempty"` - TerraformVersion string `json:"terraform_version,omitempty"` - Variables variables `json:"variables,omitempty"` - PlannedValues stateValues `json:"planned_values,omitempty"` - // ResourceDrift and ResourceChanges are sorted in a user-friendly order - // that is undefined at this time, but consistent. 
- ResourceDrift []ResourceChange `json:"resource_drift,omitempty"` - ResourceChanges []ResourceChange `json:"resource_changes,omitempty"` - OutputChanges map[string]Change `json:"output_changes,omitempty"` - PriorState json.RawMessage `json:"prior_state,omitempty"` - Config json.RawMessage `json:"configuration,omitempty"` - RelevantAttributes []ResourceAttr `json:"relevant_attributes,omitempty"` - Checks json.RawMessage `json:"checks,omitempty"` -} - -func newPlan() *plan { - return &plan{ - FormatVersion: FormatVersion, - } -} - -// ResourceAttr contains the address and attribute of an external for the -// RelevantAttributes in the plan. -type ResourceAttr struct { - Resource string `json:"resource"` - Attr json.RawMessage `json:"attribute"` -} - -// Change is the representation of a proposed change for an object. -type Change struct { - // Actions are the actions that will be taken on the object selected by the - // properties below. Valid actions values are: - // ["no-op"] - // ["create"] - // ["read"] - // ["update"] - // ["delete", "create"] - // ["create", "delete"] - // ["delete"] - // The two "replace" actions are represented in this way to allow callers to - // e.g. just scan the list for "delete" to recognize all three situations - // where the object will be deleted, allowing for any new deletion - // combinations that might be added in future. - Actions []string `json:"actions,omitempty"` - - // Before and After are representations of the object value both before and - // after the action. For ["create"] and ["delete"] actions, either "before" - // or "after" is unset (respectively). For ["no-op"], the before and after - // values are identical. The "after" value will be incomplete if there are - // values within it that won't be known until after apply. 
- Before json.RawMessage `json:"before,omitempty"` - After json.RawMessage `json:"after,omitempty"` - - // AfterUnknown is an object value with similar structure to After, but - // with all unknown leaf values replaced with true, and all known leaf - // values omitted. This can be combined with After to reconstruct a full - // value after the action, including values which will only be known after - // apply. - AfterUnknown json.RawMessage `json:"after_unknown,omitempty"` - - // BeforeSensitive and AfterSensitive are object values with similar - // structure to Before and After, but with all sensitive leaf values - // replaced with true, and all non-sensitive leaf values omitted. These - // objects should be combined with Before and After to prevent accidental - // display of sensitive values in user interfaces. - BeforeSensitive json.RawMessage `json:"before_sensitive,omitempty"` - AfterSensitive json.RawMessage `json:"after_sensitive,omitempty"` - - // ReplacePaths is an array of arrays representing a set of paths into the - // object value which resulted in the action being "replace". This will be - // omitted if the action is not replace, or if no paths caused the - // replacement (for example, if the resource was tainted). Each path - // consists of one or more steps, each of which will be a number or a - // string. - ReplacePaths json.RawMessage `json:"replace_paths,omitempty"` -} - -type output struct { - Sensitive bool `json:"sensitive"` - Type json.RawMessage `json:"type,omitempty"` - Value json.RawMessage `json:"value,omitempty"` -} - -// variables is the JSON representation of the variables provided to the current -// plan. -type variables map[string]*variable - -type variable struct { - Value json.RawMessage `json:"value,omitempty"` -} - -// MarshalForRenderer returns the pre-json encoding changes of the requested -// plan, in a format available to the structured renderer. 
-// -// This function does a small part of the Marshal function, as it only returns -// the part of the plan required by the jsonformat.Plan renderer. -func MarshalForRenderer( - p *plans.Plan, - schemas *terraform.Schemas, -) (map[string]Change, []ResourceChange, []ResourceChange, []ResourceAttr, error) { - output := newPlan() - - var err error - if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { - return nil, nil, nil, nil, err - } - - if output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas); err != nil { - return nil, nil, nil, nil, err - } - - if len(p.DriftedResources) > 0 { - // In refresh-only mode, we render all resources marked as drifted, - // including those which have moved without other changes. In other plan - // modes, move-only changes will be included in the planned changes, so - // we skip them here. - var driftedResources []*plans.ResourceInstanceChangeSrc - if p.UIMode == plans.RefreshOnlyMode { - driftedResources = p.DriftedResources - } else { - for _, dr := range p.DriftedResources { - if dr.Action != plans.NoOp { - driftedResources = append(driftedResources, dr) - } - } - } - output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) - if err != nil { - return nil, nil, nil, nil, err - } - } - - if err := output.marshalRelevantAttrs(p); err != nil { - return nil, nil, nil, nil, err - } - - return output.OutputChanges, output.ResourceChanges, output.ResourceDrift, output.RelevantAttributes, nil -} - -// Marshal returns the json encoding of a terraform plan. 
-func Marshal( - config *configs.Config, - p *plans.Plan, - sf *statefile.File, - schemas *terraform.Schemas, -) ([]byte, error) { - output := newPlan() - output.TerraformVersion = version.String() - - err := output.marshalPlanVariables(p.VariableValues, config.Module.Variables) - if err != nil { - return nil, fmt.Errorf("error in marshalPlanVariables: %s", err) - } - - // output.PlannedValues - err = output.marshalPlannedValues(p.Changes, schemas) - if err != nil { - return nil, fmt.Errorf("error in marshalPlannedValues: %s", err) - } - - // output.ResourceDrift - if len(p.DriftedResources) > 0 { - // In refresh-only mode, we render all resources marked as drifted, - // including those which have moved without other changes. In other plan - // modes, move-only changes will be included in the planned changes, so - // we skip them here. - var driftedResources []*plans.ResourceInstanceChangeSrc - if p.UIMode == plans.RefreshOnlyMode { - driftedResources = p.DriftedResources - } else { - for _, dr := range p.DriftedResources { - if dr.Action != plans.NoOp { - driftedResources = append(driftedResources, dr) - } - } - } - output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) - if err != nil { - return nil, fmt.Errorf("error in marshaling resource drift: %s", err) - } - } - - if err := output.marshalRelevantAttrs(p); err != nil { - return nil, fmt.Errorf("error marshaling relevant attributes for external changes: %s", err) - } - - // output.ResourceChanges - if p.Changes != nil { - output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas) - if err != nil { - return nil, fmt.Errorf("error in marshaling resource changes: %s", err) - } - } - - // output.OutputChanges - if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { - return nil, fmt.Errorf("error in marshaling output changes: %s", err) - } - - // output.Checks - if p.Checks != nil && p.Checks.ConfigResults.Len() > 0 { - output.Checks = 
jsonchecks.MarshalCheckStates(p.Checks) - } - - // output.PriorState - if sf != nil && !sf.State.Empty() { - output.PriorState, err = jsonstate.Marshal(sf, schemas) - if err != nil { - return nil, fmt.Errorf("error marshaling prior state: %s", err) - } - } - - // output.Config - output.Config, err = jsonconfig.Marshal(config, schemas) - if err != nil { - return nil, fmt.Errorf("error marshaling config: %s", err) - } - - ret, err := json.Marshal(output) - return ret, err -} - -func (p *plan) marshalPlanVariables(vars map[string]plans.DynamicValue, decls map[string]*configs.Variable) error { - p.Variables = make(variables, len(vars)) - - for k, v := range vars { - val, err := v.Decode(cty.DynamicPseudoType) - if err != nil { - return err - } - valJSON, err := ctyjson.Marshal(val, val.Type()) - if err != nil { - return err - } - p.Variables[k] = &variable{ - Value: valJSON, - } - } - - // In Terraform v1.1 and earlier we had some confusion about which subsystem - // of Terraform was the one responsible for substituting in default values - // for unset module variables, with root module variables being handled in - // three different places while child module variables were only handled - // during the Terraform Core graph walk. - // - // For Terraform v1.2 and later we rationalized that by having the Terraform - // Core graph walk always be responsible for selecting defaults regardless - // of root vs. child module, but unfortunately our earlier accidental - // misbehavior bled out into the public interface by making the defaults - // show up in the "vars" map to this function. Those are now correctly - // omitted (so that the plan file only records the variables _actually_ - // set by the caller) but consumers of the JSON plan format may be depending - // on our old behavior and so we'll fake it here just in time so that - // outside consumers won't see a behavior change. 
- for name, decl := range decls { - if _, ok := p.Variables[name]; ok { - continue - } - if val := decl.Default; val != cty.NilVal { - valJSON, err := ctyjson.Marshal(val, val.Type()) - if err != nil { - return err - } - p.Variables[name] = &variable{ - Value: valJSON, - } - } - } - - if len(p.Variables) == 0 { - p.Variables = nil // omit this property if there are no variables to describe - } - - return nil -} - -// MarshalResourceChanges converts the provided internal representation of -// ResourceInstanceChangeSrc objects into the public structured JSON changes. -// -// This function is referenced directly from the structured renderer tests, to -// ensure parity between the renderers. It probably shouldn't be used anywhere -// else. -func MarshalResourceChanges(resources []*plans.ResourceInstanceChangeSrc, schemas *terraform.Schemas) ([]ResourceChange, error) { - var ret []ResourceChange - - var sortedResources []*plans.ResourceInstanceChangeSrc - sortedResources = append(sortedResources, resources...) - sort.Slice(sortedResources, func(i, j int) bool { - if !sortedResources[i].Addr.Equal(sortedResources[j].Addr) { - return sortedResources[i].Addr.Less(sortedResources[j].Addr) - } - return sortedResources[i].DeposedKey < sortedResources[j].DeposedKey - }) - - for _, rc := range sortedResources { - var r ResourceChange - addr := rc.Addr - r.Address = addr.String() - if !addr.Equal(rc.PrevRunAddr) { - r.PreviousAddress = rc.PrevRunAddr.String() - } - - dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode - // We create "delete" actions for data resources so we can clean up - // their entries in state, but this is an implementation detail that - // users shouldn't see. 
- if dataSource && rc.Action == plans.Delete { - continue - } - - schema, _ := schemas.ResourceTypeConfig( - rc.ProviderAddr.Provider, - addr.Resource.Resource.Mode, - addr.Resource.Resource.Type, - ) - if schema == nil { - return nil, fmt.Errorf("no schema found for %s (in provider %s)", r.Address, rc.ProviderAddr.Provider) - } - - changeV, err := rc.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - // We drop the marks from the change, as decoding is only an - // intermediate step to re-encode the values as json - changeV.Before, _ = changeV.Before.UnmarkDeep() - changeV.After, _ = changeV.After.UnmarkDeep() - - var before, after []byte - var beforeSensitive, afterSensitive []byte - var afterUnknown cty.Value - - if changeV.Before != cty.NilVal { - before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) - if err != nil { - return nil, err - } - marks := rc.BeforeValMarks - if schema.ContainsSensitive() { - marks = append(marks, schema.ValueMarks(changeV.Before, nil)...) - } - bs := jsonstate.SensitiveAsBool(changeV.Before.MarkWithPaths(marks)) - beforeSensitive, err = ctyjson.Marshal(bs, bs.Type()) - if err != nil { - return nil, err - } - } - if changeV.After != cty.NilVal { - if changeV.After.IsWhollyKnown() { - after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) - if err != nil { - return nil, err - } - afterUnknown = cty.EmptyObjectVal - } else { - filteredAfter := omitUnknowns(changeV.After) - if filteredAfter.IsNull() { - after = nil - } else { - after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) - if err != nil { - return nil, err - } - } - afterUnknown = unknownAsBool(changeV.After) - } - marks := rc.AfterValMarks - if schema.ContainsSensitive() { - marks = append(marks, schema.ValueMarks(changeV.After, nil)...) 
- } - as := jsonstate.SensitiveAsBool(changeV.After.MarkWithPaths(marks)) - afterSensitive, err = ctyjson.Marshal(as, as.Type()) - if err != nil { - return nil, err - } - } - - a, err := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) - if err != nil { - return nil, err - } - replacePaths, err := encodePaths(rc.RequiredReplace) - if err != nil { - return nil, err - } - - r.Change = Change{ - Actions: actionString(rc.Action.String()), - Before: json.RawMessage(before), - After: json.RawMessage(after), - AfterUnknown: a, - BeforeSensitive: json.RawMessage(beforeSensitive), - AfterSensitive: json.RawMessage(afterSensitive), - ReplacePaths: replacePaths, - } - - if rc.DeposedKey != states.NotDeposed { - r.Deposed = rc.DeposedKey.String() - } - - key := addr.Resource.Key - if key != nil { - value := key.Value() - if r.Index, err = ctyjson.Marshal(value, value.Type()); err != nil { - return nil, err - } - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - r.Mode = jsonstate.ManagedResourceMode - case addrs.DataResourceMode: - r.Mode = jsonstate.DataResourceMode - default: - return nil, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, addr.Resource.Resource.Mode.String()) - } - r.ModuleAddress = addr.Module.String() - r.Name = addr.Resource.Resource.Name - r.Type = addr.Resource.Resource.Type - r.ProviderName = rc.ProviderAddr.Provider.String() - - switch rc.ActionReason { - case plans.ResourceInstanceChangeNoReason: - r.ActionReason = "" // will be omitted in output - case plans.ResourceInstanceReplaceBecauseCannotUpdate: - r.ActionReason = ResourceInstanceReplaceBecauseCannotUpdate - case plans.ResourceInstanceReplaceBecauseTainted: - r.ActionReason = ResourceInstanceReplaceBecauseTainted - case plans.ResourceInstanceReplaceByRequest: - r.ActionReason = ResourceInstanceReplaceByRequest - case plans.ResourceInstanceReplaceByTriggers: - r.ActionReason = ResourceInstanceReplaceByTriggers - case 
plans.ResourceInstanceDeleteBecauseNoResourceConfig: - r.ActionReason = ResourceInstanceDeleteBecauseNoResourceConfig - case plans.ResourceInstanceDeleteBecauseWrongRepetition: - r.ActionReason = ResourceInstanceDeleteBecauseWrongRepetition - case plans.ResourceInstanceDeleteBecauseCountIndex: - r.ActionReason = ResourceInstanceDeleteBecauseCountIndex - case plans.ResourceInstanceDeleteBecauseEachKey: - r.ActionReason = ResourceInstanceDeleteBecauseEachKey - case plans.ResourceInstanceDeleteBecauseNoModule: - r.ActionReason = ResourceInstanceDeleteBecauseNoModule - case plans.ResourceInstanceDeleteBecauseNoMoveTarget: - r.ActionReason = ResourceInstanceDeleteBecauseNoMoveTarget - case plans.ResourceInstanceReadBecauseConfigUnknown: - r.ActionReason = ResourceInstanceReadBecauseConfigUnknown - case plans.ResourceInstanceReadBecauseDependencyPending: - r.ActionReason = ResourceInstanceReadBecauseDependencyPending - default: - return nil, fmt.Errorf("resource %s has an unsupported action reason %s", r.Address, rc.ActionReason) - } - - ret = append(ret, r) - - } - - return ret, nil -} - -// MarshalOutputChanges converts the provided internal representation of -// Changes objects into the structured JSON representation. -// -// This function is referenced directly from the structured renderer tests, to -// ensure parity between the renderers. It probably shouldn't be used anywhere -// else. -func MarshalOutputChanges(changes *plans.Changes) (map[string]Change, error) { - if changes == nil { - // Nothing to do! - return nil, nil - } - - outputChanges := make(map[string]Change, len(changes.Outputs)) - for _, oc := range changes.Outputs { - - // Skip output changes that are not from the root module. - // These are automatically stripped from plans that are written to disk - // elsewhere, we just need to duplicate the logic here in case anyone - // is converting this plan directly from memory. 
- if !oc.Addr.Module.IsRoot() { - continue - } - - changeV, err := oc.Decode() - if err != nil { - return nil, err - } - // We drop the marks from the change, as decoding is only an - // intermediate step to re-encode the values as json - changeV.Before, _ = changeV.Before.UnmarkDeep() - changeV.After, _ = changeV.After.UnmarkDeep() - - var before, after []byte - var afterUnknown cty.Value - - if changeV.Before != cty.NilVal { - before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) - if err != nil { - return nil, err - } - } - if changeV.After != cty.NilVal { - if changeV.After.IsWhollyKnown() { - after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) - if err != nil { - return nil, err - } - afterUnknown = cty.False - } else { - filteredAfter := omitUnknowns(changeV.After) - if filteredAfter.IsNull() { - after = nil - } else { - after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) - if err != nil { - return nil, err - } - } - afterUnknown = unknownAsBool(changeV.After) - } - } - - // The only information we have in the plan about output sensitivity is - // a boolean which is true if the output was or is marked sensitive. As - // a result, BeforeSensitive and AfterSensitive will be identical, and - // either false or true. 
- outputSensitive := cty.False - if oc.Sensitive { - outputSensitive = cty.True - } - sensitive, err := ctyjson.Marshal(outputSensitive, outputSensitive.Type()) - if err != nil { - return nil, err - } - - a, _ := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) - - c := Change{ - Actions: actionString(oc.Action.String()), - Before: json.RawMessage(before), - After: json.RawMessage(after), - AfterUnknown: a, - BeforeSensitive: json.RawMessage(sensitive), - AfterSensitive: json.RawMessage(sensitive), - } - - outputChanges[oc.Addr.OutputValue.Name] = c - } - - return outputChanges, nil -} - -func (p *plan) marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) error { - // marshal the planned changes into a module - plan, err := marshalPlannedValues(changes, schemas) - if err != nil { - return err - } - p.PlannedValues.RootModule = plan - - // marshalPlannedOutputs - outputs, err := marshalPlannedOutputs(changes) - if err != nil { - return err - } - p.PlannedValues.Outputs = outputs - - return nil -} - -func (p *plan) marshalRelevantAttrs(plan *plans.Plan) error { - for _, ra := range plan.RelevantAttributes { - addr := ra.Resource.String() - path, err := encodePath(ra.Attr) - if err != nil { - return err - } - - p.RelevantAttributes = append(p.RelevantAttributes, ResourceAttr{addr, path}) - } - return nil -} - -// omitUnknowns recursively walks the src cty.Value and returns a new cty.Value, -// omitting any unknowns. -// -// The result also normalizes some types: all sequence types are turned into -// tuple types and all mapping types are converted to object types, since we -// assume the result of this is just going to be serialized as JSON (and thus -// lose those distinctions) anyway. 
-func omitUnknowns(val cty.Value) cty.Value { - ty := val.Type() - switch { - case val.IsNull(): - return val - case !val.IsKnown(): - return cty.NilVal - case ty.IsPrimitiveType(): - return val - case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): - var vals []cty.Value - it := val.ElementIterator() - for it.Next() { - _, v := it.Element() - newVal := omitUnknowns(v) - if newVal != cty.NilVal { - vals = append(vals, newVal) - } else if newVal == cty.NilVal { - // element order is how we correlate unknownness, so we must - // replace unknowns with nulls - vals = append(vals, cty.NullVal(v.Type())) - } - } - // We use tuple types always here, because the work we did above - // may have caused the individual elements to have different types, - // and we're doing this work to produce JSON anyway and JSON marshalling - // represents all of these sequence types as an array. - return cty.TupleVal(vals) - case ty.IsMapType() || ty.IsObjectType(): - vals := make(map[string]cty.Value) - it := val.ElementIterator() - for it.Next() { - k, v := it.Element() - newVal := omitUnknowns(v) - if newVal != cty.NilVal { - vals[k.AsString()] = newVal - } - } - // We use object types always here, because the work we did above - // may have caused the individual elements to have different types, - // and we're doing this work to produce JSON anyway and JSON marshalling - // represents both of these mapping types as an object. - return cty.ObjectVal(vals) - default: - // Should never happen, since the above should cover all types - panic(fmt.Sprintf("omitUnknowns cannot handle %#v", val)) - } -} - -// recursively iterate through a cty.Value, replacing unknown values (including -// null) with cty.True and known values with cty.False. 
-// -// The result also normalizes some types: all sequence types are turned into -// tuple types and all mapping types are converted to object types, since we -// assume the result of this is just going to be serialized as JSON (and thus -// lose those distinctions) anyway. -// -// For map/object values, all known attribute values will be omitted instead of -// returning false, as this results in a more compact serialization. -func unknownAsBool(val cty.Value) cty.Value { - ty := val.Type() - switch { - case val.IsNull(): - return cty.False - case !val.IsKnown(): - if ty.IsPrimitiveType() || ty.Equals(cty.DynamicPseudoType) { - return cty.True - } - fallthrough - case ty.IsPrimitiveType(): - return cty.BoolVal(!val.IsKnown()) - case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): - length := val.LengthInt() - if length == 0 { - // If there are no elements then we can't have unknowns - return cty.EmptyTupleVal - } - vals := make([]cty.Value, 0, length) - it := val.ElementIterator() - for it.Next() { - _, v := it.Element() - vals = append(vals, unknownAsBool(v)) - } - // The above transform may have changed the types of some of the - // elements, so we'll always use a tuple here in case we've now made - // different elements have different types. Our ultimate goal is to - // marshal to JSON anyway, and all of these sequence types are - // indistinguishable in JSON. 
- return cty.TupleVal(vals) - case ty.IsMapType() || ty.IsObjectType(): - var length int - switch { - case ty.IsMapType(): - length = val.LengthInt() - default: - length = len(val.Type().AttributeTypes()) - } - if length == 0 { - // If there are no elements then we can't have unknowns - return cty.EmptyObjectVal - } - vals := make(map[string]cty.Value) - it := val.ElementIterator() - for it.Next() { - k, v := it.Element() - vAsBool := unknownAsBool(v) - // Omit all of the "false"s for known values for more compact - // serialization - if !vAsBool.RawEquals(cty.False) { - vals[k.AsString()] = vAsBool - } - } - // The above transform may have changed the types of some of the - // elements, so we'll always use an object here in case we've now made - // different elements have different types. Our ultimate goal is to - // marshal to JSON anyway, and all of these mapping types are - // indistinguishable in JSON. - return cty.ObjectVal(vals) - default: - // Should never happen, since the above should cover all types - panic(fmt.Sprintf("unknownAsBool cannot handle %#v", val)) - } -} - -func actionString(action string) []string { - switch { - case action == "NoOp": - return []string{"no-op"} - case action == "Create": - return []string{"create"} - case action == "Delete": - return []string{"delete"} - case action == "Update": - return []string{"update"} - case action == "CreateThenDelete": - return []string{"create", "delete"} - case action == "Read": - return []string{"read"} - case action == "DeleteThenCreate": - return []string{"delete", "create"} - default: - return []string{action} - } -} - -// UnmarshalActions reverses the actionString function. 
-func UnmarshalActions(actions []string) plans.Action { - if len(actions) == 2 { - if actions[0] == "create" && actions[1] == "delete" { - return plans.CreateThenDelete - } - - if actions[0] == "delete" && actions[1] == "create" { - return plans.DeleteThenCreate - } - } - - if len(actions) == 1 { - switch actions[0] { - case "create": - return plans.Create - case "delete": - return plans.Delete - case "update": - return plans.Update - case "read": - return plans.Read - case "no-op": - return plans.NoOp - } - } - - panic("unrecognized action slice: " + strings.Join(actions, ", ")) -} - -// encodePaths lossily encodes a cty.PathSet into an array of arrays of step -// values, such as: -// -// [["length"],["triggers",0,"value"]] -// -// The lossiness is that we cannot distinguish between an IndexStep with string -// key and a GetAttr step. This is fine with JSON output, because JSON's type -// system means that those two steps are equivalent anyway: both are object -// indexes. -// -// JavaScript (or similar dynamic language) consumers of these values can -// iterate over the the steps starting from the root object to reach the -// value that each path is describing. 
-func encodePaths(pathSet cty.PathSet) (json.RawMessage, error) { - if pathSet.Empty() { - return nil, nil - } - - pathList := pathSet.List() - jsonPaths := make([]json.RawMessage, 0, len(pathList)) - - for _, path := range pathList { - jsonPath, err := encodePath(path) - if err != nil { - return nil, err - } - jsonPaths = append(jsonPaths, jsonPath) - } - - return json.Marshal(jsonPaths) -} - -func encodePath(path cty.Path) (json.RawMessage, error) { - steps := make([]json.RawMessage, 0, len(path)) - for _, step := range path { - switch s := step.(type) { - case cty.IndexStep: - key, err := ctyjson.Marshal(s.Key, s.Key.Type()) - if err != nil { - return nil, fmt.Errorf("Failed to marshal index step key %#v: %s", s.Key, err) - } - steps = append(steps, key) - case cty.GetAttrStep: - name, err := json.Marshal(s.Name) - if err != nil { - return nil, fmt.Errorf("Failed to marshal get attr step name %#v: %s", s.Name, err) - } - steps = append(steps, name) - default: - return nil, fmt.Errorf("Unsupported path step %#v (%t)", step, step) - } - } - return json.Marshal(steps) -} diff --git a/internal/command/jsonplan/plan_test.go b/internal/command/jsonplan/plan_test.go deleted file mode 100644 index abb6ea29d9dd..000000000000 --- a/internal/command/jsonplan/plan_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package jsonplan - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -func TestOmitUnknowns(t *testing.T) { - tests := []struct { - Input cty.Value - Want cty.Value - }{ - { - cty.StringVal("hello"), - cty.StringVal("hello"), - }, - { - cty.NullVal(cty.String), - cty.NullVal(cty.String), - }, - { - cty.UnknownVal(cty.String), - cty.NilVal, - }, - { - cty.ListValEmpty(cty.String), - cty.EmptyTupleVal, - }, - { - cty.ListVal([]cty.Value{cty.StringVal("hello")}), - 
cty.TupleVal([]cty.Value{cty.StringVal("hello")}), - }, - { - cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), - }, - { - cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), - }, - { - cty.ListVal([]cty.Value{cty.StringVal("hello")}), - cty.TupleVal([]cty.Value{cty.StringVal("hello")}), - }, - // - { - cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.UnknownVal(cty.String)}), - cty.TupleVal([]cty.Value{ - cty.StringVal("hello"), - cty.NullVal(cty.String), - }), - }, - { - cty.MapVal(map[string]cty.Value{ - "hello": cty.True, - "world": cty.UnknownVal(cty.Bool), - }), - cty.ObjectVal(map[string]cty.Value{ - "hello": cty.True, - }), - }, - { - cty.TupleVal([]cty.Value{ - cty.StringVal("alpha"), - cty.UnknownVal(cty.String), - cty.StringVal("charlie"), - }), - cty.TupleVal([]cty.Value{ - cty.StringVal("alpha"), - cty.NullVal(cty.String), - cty.StringVal("charlie"), - }), - }, - { - cty.SetVal([]cty.Value{ - cty.StringVal("dev"), - cty.StringVal("foo"), - cty.StringVal("stg"), - cty.UnknownVal(cty.String), - }), - cty.TupleVal([]cty.Value{ - cty.StringVal("dev"), - cty.StringVal("foo"), - cty.StringVal("stg"), - cty.NullVal(cty.String), - }), - }, - { - cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("known"), - }), - }), - cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("known"), - }), - cty.EmptyObjectVal, - }), - }, - } - - for _, test := range tests { - got := omitUnknowns(test.Input) - if !reflect.DeepEqual(got, test.Want) { - t.Errorf( - "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", - test.Input, got, test.Want, - ) - } - } -} - -func TestUnknownAsBool(t *testing.T) { - tests := []struct { - Input cty.Value - Want cty.Value - }{ - { - cty.StringVal("hello"), - cty.False, - }, - { - 
cty.NullVal(cty.String), - cty.False, - }, - { - cty.UnknownVal(cty.String), - cty.True, - }, - - { - cty.NullVal(cty.DynamicPseudoType), - cty.False, - }, - { - cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), - cty.False, - }, - { - cty.DynamicVal, - cty.True, - }, - - { - cty.ListValEmpty(cty.String), - cty.EmptyTupleVal, - }, - { - cty.ListVal([]cty.Value{cty.StringVal("hello")}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.True}), - }, - { - cty.SetValEmpty(cty.String), - cty.EmptyTupleVal, - }, - { - cty.SetVal([]cty.Value{cty.StringVal("hello")}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.SetVal([]cty.Value{cty.NullVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.SetVal([]cty.Value{cty.UnknownVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.True}), - }, - { - cty.EmptyTupleVal, - cty.EmptyTupleVal, - }, - { - cty.TupleVal([]cty.Value{cty.StringVal("hello")}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.TupleVal([]cty.Value{cty.UnknownVal(cty.String)}), - cty.TupleVal([]cty.Value{cty.True}), - }, - { - cty.MapValEmpty(cty.String), - cty.EmptyObjectVal, - }, - { - cty.MapVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), - cty.EmptyObjectVal, - }, - { - cty.MapVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), - cty.EmptyObjectVal, - }, - { - cty.MapVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), - cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), - }, - { - cty.EmptyObjectVal, - cty.EmptyObjectVal, - }, - { - cty.ObjectVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), - cty.EmptyObjectVal, - }, - { - 
cty.ObjectVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), - cty.EmptyObjectVal, - }, - { - cty.ObjectVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), - cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), - }, - { - cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("known"), - }), - }), - cty.TupleVal([]cty.Value{ - cty.EmptyObjectVal, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - }), - }), - }, - { - cty.SetVal([]cty.Value{ - cty.MapValEmpty(cty.String), - cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("known"), - }), - cty.MapVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.String), - }), - }), - cty.TupleVal([]cty.Value{ - cty.EmptyObjectVal, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - }), - cty.EmptyObjectVal, - }), - }, - } - - for _, test := range tests { - got := unknownAsBool(test.Input) - if !reflect.DeepEqual(got, test.Want) { - t.Errorf( - "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", - test.Input, got, test.Want, - ) - } - } -} - -func TestEncodePaths(t *testing.T) { - tests := map[string]struct { - Input cty.PathSet - Want json.RawMessage - }{ - "empty set": { - cty.NewPathSet(), - json.RawMessage(nil), - }, - "index path with string and int steps": { - cty.NewPathSet(cty.IndexStringPath("boop").IndexInt(0)), - json.RawMessage(`[["boop",0]]`), - }, - "get attr path with one step": { - cty.NewPathSet(cty.GetAttrPath("triggers")), - json.RawMessage(`[["triggers"]]`), - }, - "multiple paths of different types": { - cty.NewPathSet( - cty.GetAttrPath("alpha").GetAttr("beta").GetAttr("gamma"), - cty.GetAttrPath("triggers").IndexString("name"), - cty.IndexIntPath(0).IndexInt(1).IndexInt(2).IndexInt(3), - ), - json.RawMessage(`[["alpha","beta","gamma"],["triggers","name"],[0,1,2,3]]`), - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - got, err 
:= encodePaths(test.Input) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if !cmp.Equal(got, test.Want) { - t.Errorf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) - } - }) - } -} - -func TestOutputs(t *testing.T) { - root := addrs.RootModuleInstance - - child, diags := addrs.ParseModuleInstanceStr("module.child") - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - tests := map[string]struct { - changes *plans.Changes - expected map[string]Change - }{ - "copies all outputs": { - changes: &plans.Changes{ - Outputs: []*plans.OutputChangeSrc{ - { - Addr: root.OutputValue("first"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - { - Addr: root.OutputValue("second"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - }, - }, - expected: map[string]Change{ - "first": { - Actions: []string{"create"}, - Before: json.RawMessage("null"), - After: json.RawMessage("null"), - AfterUnknown: json.RawMessage("false"), - BeforeSensitive: json.RawMessage("false"), - AfterSensitive: json.RawMessage("false"), - }, - "second": { - Actions: []string{"create"}, - Before: json.RawMessage("null"), - After: json.RawMessage("null"), - AfterUnknown: json.RawMessage("false"), - BeforeSensitive: json.RawMessage("false"), - AfterSensitive: json.RawMessage("false"), - }, - }, - }, - "skips non root modules": { - changes: &plans.Changes{ - Outputs: []*plans.OutputChangeSrc{ - { - Addr: root.OutputValue("first"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - { - Addr: child.OutputValue("second"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - }, - }, - expected: map[string]Change{ - "first": { - Actions: []string{"create"}, - Before: json.RawMessage("null"), - After: json.RawMessage("null"), - AfterUnknown: json.RawMessage("false"), - BeforeSensitive: json.RawMessage("false"), - AfterSensitive: json.RawMessage("false"), - }, - }, - }, - } - for name, test := range tests { - 
t.Run(name, func(t *testing.T) { - changes, err := MarshalOutputChanges(test.changes) - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - - if !cmp.Equal(changes, test.expected) { - t.Errorf("wrong result:\n %v\n", cmp.Diff(changes, test.expected)) - } - }) - } -} - -func deepObjectValue(depth int) cty.Value { - v := cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.NumberIntVal(2), - "c": cty.True, - "d": cty.UnknownVal(cty.String), - }) - - result := v - - for i := 0; i < depth; i++ { - result = cty.ObjectVal(map[string]cty.Value{ - "a": result, - "b": result, - "c": result, - }) - } - - return result -} - -func BenchmarkUnknownAsBool_2(b *testing.B) { - value := deepObjectValue(2) - for n := 0; n < b.N; n++ { - unknownAsBool(value) - } -} - -func BenchmarkUnknownAsBool_3(b *testing.B) { - value := deepObjectValue(3) - for n := 0; n < b.N; n++ { - unknownAsBool(value) - } -} - -func BenchmarkUnknownAsBool_5(b *testing.B) { - value := deepObjectValue(5) - for n := 0; n < b.N; n++ { - unknownAsBool(value) - } -} - -func BenchmarkUnknownAsBool_7(b *testing.B) { - value := deepObjectValue(7) - for n := 0; n < b.N; n++ { - unknownAsBool(value) - } -} - -func BenchmarkUnknownAsBool_9(b *testing.B) { - value := deepObjectValue(9) - for n := 0; n < b.N; n++ { - unknownAsBool(value) - } -} diff --git a/internal/command/jsonplan/resource.go b/internal/command/jsonplan/resource.go deleted file mode 100644 index 86ca1233ab3b..000000000000 --- a/internal/command/jsonplan/resource.go +++ /dev/null @@ -1,92 +0,0 @@ -package jsonplan - -import ( - "encoding/json" - - "github.com/hashicorp/terraform/internal/addrs" -) - -// Resource is the representation of a resource in the json plan -type resource struct { - // Address is the absolute resource address - Address string `json:"address,omitempty"` - - // Mode can be "managed" or "data" - Mode string `json:"mode,omitempty"` - - Type string `json:"type,omitempty"` - Name string 
`json:"name,omitempty"` - - // Index is omitted for a resource not using `count` or `for_each` - Index addrs.InstanceKey `json:"index,omitempty"` - - // ProviderName allows the property "type" to be interpreted unambiguously - // in the unusual situation where a provider offers a resource type whose - // name does not start with its own name, such as the "googlebeta" provider - // offering "google_compute_instance". - ProviderName string `json:"provider_name,omitempty"` - - // SchemaVersion indicates which version of the resource type schema the - // "values" property conforms to. - SchemaVersion uint64 `json:"schema_version"` - - // AttributeValues is the JSON representation of the attribute values of the - // resource, whose structure depends on the resource type schema. Any - // unknown values are omitted or set to null, making them indistinguishable - // from absent values. - AttributeValues attributeValues `json:"values,omitempty"` - - // SensitiveValues is similar to AttributeValues, but with all sensitive - // values replaced with true, and all non-sensitive leaf values omitted. - SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` -} - -// ResourceChange is a description of an individual change action that Terraform -// plans to use to move from the prior state to a new state matching the -// configuration. -type ResourceChange struct { - // Address is the absolute resource address - Address string `json:"address,omitempty"` - - // PreviousAddress is the absolute address that this resource instance had - // at the conclusion of a previous run. - // - // This will typically be omitted, but will be present if the previous - // resource instance was subject to a "moved" block that we handled in the - // process of creating this plan. - // - // Note that this behavior diverges from the internal plan data structure, - // where the previous address is set equal to the current address in the - // common case, rather than being omitted. 
- PreviousAddress string `json:"previous_address,omitempty"` - - // ModuleAddress is the module portion of the above address. Omitted if the - // instance is in the root module. - ModuleAddress string `json:"module_address,omitempty"` - - // "managed" or "data" - Mode string `json:"mode,omitempty"` - - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - Index json.RawMessage `json:"index,omitempty"` - ProviderName string `json:"provider_name,omitempty"` - - // "deposed", if set, indicates that this action applies to a "deposed" - // object of the given instance rather than to its "current" object. Omitted - // for changes to the current object. - Deposed string `json:"deposed,omitempty"` - - // Change describes the change that will be made to this object - Change Change `json:"change,omitempty"` - - // ActionReason is a keyword representing some optional extra context - // for why the actions in Change.Actions were chosen. - // - // This extra detail is only for display purposes, to help a UI layer - // present some additional explanation to a human user. The possible - // values here might grow and change over time, so any consumer of this - // information should be resilient to encountering unrecognized values - // and treat them as an unspecified reason. 
- ActionReason string `json:"action_reason,omitempty"` -} diff --git a/internal/command/jsonplan/values.go b/internal/command/jsonplan/values.go deleted file mode 100644 index f727f8a1d4a5..000000000000 --- a/internal/command/jsonplan/values.go +++ /dev/null @@ -1,282 +0,0 @@ -package jsonplan - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" -) - -// stateValues is the common representation of resolved values for both the -// prior state (which is always complete) and the planned new state. -type stateValues struct { - Outputs map[string]output `json:"outputs,omitempty"` - RootModule module `json:"root_module,omitempty"` -} - -// attributeValues is the JSON representation of the attribute values of the -// resource, whose structure depends on the resource type schema. -type attributeValues map[string]interface{} - -func marshalAttributeValues(value cty.Value, schema *configschema.Block) attributeValues { - if value == cty.NilVal || value.IsNull() { - return nil - } - ret := make(attributeValues) - - it := value.ElementIterator() - for it.Next() { - k, v := it.Element() - vJSON, _ := ctyjson.Marshal(v, v.Type()) - ret[k.AsString()] = json.RawMessage(vJSON) - } - return ret -} - -// marshalPlannedOutputs takes a list of changes and returns a map of output -// values -func marshalPlannedOutputs(changes *plans.Changes) (map[string]output, error) { - if changes.Outputs == nil { - // No changes - we're done here! 
- return nil, nil - } - - ret := make(map[string]output) - - for _, oc := range changes.Outputs { - if oc.ChangeSrc.Action == plans.Delete { - continue - } - - var after, afterType []byte - changeV, err := oc.Decode() - if err != nil { - return ret, err - } - // The values may be marked, but we must rely on the Sensitive flag - // as the decoded value is only an intermediate step in transcoding - // this to a json format. - changeV.After, _ = changeV.After.UnmarkDeep() - - if changeV.After != cty.NilVal && changeV.After.IsWhollyKnown() { - ty := changeV.After.Type() - after, err = ctyjson.Marshal(changeV.After, ty) - if err != nil { - return ret, err - } - afterType, err = ctyjson.MarshalType(ty) - if err != nil { - return ret, err - } - } - - ret[oc.Addr.OutputValue.Name] = output{ - Value: json.RawMessage(after), - Type: json.RawMessage(afterType), - Sensitive: oc.Sensitive, - } - } - - return ret, nil - -} - -func marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) (module, error) { - var ret module - - // build two maps: - // module name -> [resource addresses] - // module -> [children modules] - moduleResourceMap := make(map[string][]addrs.AbsResourceInstance) - moduleMap := make(map[string][]addrs.ModuleInstance) - seenModules := make(map[string]bool) - - for _, resource := range changes.Resources { - // If the resource is being deleted, skip over it. - // Deposed instances are always conceptually a destroy, but if they - // were gone during refresh then the change becomes a noop. 
- if resource.Action != plans.Delete && resource.DeposedKey == states.NotDeposed { - containingModule := resource.Addr.Module.String() - moduleResourceMap[containingModule] = append(moduleResourceMap[containingModule], resource.Addr) - - // the root module has no parents - if !resource.Addr.Module.IsRoot() { - parent := resource.Addr.Module.Parent().String() - // we expect to see multiple resources in one module, so we - // only need to report the "parent" module for each child module - // once. - if !seenModules[containingModule] { - moduleMap[parent] = append(moduleMap[parent], resource.Addr.Module) - seenModules[containingModule] = true - } - - // If any given parent module has no resources, it needs to be - // added to the moduleMap. This walks through the current - // resources' modules' ancestors, taking advantage of the fact - // that Ancestors() returns an ordered slice, and verifies that - // each one is in the map. - ancestors := resource.Addr.Module.Ancestors() - for i, ancestor := range ancestors[:len(ancestors)-1] { - aStr := ancestor.String() - - // childStr here is the immediate child of the current step - childStr := ancestors[i+1].String() - // we likely will see multiple resources in one module, so we - // only need to report the "parent" module for each child module - // once. 
- if !seenModules[childStr] { - moduleMap[aStr] = append(moduleMap[aStr], ancestors[i+1]) - seenModules[childStr] = true - } - } - } - } - } - - // start with the root module - resources, err := marshalPlanResources(changes, moduleResourceMap[""], schemas) - if err != nil { - return ret, err - } - ret.Resources = resources - - childModules, err := marshalPlanModules(changes, schemas, moduleMap[""], moduleMap, moduleResourceMap) - if err != nil { - return ret, err - } - sort.Slice(childModules, func(i, j int) bool { - return childModules[i].Address < childModules[j].Address - }) - - ret.ChildModules = childModules - - return ret, nil -} - -// marshalPlanResources -func marshalPlanResources(changes *plans.Changes, ris []addrs.AbsResourceInstance, schemas *terraform.Schemas) ([]resource, error) { - var ret []resource - - for _, ri := range ris { - r := changes.ResourceInstance(ri) - if r.Action == plans.Delete { - continue - } - - resource := resource{ - Address: r.Addr.String(), - Type: r.Addr.Resource.Resource.Type, - Name: r.Addr.Resource.Resource.Name, - ProviderName: r.ProviderAddr.Provider.String(), - Index: r.Addr.Resource.Key, - } - - switch r.Addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - resource.Mode = "managed" - case addrs.DataResourceMode: - resource.Mode = "data" - default: - return nil, fmt.Errorf("resource %s has an unsupported mode %s", - r.Addr.String(), - r.Addr.Resource.Resource.Mode.String(), - ) - } - - schema, schemaVer := schemas.ResourceTypeConfig( - r.ProviderAddr.Provider, - r.Addr.Resource.Resource.Mode, - resource.Type, - ) - if schema == nil { - return nil, fmt.Errorf("no schema found for %s", r.Addr.String()) - } - resource.SchemaVersion = schemaVer - changeV, err := r.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - // copy the marked After values so we can use these in marshalSensitiveValues - markedAfter := changeV.After - - // The values may be marked, but we must rely on the Sensitive flag 
- // as the decoded value is only an intermediate step in transcoding - // this to a json format. - changeV.Before, _ = changeV.Before.UnmarkDeep() - changeV.After, _ = changeV.After.UnmarkDeep() - - if changeV.After != cty.NilVal { - if changeV.After.IsWhollyKnown() { - resource.AttributeValues = marshalAttributeValues(changeV.After, schema) - } else { - knowns := omitUnknowns(changeV.After) - resource.AttributeValues = marshalAttributeValues(knowns, schema) - } - } - - s := jsonstate.SensitiveAsBool(markedAfter) - v, err := ctyjson.Marshal(s, s.Type()) - if err != nil { - return nil, err - } - resource.SensitiveValues = v - - ret = append(ret, resource) - } - - sort.Slice(ret, func(i, j int) bool { - return ret[i].Address < ret[j].Address - }) - - return ret, nil -} - -// marshalPlanModules iterates over a list of modules to recursively describe -// the full module tree. -func marshalPlanModules( - changes *plans.Changes, - schemas *terraform.Schemas, - childModules []addrs.ModuleInstance, - moduleMap map[string][]addrs.ModuleInstance, - moduleResourceMap map[string][]addrs.AbsResourceInstance, -) ([]module, error) { - - var ret []module - - for _, child := range childModules { - moduleResources := moduleResourceMap[child.String()] - // cm for child module, naming things is hard. 
- var cm module - // don't populate the address for the root module - if child.String() != "" { - cm.Address = child.String() - } - rs, err := marshalPlanResources(changes, moduleResources, schemas) - if err != nil { - return nil, err - } - cm.Resources = rs - - if len(moduleMap[child.String()]) > 0 { - moreChildModules, err := marshalPlanModules(changes, schemas, moduleMap[child.String()], moduleMap, moduleResourceMap) - if err != nil { - return nil, err - } - cm.ChildModules = moreChildModules - } - - ret = append(ret, cm) - } - - return ret, nil -} diff --git a/internal/command/jsonplan/values_test.go b/internal/command/jsonplan/values_test.go deleted file mode 100644 index 30b22429aec5..000000000000 --- a/internal/command/jsonplan/values_test.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsonplan - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/zclconf/go-cty/cty" -) - -func TestMarshalAttributeValues(t *testing.T) { - tests := []struct { - Attr cty.Value - Schema *configschema.Block - Want attributeValues - }{ - { - cty.NilVal, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - nil, - }, - { - cty.NullVal(cty.String), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - nil, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - attributeValues{"foo": json.RawMessage(`"bar"`)}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - &configschema.Block{ - 
Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - attributeValues{"foo": json.RawMessage(`null`)}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - }), - "baz": cty.ListVal([]cty.Value{ - cty.StringVal("goodnight"), - cty.StringVal("moon"), - }), - }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.Map(cty.String), - Required: true, - }, - "baz": { - Type: cty.List(cty.String), - Optional: true, - }, - }, - }, - attributeValues{ - "bar": json.RawMessage(`{"hello":"world"}`), - "baz": json.RawMessage(`["goodnight","moon"]`), - }, - }, - } - - for _, test := range tests { - got := marshalAttributeValues(test.Attr, test.Schema) - eq := reflect.DeepEqual(got, test.Want) - if !eq { - t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) - } - } -} - -func TestMarshalPlannedOutputs(t *testing.T) { - after, _ := plans.NewDynamicValue(cty.StringVal("after"), cty.DynamicPseudoType) - - tests := []struct { - Changes *plans.Changes - Want map[string]output - Err bool - }{ - { - &plans.Changes{}, - nil, - false, - }, - { - &plans.Changes{ - Outputs: []*plans.OutputChangeSrc{ - { - Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - After: after, - }, - Sensitive: false, - }, - }, - }, - map[string]output{ - "bar": { - Sensitive: false, - Type: json.RawMessage(`"string"`), - Value: json.RawMessage(`"after"`), - }, - }, - false, - }, - { // Delete action - &plans.Changes{ - Outputs: []*plans.OutputChangeSrc{ - { - Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - Sensitive: false, - }, - }, - }, - map[string]output{}, - false, - }, - } - - for _, test := range tests { - got, err := marshalPlannedOutputs(test.Changes) - if 
test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - eq := reflect.DeepEqual(got, test.Want) - if !eq { - t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) - } - } -} - -func TestMarshalPlanResources(t *testing.T) { - tests := map[string]struct { - Action plans.Action - Before cty.Value - After cty.Value - Want []resource - Err bool - }{ - "create with unknowns": { - Action: plans.Create, - Before: cty.NullVal(cty.EmptyObject), - After: cty.ObjectVal(map[string]cty.Value{ - "woozles": cty.UnknownVal(cty.String), - "foozles": cty.UnknownVal(cty.String), - }), - Want: []resource{{ - Address: "test_thing.example", - Mode: "managed", - Type: "test_thing", - Name: "example", - Index: addrs.InstanceKey(nil), - ProviderName: "registry.terraform.io/hashicorp/test", - SchemaVersion: 1, - AttributeValues: attributeValues{}, - SensitiveValues: json.RawMessage("{}"), - }}, - Err: false, - }, - "delete with null and nil": { - Action: plans.Delete, - Before: cty.NullVal(cty.EmptyObject), - After: cty.NilVal, - Want: nil, - Err: false, - }, - "delete": { - Action: plans.Delete, - Before: cty.ObjectVal(map[string]cty.Value{ - "woozles": cty.StringVal("foo"), - "foozles": cty.StringVal("bar"), - }), - After: cty.NullVal(cty.Object(map[string]cty.Type{ - "woozles": cty.String, - "foozles": cty.String, - })), - Want: nil, - Err: false, - }, - "update without unknowns": { - Action: plans.Update, - Before: cty.ObjectVal(map[string]cty.Value{ - "woozles": cty.StringVal("foo"), - "foozles": cty.StringVal("bar"), - }), - After: cty.ObjectVal(map[string]cty.Value{ - "woozles": cty.StringVal("baz"), - "foozles": cty.StringVal("bat"), - }), - Want: []resource{{ - Address: "test_thing.example", - Mode: "managed", - Type: "test_thing", - Name: "example", - Index: addrs.InstanceKey(nil), - ProviderName: "registry.terraform.io/hashicorp/test", - SchemaVersion: 1, - AttributeValues: 
attributeValues{ - "woozles": json.RawMessage(`"baz"`), - "foozles": json.RawMessage(`"bat"`), - }, - SensitiveValues: json.RawMessage("{}"), - }}, - Err: false, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - before, err := plans.NewDynamicValue(test.Before, test.Before.Type()) - if err != nil { - t.Fatal(err) - } - - after, err := plans.NewDynamicValue(test.After, test.After.Type()) - if err != nil { - t.Fatal(err) - } - testChange := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "example", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: test.Action, - Before: before, - After: after, - }, - }, - }, - } - - ris := testResourceAddrs() - - got, err := marshalPlanResources(testChange, ris, testSchemas()) - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - eq := reflect.DeepEqual(got, test.Want) - if !eq { - t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) - } - }) - } -} - -func TestMarshalPlanValuesNoopDeposed(t *testing.T) { - dynamicNull, err := plans.NewDynamicValue(cty.NullVal(cty.DynamicPseudoType), cty.DynamicPseudoType) - if err != nil { - t.Fatal(err) - } - testChange := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "example", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - DeposedKey: "12345678", - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.NoOp, - Before: dynamicNull, - After: 
dynamicNull, - }, - }, - }, - } - - _, err = marshalPlannedValues(testChange, testSchemas()) - if err != nil { - t.Fatal(err) - } -} - -func testSchemas() *terraform.Schemas { - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "woozles": {Type: cty.String, Optional: true, Computed: true}, - "foozles": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypeSchemaVersions: map[string]uint64{ - "test_thing": 1, - }, - }, - }, - } -} - -func testResourceAddrs() []addrs.AbsResourceInstance { - return []addrs.AbsResourceInstance{ - mustAddr("test_thing.example"), - } -} - -func mustAddr(str string) addrs.AbsResourceInstance { - addr, diags := addrs.ParseAbsResourceInstanceStr(str) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} diff --git a/internal/command/jsonprovider/attribute.go b/internal/command/jsonprovider/attribute.go deleted file mode 100644 index 61deb9c60736..000000000000 --- a/internal/command/jsonprovider/attribute.go +++ /dev/null @@ -1,67 +0,0 @@ -package jsonprovider - -import ( - "encoding/json" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -type Attribute struct { - AttributeType json.RawMessage `json:"type,omitempty"` - AttributeNestedType *NestedType `json:"nested_type,omitempty"` - Description string `json:"description,omitempty"` - DescriptionKind string `json:"description_kind,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Required bool `json:"required,omitempty"` - Optional bool `json:"optional,omitempty"` - Computed bool `json:"computed,omitempty"` - Sensitive bool `json:"sensitive,omitempty"` -} - -type NestedType struct { - Attributes map[string]*Attribute `json:"attributes,omitempty"` - NestingMode string 
`json:"nesting_mode,omitempty"` -} - -func marshalStringKind(sk configschema.StringKind) string { - switch sk { - default: - return "plain" - case configschema.StringMarkdown: - return "markdown" - } -} - -func marshalAttribute(attr *configschema.Attribute) *Attribute { - ret := &Attribute{ - Description: attr.Description, - DescriptionKind: marshalStringKind(attr.DescriptionKind), - Required: attr.Required, - Optional: attr.Optional, - Computed: attr.Computed, - Sensitive: attr.Sensitive, - Deprecated: attr.Deprecated, - } - - // we're not concerned about errors because at this point the schema has - // already been checked and re-checked. - if attr.Type != cty.NilType { - attrTy, _ := attr.Type.MarshalJSON() - ret.AttributeType = attrTy - } - - if attr.NestedType != nil { - nestedTy := NestedType{ - NestingMode: nestingModeString(attr.NestedType.Nesting), - } - attrs := make(map[string]*Attribute, len(attr.NestedType.Attributes)) - for k, attr := range attr.NestedType.Attributes { - attrs[k] = marshalAttribute(attr) - } - nestedTy.Attributes = attrs - ret.AttributeNestedType = &nestedTy - } - - return ret -} diff --git a/internal/command/jsonprovider/block.go b/internal/command/jsonprovider/block.go deleted file mode 100644 index e9fdcfb3eabf..000000000000 --- a/internal/command/jsonprovider/block.go +++ /dev/null @@ -1,80 +0,0 @@ -package jsonprovider - -import ( - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -type Block struct { - Attributes map[string]*Attribute `json:"attributes,omitempty"` - BlockTypes map[string]*BlockType `json:"block_types,omitempty"` - Description string `json:"description,omitempty"` - DescriptionKind string `json:"description_kind,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` -} - -type BlockType struct { - NestingMode string `json:"nesting_mode,omitempty"` - Block *Block `json:"block,omitempty"` - MinItems uint64 `json:"min_items,omitempty"` - MaxItems uint64 `json:"max_items,omitempty"` -} - -func 
marshalBlockTypes(nestedBlock *configschema.NestedBlock) *BlockType { - if nestedBlock == nil { - return &BlockType{} - } - ret := &BlockType{ - Block: marshalBlock(&nestedBlock.Block), - MinItems: uint64(nestedBlock.MinItems), - MaxItems: uint64(nestedBlock.MaxItems), - NestingMode: nestingModeString(nestedBlock.Nesting), - } - return ret -} - -func marshalBlock(configBlock *configschema.Block) *Block { - if configBlock == nil { - return &Block{} - } - - ret := Block{ - Deprecated: configBlock.Deprecated, - Description: configBlock.Description, - DescriptionKind: marshalStringKind(configBlock.DescriptionKind), - } - - if len(configBlock.Attributes) > 0 { - attrs := make(map[string]*Attribute, len(configBlock.Attributes)) - for k, attr := range configBlock.Attributes { - attrs[k] = marshalAttribute(attr) - } - ret.Attributes = attrs - } - - if len(configBlock.BlockTypes) > 0 { - blockTypes := make(map[string]*BlockType, len(configBlock.BlockTypes)) - for k, bt := range configBlock.BlockTypes { - blockTypes[k] = marshalBlockTypes(bt) - } - ret.BlockTypes = blockTypes - } - - return &ret -} - -func nestingModeString(mode configschema.NestingMode) string { - switch mode { - case configschema.NestingSingle: - return "single" - case configschema.NestingGroup: - return "group" - case configschema.NestingList: - return "list" - case configschema.NestingSet: - return "set" - case configschema.NestingMap: - return "map" - default: - return "invalid" - } -} diff --git a/internal/command/jsonprovider/provider.go b/internal/command/jsonprovider/provider.go deleted file mode 100644 index caf42624bf38..000000000000 --- a/internal/command/jsonprovider/provider.go +++ /dev/null @@ -1,78 +0,0 @@ -package jsonprovider - -import ( - "encoding/json" - - "github.com/hashicorp/terraform/internal/terraform" -) - -// FormatVersion represents the version of the json format and will be -// incremented for any change to this format that requires changes to a -// consuming parser. 
-const FormatVersion = "1.0" - -// providers is the top-level object returned when exporting provider schemas -type providers struct { - FormatVersion string `json:"format_version"` - Schemas map[string]*Provider `json:"provider_schemas,omitempty"` -} - -type Provider struct { - Provider *Schema `json:"provider,omitempty"` - ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"` - DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"` -} - -func newProviders() *providers { - schemas := make(map[string]*Provider) - return &providers{ - FormatVersion: FormatVersion, - Schemas: schemas, - } -} - -// MarshalForRenderer converts the provided internation representation of the -// schema into the public structured JSON versions. -// -// This is a format that can be read by the structured plan renderer. -func MarshalForRenderer(s *terraform.Schemas) map[string]*Provider { - schemas := make(map[string]*Provider, len(s.Providers)) - for k, v := range s.Providers { - schemas[k.String()] = marshalProvider(v) - } - return schemas -} - -func Marshal(s *terraform.Schemas) ([]byte, error) { - providers := newProviders() - providers.Schemas = MarshalForRenderer(s) - ret, err := json.Marshal(providers) - return ret, err -} - -func marshalProvider(tps *terraform.ProviderSchema) *Provider { - if tps == nil { - return &Provider{} - } - - var ps *Schema - var rs, ds map[string]*Schema - - if tps.Provider != nil { - ps = marshalSchema(tps.Provider) - } - - if tps.ResourceTypes != nil { - rs = marshalSchemas(tps.ResourceTypes, tps.ResourceTypeSchemaVersions) - } - - if tps.DataSources != nil { - ds = marshalSchemas(tps.DataSources, tps.ResourceTypeSchemaVersions) - } - - return &Provider{ - Provider: ps, - ResourceSchemas: rs, - DataSourceSchemas: ds, - } -} diff --git a/internal/command/jsonprovider/provider_test.go b/internal/command/jsonprovider/provider_test.go deleted file mode 100644 index b849c37b8293..000000000000 --- 
a/internal/command/jsonprovider/provider_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package jsonprovider - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestMarshalProvider(t *testing.T) { - tests := []struct { - Input *terraform.ProviderSchema - Want *Provider - }{ - { - nil, - &Provider{}, - }, - { - testProvider(), - &Provider{ - Provider: &Schema{ - Block: &Block{ - Attributes: map[string]*Attribute{ - "region": { - AttributeType: json.RawMessage(`"string"`), - Required: true, - DescriptionKind: "plain", - }, - }, - DescriptionKind: "plain", - }, - }, - ResourceSchemas: map[string]*Schema{ - "test_instance": { - Version: 42, - Block: &Block{ - Attributes: map[string]*Attribute{ - "id": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - Computed: true, - DescriptionKind: "plain", - }, - "ami": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - "volumes": { - AttributeNestedType: &NestedType{ - NestingMode: "list", - Attributes: map[string]*Attribute{ - "size": { - AttributeType: json.RawMessage(`"string"`), - Required: true, - DescriptionKind: "plain", - }, - "mount_point": { - AttributeType: json.RawMessage(`"string"`), - Required: true, - DescriptionKind: "plain", - }, - }, - }, - Optional: true, - DescriptionKind: "plain", - }, - }, - BlockTypes: map[string]*BlockType{ - "network_interface": { - Block: &Block{ - Attributes: map[string]*Attribute{ - "device_index": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - "description": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - }, - DescriptionKind: "plain", - }, - NestingMode: "list", - }, - }, - DescriptionKind: "plain", - }, - }, - }, - 
DataSourceSchemas: map[string]*Schema{ - "test_data_source": { - Version: 3, - Block: &Block{ - Attributes: map[string]*Attribute{ - "id": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - Computed: true, - DescriptionKind: "plain", - }, - "ami": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - }, - BlockTypes: map[string]*BlockType{ - "network_interface": { - Block: &Block{ - Attributes: map[string]*Attribute{ - "device_index": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - "description": { - AttributeType: json.RawMessage(`"string"`), - Optional: true, - DescriptionKind: "plain", - }, - }, - DescriptionKind: "plain", - }, - NestingMode: "list", - }, - }, - DescriptionKind: "plain", - }, - }, - }, - }, - }, - } - - for _, test := range tests { - got := marshalProvider(test.Input) - if !cmp.Equal(got, test.Want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) - } - } -} - -func testProvider() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Required: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "volumes": { - Optional: true, - NestedType: &configschema.Object{ - Nesting: configschema.NestingList, - Attributes: map[string]*configschema.Attribute{ - "size": {Type: cty.String, Required: true}, - "mount_point": {Type: cty.String, Required: true}, - }, - }, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - 
"description": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - - ResourceTypeSchemaVersions: map[string]uint64{ - "test_instance": 42, - "test_data_source": 3, - }, - } -} diff --git a/internal/command/jsonprovider/schema.go b/internal/command/jsonprovider/schema.go deleted file mode 100644 index c33962a1639a..000000000000 --- a/internal/command/jsonprovider/schema.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonprovider - -import ( - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -type Schema struct { - Version uint64 `json:"version"` - Block *Block `json:"block,omitempty"` -} - -// marshalSchema is a convenience wrapper around mashalBlock. Schema version -// should be set by the caller. 
-func marshalSchema(block *configschema.Block) *Schema { - if block == nil { - return &Schema{} - } - - var ret Schema - ret.Block = marshalBlock(block) - - return &ret -} - -func marshalSchemas(blocks map[string]*configschema.Block, rVersions map[string]uint64) map[string]*Schema { - if blocks == nil { - return map[string]*Schema{} - } - ret := make(map[string]*Schema, len(blocks)) - for k, v := range blocks { - ret[k] = marshalSchema(v) - version, ok := rVersions[k] - if ok { - ret[k].Version = version - } - } - return ret -} diff --git a/internal/command/jsonprovider/schema_test.go b/internal/command/jsonprovider/schema_test.go deleted file mode 100644 index d4fae307d5d1..000000000000 --- a/internal/command/jsonprovider/schema_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package jsonprovider - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -func TestMarshalSchemas(t *testing.T) { - tests := []struct { - Input map[string]*configschema.Block - Versions map[string]uint64 - Want map[string]*Schema - }{ - { - nil, - map[string]uint64{}, - map[string]*Schema{}, - }, - } - - for _, test := range tests { - got := marshalSchemas(test.Input, test.Versions) - if !cmp.Equal(got, test.Want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) - } - } -} - -func TestMarshalSchema(t *testing.T) { - tests := map[string]struct { - Input *configschema.Block - Want *Schema - }{ - "nil_block": { - nil, - &Schema{}, - }, - } - - for _, test := range tests { - got := marshalSchema(test.Input) - if !cmp.Equal(got, test.Want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) - } - } -} diff --git a/internal/command/jsonstate/state.go b/internal/command/jsonstate/state.go deleted file mode 100644 index 5cd4793d02e0..000000000000 --- a/internal/command/jsonstate/state.go +++ /dev/null @@ -1,553 +0,0 @@ -package jsonstate - -import ( - "encoding/json" - "fmt" - "sort" - - 
"github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/jsonchecks" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" -) - -const ( - // FormatVersion represents the version of the json format and will be - // incremented for any change to this format that requires changes to a - // consuming parser. - FormatVersion = "1.0" - - ManagedResourceMode = "managed" - DataResourceMode = "data" -) - -// state is the top-level representation of the json format of a terraform -// state. -type state struct { - FormatVersion string `json:"format_version,omitempty"` - TerraformVersion string `json:"terraform_version,omitempty"` - Values *stateValues `json:"values,omitempty"` - Checks json.RawMessage `json:"checks,omitempty"` -} - -// stateValues is the common representation of resolved values for both the prior -// state (which is always complete) and the planned new state. -type stateValues struct { - Outputs map[string]Output `json:"outputs,omitempty"` - RootModule Module `json:"root_module,omitempty"` -} - -type Output struct { - Sensitive bool `json:"sensitive"` - Value json.RawMessage `json:"value,omitempty"` - Type json.RawMessage `json:"type,omitempty"` -} - -// Module is the representation of a module in state. This can be the root module -// or a child module -type Module struct { - // Resources are sorted in a user-friendly order that is undefined at this - // time, but consistent. - Resources []Resource `json:"resources,omitempty"` - - // Address is the absolute module address, omitted for the root module - Address string `json:"address,omitempty"` - - // Each module object can optionally have its own nested "child_modules", - // recursively describing the full module tree. 
- ChildModules []Module `json:"child_modules,omitempty"` -} - -// Resource is the representation of a resource in the state. -type Resource struct { - // Address is the absolute resource address - Address string `json:"address,omitempty"` - - // Mode can be "managed" or "data" - Mode string `json:"mode,omitempty"` - - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - - // Index is omitted for a resource not using `count` or `for_each`. - Index json.RawMessage `json:"index,omitempty"` - - // ProviderName allows the property "type" to be interpreted unambiguously - // in the unusual situation where a provider offers a resource type whose - // name does not start with its own name, such as the "googlebeta" provider - // offering "google_compute_instance". - ProviderName string `json:"provider_name"` - - // SchemaVersion indicates which version of the resource type schema the - // "values" property conforms to. - SchemaVersion uint64 `json:"schema_version"` - - // AttributeValues is the JSON representation of the attribute values of the - // resource, whose structure depends on the resource type schema. Any - // unknown values are omitted or set to null, making them indistinguishable - // from absent values. - AttributeValues AttributeValues `json:"values,omitempty"` - - // SensitiveValues is similar to AttributeValues, but with all sensitive - // values replaced with true, and all non-sensitive leaf values omitted. - SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` - - // DependsOn contains a list of the resource's dependencies. The entries are - // addresses relative to the containing module. - DependsOn []string `json:"depends_on,omitempty"` - - // Tainted is true if the resource is tainted in terraform state. - Tainted bool `json:"tainted,omitempty"` - - // Deposed is set if the resource is deposed in terraform state. 
- DeposedKey string `json:"deposed_key,omitempty"` -} - -// AttributeValues is the JSON representation of the attribute values of the -// resource, whose structure depends on the resource type schema. -type AttributeValues map[string]json.RawMessage - -func marshalAttributeValues(value cty.Value) AttributeValues { - // unmark our value to show all values - value, _ = value.UnmarkDeep() - - if value == cty.NilVal || value.IsNull() { - return nil - } - - ret := make(AttributeValues) - - it := value.ElementIterator() - for it.Next() { - k, v := it.Element() - vJSON, _ := ctyjson.Marshal(v, v.Type()) - ret[k.AsString()] = json.RawMessage(vJSON) - } - return ret -} - -// newState() returns a minimally-initialized state -func newState() *state { - return &state{ - FormatVersion: FormatVersion, - } -} - -// MarshalForRenderer returns the pre-json encoding changes of the state, in a -// format available to the structured renderer. -func MarshalForRenderer(sf *statefile.File, schemas *terraform.Schemas) (Module, map[string]Output, error) { - if sf.State.Modules == nil { - // Empty state case. - return Module{}, nil, nil - } - - outputs, err := MarshalOutputs(sf.State.RootModule().OutputValues) - if err != nil { - return Module{}, nil, err - } - - root, err := marshalRootModule(sf.State, schemas) - if err != nil { - return Module{}, nil, err - } - - return root, outputs, err -} - -// Marshal returns the json encoding of a terraform state. 
-func Marshal(sf *statefile.File, schemas *terraform.Schemas) ([]byte, error) { - output := newState() - - if sf == nil || sf.State.Empty() { - ret, err := json.Marshal(output) - return ret, err - } - - if sf.TerraformVersion != nil { - output.TerraformVersion = sf.TerraformVersion.String() - } - - // output.StateValues - err := output.marshalStateValues(sf.State, schemas) - if err != nil { - return nil, err - } - - // output.Checks - if sf.State.CheckResults != nil && sf.State.CheckResults.ConfigResults.Len() > 0 { - output.Checks = jsonchecks.MarshalCheckStates(sf.State.CheckResults) - } - - ret, err := json.Marshal(output) - return ret, err -} - -func (jsonstate *state) marshalStateValues(s *states.State, schemas *terraform.Schemas) error { - var sv stateValues - var err error - - // only marshal the root module outputs - sv.Outputs, err = MarshalOutputs(s.RootModule().OutputValues) - if err != nil { - return err - } - - // use the state and module map to build up the module structure - sv.RootModule, err = marshalRootModule(s, schemas) - if err != nil { - return err - } - - jsonstate.Values = &sv - return nil -} - -// MarshalOutputs translates a map of states.OutputValue to a map of jsonstate.Output, -// which are defined for json encoding. 
-func MarshalOutputs(outputs map[string]*states.OutputValue) (map[string]Output, error) { - if outputs == nil { - return nil, nil - } - - ret := make(map[string]Output) - for k, v := range outputs { - ty := v.Value.Type() - ov, err := ctyjson.Marshal(v.Value, ty) - if err != nil { - return ret, err - } - ot, err := ctyjson.MarshalType(ty) - if err != nil { - return ret, err - } - ret[k] = Output{ - Value: ov, - Type: ot, - Sensitive: v.Sensitive, - } - } - - return ret, nil -} - -func marshalRootModule(s *states.State, schemas *terraform.Schemas) (Module, error) { - var ret Module - var err error - - ret.Address = "" - rs, err := marshalResources(s.RootModule().Resources, addrs.RootModuleInstance, schemas) - if err != nil { - return ret, err - } - ret.Resources = rs - - // build a map of module -> set[child module addresses] - moduleChildSet := make(map[string]map[string]struct{}) - for _, mod := range s.Modules { - if mod.Addr.IsRoot() { - continue - } else { - for childAddr := mod.Addr; !childAddr.IsRoot(); childAddr = childAddr.Parent() { - if _, ok := moduleChildSet[childAddr.Parent().String()]; !ok { - moduleChildSet[childAddr.Parent().String()] = map[string]struct{}{} - } - moduleChildSet[childAddr.Parent().String()][childAddr.String()] = struct{}{} - } - } - } - - // transform the previous map into map of module -> [child module addresses] - moduleMap := make(map[string][]addrs.ModuleInstance) - for parent, children := range moduleChildSet { - for child := range children { - childModuleInstance, diags := addrs.ParseModuleInstanceStr(child) - if diags.HasErrors() { - return ret, diags.Err() - } - moduleMap[parent] = append(moduleMap[parent], childModuleInstance) - } - } - - // use the state and module map to build up the module structure - ret.ChildModules, err = marshalModules(s, schemas, moduleMap[""], moduleMap) - return ret, err -} - -// marshalModules is an ungainly recursive function to build a module structure -// out of terraform state. 
-func marshalModules( - s *states.State, - schemas *terraform.Schemas, - modules []addrs.ModuleInstance, - moduleMap map[string][]addrs.ModuleInstance, -) ([]Module, error) { - var ret []Module - for _, child := range modules { - // cm for child module, naming things is hard. - cm := Module{Address: child.String()} - - // the module may be resourceless and contain only submodules, it will then be nil here - stateMod := s.Module(child) - if stateMod != nil { - rs, err := marshalResources(stateMod.Resources, stateMod.Addr, schemas) - if err != nil { - return nil, err - } - cm.Resources = rs - } - - if moduleMap[child.String()] != nil { - moreChildModules, err := marshalModules(s, schemas, moduleMap[child.String()], moduleMap) - if err != nil { - return nil, err - } - cm.ChildModules = moreChildModules - } - - ret = append(ret, cm) - } - - // sort the child modules by address for consistency. - sort.Slice(ret, func(i, j int) bool { - return ret[i].Address < ret[j].Address - }) - - return ret, nil -} - -func marshalResources(resources map[string]*states.Resource, module addrs.ModuleInstance, schemas *terraform.Schemas) ([]Resource, error) { - var ret []Resource - - var sortedResources []*states.Resource - for _, r := range resources { - sortedResources = append(sortedResources, r) - } - sort.Slice(sortedResources, func(i, j int) bool { - return sortedResources[i].Addr.Less(sortedResources[j].Addr) - }) - - for _, r := range sortedResources { - - var sortedKeys []addrs.InstanceKey - for k := range r.Instances { - sortedKeys = append(sortedKeys, k) - } - sort.Slice(sortedKeys, func(i, j int) bool { - return addrs.InstanceKeyLess(sortedKeys[i], sortedKeys[j]) - }) - - for _, k := range sortedKeys { - ri := r.Instances[k] - - var err error - - resAddr := r.Addr.Resource - - current := Resource{ - Address: r.Addr.Instance(k).String(), - Type: resAddr.Type, - Name: resAddr.Name, - ProviderName: r.ProviderConfig.Provider.String(), - } - - if k != nil { - index := k.Value() - 
if current.Index, err = ctyjson.Marshal(index, index.Type()); err != nil { - return nil, err - } - } - - switch resAddr.Mode { - case addrs.ManagedResourceMode: - current.Mode = ManagedResourceMode - case addrs.DataResourceMode: - current.Mode = DataResourceMode - default: - return ret, fmt.Errorf("resource %s has an unsupported mode %s", - resAddr.String(), - resAddr.Mode.String(), - ) - } - - schema, version := schemas.ResourceTypeConfig( - r.ProviderConfig.Provider, - resAddr.Mode, - resAddr.Type, - ) - - // It is possible that the only instance is deposed - if ri.Current != nil { - if version != ri.Current.SchemaVersion { - return nil, fmt.Errorf("schema version %d for %s in state does not match version %d from the provider", ri.Current.SchemaVersion, resAddr, version) - } - - current.SchemaVersion = ri.Current.SchemaVersion - - if schema == nil { - return nil, fmt.Errorf("no schema found for %s (in provider %s)", resAddr.String(), r.ProviderConfig.Provider) - } - riObj, err := ri.Current.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - current.AttributeValues = marshalAttributeValues(riObj.Value) - - value, marks := riObj.Value.UnmarkDeepWithPaths() - if schema.ContainsSensitive() { - marks = append(marks, schema.ValueMarks(value, nil)...) 
- } - s := SensitiveAsBool(value.MarkWithPaths(marks)) - v, err := ctyjson.Marshal(s, s.Type()) - if err != nil { - return nil, err - } - current.SensitiveValues = v - - if len(riObj.Dependencies) > 0 { - dependencies := make([]string, len(riObj.Dependencies)) - for i, v := range riObj.Dependencies { - dependencies[i] = v.String() - } - current.DependsOn = dependencies - } - - if riObj.Status == states.ObjectTainted { - current.Tainted = true - } - ret = append(ret, current) - } - - var sortedDeposedKeys []string - for k := range ri.Deposed { - sortedDeposedKeys = append(sortedDeposedKeys, string(k)) - } - sort.Strings(sortedDeposedKeys) - - for _, deposedKey := range sortedDeposedKeys { - rios := ri.Deposed[states.DeposedKey(deposedKey)] - - // copy the base fields from the current instance - deposed := Resource{ - Address: current.Address, - Type: current.Type, - Name: current.Name, - ProviderName: current.ProviderName, - Mode: current.Mode, - Index: current.Index, - } - - riObj, err := rios.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - deposed.AttributeValues = marshalAttributeValues(riObj.Value) - - value, marks := riObj.Value.UnmarkDeepWithPaths() - if schema.ContainsSensitive() { - marks = append(marks, schema.ValueMarks(value, nil)...) 
- } - s := SensitiveAsBool(value.MarkWithPaths(marks)) - v, err := ctyjson.Marshal(s, s.Type()) - if err != nil { - return nil, err - } - deposed.SensitiveValues = v - - if len(riObj.Dependencies) > 0 { - dependencies := make([]string, len(riObj.Dependencies)) - for i, v := range riObj.Dependencies { - dependencies[i] = v.String() - } - deposed.DependsOn = dependencies - } - - if riObj.Status == states.ObjectTainted { - deposed.Tainted = true - } - deposed.DeposedKey = deposedKey - ret = append(ret, deposed) - } - } - } - - return ret, nil -} - -func SensitiveAsBool(val cty.Value) cty.Value { - if val.HasMark(marks.Sensitive) { - return cty.True - } - - ty := val.Type() - switch { - case val.IsNull(), ty.IsPrimitiveType(), ty.Equals(cty.DynamicPseudoType): - return cty.False - case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): - if !val.IsKnown() { - // If the collection is unknown we can't say anything about the - // sensitivity of its contents - return cty.EmptyTupleVal - } - length := val.LengthInt() - if length == 0 { - // If there are no elements then we can't have sensitive values - return cty.EmptyTupleVal - } - vals := make([]cty.Value, 0, length) - it := val.ElementIterator() - for it.Next() { - _, v := it.Element() - vals = append(vals, SensitiveAsBool(v)) - } - // The above transform may have changed the types of some of the - // elements, so we'll always use a tuple here in case we've now made - // different elements have different types. Our ultimate goal is to - // marshal to JSON anyway, and all of these sequence types are - // indistinguishable in JSON. 
- return cty.TupleVal(vals) - case ty.IsMapType() || ty.IsObjectType(): - if !val.IsKnown() { - // If the map/object is unknown we can't say anything about the - // sensitivity of its attributes - return cty.EmptyObjectVal - } - var length int - switch { - case ty.IsMapType(): - length = val.LengthInt() - default: - length = len(val.Type().AttributeTypes()) - } - if length == 0 { - // If there are no elements then we can't have sensitive values - return cty.EmptyObjectVal - } - vals := make(map[string]cty.Value) - it := val.ElementIterator() - for it.Next() { - k, v := it.Element() - s := SensitiveAsBool(v) - // Omit all of the "false"s for non-sensitive values for more - // compact serialization - if !s.RawEquals(cty.False) { - vals[k.AsString()] = s - } - } - // The above transform may have changed the types of some of the - // elements, so we'll always use an object here in case we've now made - // different elements have different types. Our ultimate goal is to - // marshal to JSON anyway, and all of these mapping types are - // indistinguishable in JSON. 
- return cty.ObjectVal(vals) - default: - // Should never happen, since the above should cover all types - panic(fmt.Sprintf("sensitiveAsBool cannot handle %#v", val)) - } -} diff --git a/internal/command/jsonstate/state_test.go b/internal/command/jsonstate/state_test.go deleted file mode 100644 index 60b234185ce7..000000000000 --- a/internal/command/jsonstate/state_test.go +++ /dev/null @@ -1,1044 +0,0 @@ -package jsonstate - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestMarshalOutputs(t *testing.T) { - tests := []struct { - Outputs map[string]*states.OutputValue - Want map[string]Output - Err bool - }{ - { - nil, - nil, - false, - }, - { - map[string]*states.OutputValue{ - "test": { - Sensitive: true, - Value: cty.StringVal("sekret"), - }, - }, - map[string]Output{ - "test": { - Sensitive: true, - Value: json.RawMessage(`"sekret"`), - Type: json.RawMessage(`"string"`), - }, - }, - false, - }, - { - map[string]*states.OutputValue{ - "test": { - Sensitive: false, - Value: cty.StringVal("not_so_sekret"), - }, - }, - map[string]Output{ - "test": { - Sensitive: false, - Value: json.RawMessage(`"not_so_sekret"`), - Type: json.RawMessage(`"string"`), - }, - }, - false, - }, - { - map[string]*states.OutputValue{ - "mapstring": { - Sensitive: false, - Value: cty.MapVal(map[string]cty.Value{ - "beep": cty.StringVal("boop"), - }), - }, - "setnumber": { - Sensitive: false, - Value: cty.SetVal([]cty.Value{ - cty.NumberIntVal(3), - cty.NumberIntVal(5), - cty.NumberIntVal(7), - cty.NumberIntVal(11), - }), - }, - }, - map[string]Output{ - "mapstring": { - Sensitive: false, - Value: json.RawMessage(`{"beep":"boop"}`), - 
Type: json.RawMessage(`["map","string"]`), - }, - "setnumber": { - Sensitive: false, - Value: json.RawMessage(`[3,5,7,11]`), - Type: json.RawMessage(`["set","number"]`), - }, - }, - false, - }, - } - - for _, test := range tests { - got, err := MarshalOutputs(test.Outputs) - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if !cmp.Equal(test.Want, got) { - t.Fatalf("wrong result:\n%s", cmp.Diff(test.Want, got)) - } - } -} - -func TestMarshalAttributeValues(t *testing.T) { - tests := []struct { - Attr cty.Value - Want AttributeValues - }{ - { - cty.NilVal, - nil, - }, - { - cty.NullVal(cty.String), - nil, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - AttributeValues{"foo": json.RawMessage(`"bar"`)}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - AttributeValues{"foo": json.RawMessage(`null`)}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - }), - "baz": cty.ListVal([]cty.Value{ - cty.StringVal("goodnight"), - cty.StringVal("moon"), - }), - }), - AttributeValues{ - "bar": json.RawMessage(`{"hello":"world"}`), - "baz": json.RawMessage(`["goodnight","moon"]`), - }, - }, - // Marked values - { - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - }), - "baz": cty.ListVal([]cty.Value{ - cty.StringVal("goodnight"), - cty.StringVal("moon").Mark(marks.Sensitive), - }), - }), - AttributeValues{ - "bar": json.RawMessage(`{"hello":"world"}`), - "baz": json.RawMessage(`["goodnight","moon"]`), - }, - }, - } - - for _, test := range tests { - got := marshalAttributeValues(test.Attr) - eq := reflect.DeepEqual(got, test.Want) - if !eq { - t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) - } - } -} - -func TestMarshalResources(t *testing.T) { - 
deposedKey := states.NewDeposedKey() - tests := map[string]struct { - Resources map[string]*states.Resource - Schemas *terraform.Schemas - Want []Resource - Err bool - }{ - "nil": { - nil, - nil, - nil, - false, - }, - "single resource": { - map[string]*states.Resource{ - "test_thing.baz": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "single resource_with_sensitive": { - map[string]*states.Resource{ - "test_thing.baz": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles","foozles":"sensuzles"}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ 
- "foozles": json.RawMessage(`"sensuzles"`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "resource with marks": { - map[string]*states.Resource{ - "test_thing.bar": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"foozles":"confuzles"}`), - AttrSensitivePaths: []cty.PathValueMarks{{ - Path: cty.Path{cty.GetAttrStep{Name: "foozles"}}, - Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`"confuzles"`), - "woozles": json.RawMessage(`null`), - }, - SensitiveValues: json.RawMessage(`{"foozles":true}`), - }, - }, - false, - }, - "single resource wrong schema": { - map[string]*states.Resource{ - "test_thing.baz": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Current: &states.ResourceInstanceObjectSrc{ - SchemaVersion: 1, - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":["confuzles"]}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - nil, - true, - }, - "resource with count": { - map[string]*states.Resource{ - "test_thing.bar": { 
- Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.IntKey(0): { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar[0]", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: json.RawMessage(`0`), - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "resource with for_each": { - map[string]*states.Resource{ - "test_thing.bar": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.StringKey("rockhopper"): { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar[\"rockhopper\"]", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: json.RawMessage(`"rockhopper"`), - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "deposed resource": { - map[string]*states.Resource{ - 
"test_thing.baz": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ - states.DeposedKey(deposedKey): { - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - DeposedKey: deposedKey.String(), - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "deposed and current resource": { - map[string]*states.Resource{ - "test_thing.baz": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ - states.DeposedKey(deposedKey): { - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - 
"foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - { - Address: "test_thing.bar", - Mode: "managed", - Type: "test_thing", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - DeposedKey: deposedKey.String(), - AttributeValues: AttributeValues{ - "foozles": json.RawMessage(`null`), - "woozles": json.RawMessage(`"confuzles"`), - }, - SensitiveValues: json.RawMessage("{\"foozles\":true}"), - }, - }, - false, - }, - "resource with marked map attr": { - map[string]*states.Resource{ - "test_map_attr.bar": { - Addr: addrs.AbsResource{ - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_map_attr", - Name: "bar", - }, - }, - Instances: map[addrs.InstanceKey]*states.ResourceInstance{ - addrs.NoKey: { - Current: &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"data":{"woozles":"confuzles"}}`), - AttrSensitivePaths: []cty.PathValueMarks{{ - Path: cty.Path{cty.GetAttrStep{Name: "data"}}, - Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - }, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - testSchemas(), - []Resource{ - { - Address: "test_map_attr.bar", - Mode: "managed", - Type: "test_map_attr", - Name: "bar", - Index: nil, - ProviderName: "registry.terraform.io/hashicorp/test", - AttributeValues: AttributeValues{ - "data": json.RawMessage(`{"woozles":"confuzles"}`), - }, - SensitiveValues: json.RawMessage(`{"data":true}`), - }, - }, - false, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - got, err := marshalResources(test.Resources, addrs.RootModuleInstance, test.Schemas) - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - diff := cmp.Diff(got, 
test.Want) - if diff != "" { - t.Fatalf("wrong result: %s\n", diff) - } - - }) - } -} - -func TestMarshalModules_basic(t *testing.T) { - childModule, _ := addrs.ParseModuleInstanceStr("module.child") - subModule, _ := addrs.ParseModuleInstanceStr("module.submodule") - testState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(childModule), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childModule.Module(), - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(subModule), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: subModule.Module(), - }, - ) - }) - moduleMap := make(map[string][]addrs.ModuleInstance) - moduleMap[""] = []addrs.ModuleInstance{childModule, subModule} - - got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) - - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) - } - - if len(got) != 2 { - t.Fatalf("wrong result! 
got %d modules, expected 2", len(got)) - } - - if got[0].Address != "module.child" || got[1].Address != "module.submodule" { - t.Fatalf("wrong result! got %#v\n", got) - } - -} - -func TestMarshalModules_nested(t *testing.T) { - childModule, _ := addrs.ParseModuleInstanceStr("module.child") - subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") - testState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(childModule), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childModule.Module(), - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(subModule), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: subModule.Module(), - }, - ) - }) - moduleMap := make(map[string][]addrs.ModuleInstance) - moduleMap[""] = []addrs.ModuleInstance{childModule} - moduleMap[childModule.String()] = []addrs.ModuleInstance{subModule} - - got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) - - if err != nil { 
- t.Fatalf("unexpected error: %s", err.Error()) - } - - if len(got) != 1 { - t.Fatalf("wrong result! got %d modules, expected 1", len(got)) - } - - if got[0].Address != "module.child" { - t.Fatalf("wrong result! got %#v\n", got) - } - - if got[0].ChildModules[0].Address != "module.child.module.submodule" { - t.Fatalf("wrong result! got %#v\n", got) - } -} - -func TestMarshalModules_parent_no_resources(t *testing.T) { - subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") - testState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(subModule), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: subModule.Module(), - }, - ) - }) - got, err := marshalRootModule(testState, testSchemas()) - - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) - } - - if len(got.ChildModules) != 1 { - t.Fatalf("wrong result! got %d modules, expected 1", len(got.ChildModules)) - } - - if got.ChildModules[0].Address != "module.child" { - t.Fatalf("wrong result! got %#v\n", got) - } - - if got.ChildModules[0].ChildModules[0].Address != "module.child.module.submodule" { - t.Fatalf("wrong result! 
got %#v\n", got) - } -} - -func testSchemas() *terraform.Schemas { - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): { - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "woozles": {Type: cty.String, Optional: true, Computed: true}, - "foozles": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - "test_map_attr": { - Attributes: map[string]*configschema.Attribute{ - "data": {Type: cty.Map(cty.String), Optional: true, Computed: true, Sensitive: true}, - }, - }, - }, - }, - }, - } -} - -func TestSensitiveAsBool(t *testing.T) { - tests := []struct { - Input cty.Value - Want cty.Value - }{ - { - cty.StringVal("hello"), - cty.False, - }, - { - cty.NullVal(cty.String), - cty.False, - }, - { - cty.StringVal("hello").Mark(marks.Sensitive), - cty.True, - }, - { - cty.NullVal(cty.String).Mark(marks.Sensitive), - cty.True, - }, - - { - cty.NullVal(cty.DynamicPseudoType).Mark(marks.Sensitive), - cty.True, - }, - { - cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), - cty.False, - }, - { - cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})).Mark(marks.Sensitive), - cty.True, - }, - { - cty.DynamicVal, - cty.False, - }, - { - cty.DynamicVal.Mark(marks.Sensitive), - cty.True, - }, - - { - cty.ListValEmpty(cty.String), - cty.EmptyTupleVal, - }, - { - cty.ListValEmpty(cty.String).Mark(marks.Sensitive), - cty.True, - }, - { - cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friend").Mark(marks.Sensitive), - }), - cty.TupleVal([]cty.Value{ - cty.False, - cty.True, - }), - }, - { - cty.SetValEmpty(cty.String), - cty.EmptyTupleVal, - }, - { - 
cty.SetValEmpty(cty.String).Mark(marks.Sensitive), - cty.True, - }, - { - cty.SetVal([]cty.Value{cty.StringVal("hello")}), - cty.TupleVal([]cty.Value{cty.False}), - }, - { - cty.SetVal([]cty.Value{cty.StringVal("hello").Mark(marks.Sensitive)}), - cty.True, - }, - { - cty.EmptyTupleVal.Mark(marks.Sensitive), - cty.True, - }, - { - cty.TupleVal([]cty.Value{ - cty.StringVal("hello"), - cty.StringVal("friend").Mark(marks.Sensitive), - }), - cty.TupleVal([]cty.Value{ - cty.False, - cty.True, - }), - }, - { - cty.MapValEmpty(cty.String), - cty.EmptyObjectVal, - }, - { - cty.MapValEmpty(cty.String).Mark(marks.Sensitive), - cty.True, - }, - { - cty.MapVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse"), - }), - cty.EmptyObjectVal, - }, - { - cty.MapVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse").Mark(marks.Sensitive), - }), - cty.ObjectVal(map[string]cty.Value{ - "animal": cty.True, - }), - }, - { - cty.MapVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse").Mark(marks.Sensitive), - }).Mark(marks.Sensitive), - cty.True, - }, - { - cty.EmptyObjectVal, - cty.EmptyObjectVal, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse"), - }), - cty.EmptyObjectVal, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse").Mark(marks.Sensitive), - }), - cty.ObjectVal(map[string]cty.Value{ - "animal": cty.True, - }), - }, - { - cty.ObjectVal(map[string]cty.Value{ - "greeting": cty.StringVal("hello"), - "animal": cty.StringVal("horse").Mark(marks.Sensitive), - }).Mark(marks.Sensitive), - cty.True, - }, - { - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("known").Mark(marks.Sensitive), - }), - }), - 
cty.TupleVal([]cty.Value{ - cty.EmptyObjectVal, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - }), - }), - }, - { - cty.ListVal([]cty.Value{ - cty.MapValEmpty(cty.String), - cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("known").Mark(marks.Sensitive), - }), - cty.MapVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.String), - }), - }), - cty.TupleVal([]cty.Value{ - cty.EmptyObjectVal, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - }), - cty.EmptyObjectVal, - }), - }, - { - cty.ObjectVal(map[string]cty.Value{ - "list": cty.UnknownVal(cty.List(cty.String)), - "set": cty.UnknownVal(cty.Set(cty.Bool)), - "tuple": cty.UnknownVal(cty.Tuple([]cty.Type{cty.String, cty.Number})), - "map": cty.UnknownVal(cty.Map(cty.String)), - "object": cty.UnknownVal(cty.Object(map[string]cty.Type{"a": cty.String})), - }), - cty.ObjectVal(map[string]cty.Value{ - "list": cty.EmptyTupleVal, - "set": cty.EmptyTupleVal, - "tuple": cty.EmptyTupleVal, - "map": cty.EmptyObjectVal, - "object": cty.EmptyObjectVal, - }), - }, - } - - for _, test := range tests { - got := SensitiveAsBool(test.Input) - if !reflect.DeepEqual(got, test.Want) { - t.Errorf( - "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", - test.Input, got, test.Want, - ) - } - } -} diff --git a/internal/command/output.go b/internal/command/output.go deleted file mode 100644 index 0bcde54e8fae..000000000000 --- a/internal/command/output.go +++ /dev/null @@ -1,123 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// OutputCommand is a Command implementation that reads an output -// from a Terraform state and prints it. 
-type OutputCommand struct { - Meta -} - -func (c *OutputCommand) Run(rawArgs []string) int { - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Parse and validate flags - args, diags := arguments.ParseOutput(rawArgs) - if diags.HasErrors() { - c.View.Diagnostics(diags) - c.View.HelpPrompt("output") - return 1 - } - - view := views.NewOutput(args.ViewType, c.View) - - // Fetch data from state - outputs, diags := c.Outputs(args.StatePath) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Render the view - viewDiags := view.Output(args.Name, outputs) - diags = diags.Append(viewDiags) - - view.Diagnostics(diags) - - if diags.HasErrors() { - return 1 - } - - return 0 -} - -func (c *OutputCommand) Outputs(statePath string) (map[string]*states.OutputValue, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Allow state path override - if statePath != "" { - c.Meta.statePath = statePath - } - - // Load the backend - b, backendDiags := c.Backend(nil) - diags = diags.Append(backendDiags) - if diags.HasErrors() { - return nil, diags - } - - // This is a read-only command - c.ignoreRemoteVersionConflict(b) - - env, err := c.Workspace() - if err != nil { - diags = diags.Append(fmt.Errorf("Error selecting workspace: %s", err)) - return nil, diags - } - - // Get the state - stateStore, err := b.StateMgr(env) - if err != nil { - diags = diags.Append(fmt.Errorf("Failed to load state: %s", err)) - return nil, diags - } - - output, err := stateStore.GetRootOutputValues() - if err != nil { - return nil, diags.Append(err) - } - - return output, diags -} - -func (c *OutputCommand) Help() string { - helpText := ` -Usage: terraform [global options] output [options] [NAME] - - Reads an output variable from a Terraform state file and prints - the value. With no additional arguments, output will display all - the outputs for the root module. 
If NAME is not specified, all - outputs are printed. - -Options: - - -state=path Path to the state file to read. Defaults to - "terraform.tfstate". Ignored when remote - state is used. - - -no-color If specified, output won't contain any color. - - -json If specified, machine readable output will be - printed in JSON format. - - -raw For value types that can be automatically - converted to a string, will print the raw - string directly, rather than a human-oriented - representation of the value. -` - return strings.TrimSpace(helpText) -} - -func (c *OutputCommand) Synopsis() string { - return "Show output values from your root module" -} diff --git a/internal/command/output_test.go b/internal/command/output_test.go deleted file mode 100644 index d3d742c59b51..000000000000 --- a/internal/command/output_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package command - -import ( - "os" - "path/filepath" - "strings" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" -) - -func TestOutput(t *testing.T) { - originalState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, - ) - }) - - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "foo", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } - - actual := strings.TrimSpace(output.Stdout()) - if actual != `"bar"` { - t.Fatalf("bad: %#v", actual) - } -} - -func TestOutput_json(t *testing.T) { - originalState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - 
false, - ) - }) - - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-json", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } - - actual := strings.TrimSpace(output.Stdout()) - expected := "{\n \"foo\": {\n \"sensitive\": false,\n \"type\": \"string\",\n \"value\": \"bar\"\n }\n}" - if actual != expected { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestOutput_emptyOutputs(t *testing.T) { - originalState := states.NewState() - statePath := testStateFile(t, originalState) - - p := testProvider() - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-no-color", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } - // Warning diagnostics should go to stdout - if got, want := output.Stdout(), "Warning: No outputs found"; !strings.Contains(got, want) { - t.Fatalf("bad output: expected to contain %q, got:\n%s", want, got) - } -} - -func TestOutput_badVar(t *testing.T) { - originalState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, - ) - }) - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "bar", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: \n%s", output.Stderr()) - } -} - -func TestOutput_blank(t *testing.T) { - originalState := 
states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, - ) - s.SetOutputValue( - addrs.OutputValue{Name: "name"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("john-doe"), - false, - ) - }) - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "", - } - - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } - - expectedOutput := "foo = \"bar\"\nname = \"john-doe\"\n" - if got := output.Stdout(); got != expectedOutput { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", got, expectedOutput) - } -} - -func TestOutput_manyArgs(t *testing.T) { - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "bad", - "bad", - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: \n%s", output.Stdout()) - } -} - -func TestOutput_noArgs(t *testing.T) { - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stdout()) - } -} - -func TestOutput_noState(t *testing.T) { - originalState := states.NewState() - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "foo", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } -} - -func TestOutput_noVars(t 
*testing.T) { - originalState := states.NewState() - - statePath := testStateFile(t, originalState) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "bar", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } -} - -func TestOutput_stateDefault(t *testing.T) { - originalState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, - ) - }) - - // Write the state file in a temporary directory with the - // default filename. - td := testTempDir(t) - statePath := filepath.Join(td, DefaultStateFilename) - - f, err := os.Create(statePath) - if err != nil { - t.Fatalf("err: %s", err) - } - err = writeStateForTesting(originalState, f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Change to that directory - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(filepath.Dir(statePath)); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - view, done := testView(t) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "foo", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: \n%s", output.Stderr()) - } - - actual := strings.TrimSpace(output.Stdout()) - if actual != `"bar"` { - t.Fatalf("bad: %#v", actual) - } -} diff --git a/internal/command/plan.go b/internal/command/plan.go deleted file mode 100644 index 674e9a8ff26d..000000000000 --- a/internal/command/plan.go +++ /dev/null @@ -1,278 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - 
"github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// PlanCommand is a Command implementation that compares a Terraform -// configuration to an actual infrastructure and shows the differences. -type PlanCommand struct { - Meta -} - -func (c *PlanCommand) Run(rawArgs []string) int { - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Propagate -no-color for legacy use of Ui. The remote backend and - // cloud package use this; it should be removed when/if they are - // migrated to views. - c.Meta.color = !common.NoColor - c.Meta.Color = c.Meta.color - - // Parse and validate flags - args, diags := arguments.ParsePlan(rawArgs) - - // Instantiate the view, even if there are flag errors, so that we render - // diagnostics according to the desired view - view := views.NewPlan(args.ViewType, c.View) - - if diags.HasErrors() { - view.Diagnostics(diags) - view.HelpPrompt() - return 1 - } - - // Check for user-supplied plugin path - var err error - if c.pluginPath, err = c.loadPluginPath(); err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - // FIXME: the -input flag value is needed to initialize the backend and the - // operation, but there is no clear path to pass this value down, so we - // continue to mutate the Meta object state for now. - c.Meta.input = args.InputEnabled - - // FIXME: the -parallelism flag is used to control the concurrency of - // Terraform operations. At the moment, this value is used both to - // initialize the backend via the ContextOpts field inside CLIOpts, and to - // set a largely unused field on the Operation request. Again, there is no - // clear path to pass this value down, so we continue to mutate the Meta - // object state for now. 
- c.Meta.parallelism = args.Operation.Parallelism - - diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) - - // Prepare the backend with the backend-specific arguments - be, beDiags := c.PrepareBackend(args.State, args.ViewType) - diags = diags.Append(beDiags) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Build the operation request - opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation, args.OutPath) - diags = diags.Append(opDiags) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Collect variable value and add them to the operation request - diags = diags.Append(c.GatherVariables(opReq, args.Vars)) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Before we delegate to the backend, we'll print any warning diagnostics - // we've accumulated here, since the backend will start fresh with its own - // diagnostics. - view.Diagnostics(diags) - diags = nil - - // Perform the operation - op, err := c.RunOperation(be, opReq) - if err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - if op.Result != backend.OperationSuccess { - return op.Result.ExitStatus() - } - if args.DetailedExitCode && !op.PlanEmpty { - return 2 - } - - return op.Result.ExitStatus() -} - -func (c *PlanCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { - // FIXME: we need to apply the state arguments to the meta object here - // because they are later used when initializing the backend. Carving a - // path to pass these arguments to the functions that need them is - // difficult but would make their use easier to understand. 
- c.Meta.applyStateArguments(args) - - backendConfig, diags := c.loadBackendConfig(".") - if diags.HasErrors() { - return nil, diags - } - - // Load the backend - be, beDiags := c.Backend(&BackendOpts{ - Config: backendConfig, - ViewType: viewType, - }) - diags = diags.Append(beDiags) - if beDiags.HasErrors() { - return nil, diags - } - - return be, diags -} - -func (c *PlanCommand) OperationRequest( - be backend.Enhanced, - view views.Plan, - viewType arguments.ViewType, - args *arguments.Operation, - planOutPath string, -) (*backend.Operation, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Build the operation - opReq := c.Operation(be, viewType) - opReq.ConfigDir = "." - opReq.PlanMode = args.PlanMode - opReq.Hooks = view.Hooks() - opReq.PlanRefresh = args.Refresh - opReq.PlanOutPath = planOutPath - opReq.Targets = args.Targets - opReq.ForceReplace = args.ForceReplace - opReq.Type = backend.OperationTypePlan - opReq.View = view.Operation() - - var err error - opReq.ConfigLoader, err = c.initConfigLoader() - if err != nil { - diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) - return nil, diags - } - - return opReq, diags -} - -func (c *PlanCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // FIXME the arguments package currently trivially gathers variable related - // arguments in a heterogenous slice, in order to minimize the number of - // code paths gathering variables during the transition to this structure. - // Once all commands that gather variables have been converted to this - // structure, we could move the variable gathering code to the arguments - // package directly, removing this shim layer. 
- - varArgs := args.All() - items := make([]rawFlag, len(varArgs)) - for i := range varArgs { - items[i].Name = varArgs[i].Name - items[i].Value = varArgs[i].Value - } - c.Meta.variableArgs = rawFlags{items: &items} - opReq.Variables, diags = c.collectVariableValues() - - return diags -} - -func (c *PlanCommand) Help() string { - helpText := ` -Usage: terraform [global options] plan [options] - - Generates a speculative execution plan, showing what actions Terraform - would take to apply the current configuration. This command will not - actually perform the planned actions. - - You can optionally save the plan to a file, which you can then pass to - the "apply" command to perform exactly the actions described in the plan. - -Plan Customization Options: - - The following options customize how Terraform will produce its plan. You - can also use these options when you run "terraform apply" without passing - it a saved plan, in order to plan and apply in a single command. - - -destroy Select the "destroy" planning mode, which creates a plan - to destroy all objects currently managed by this - Terraform configuration instead of the usual behavior. - - -refresh-only Select the "refresh only" planning mode, which checks - whether remote objects still match the outcome of the - most recent Terraform apply but does not propose any - actions to undo any changes made outside of Terraform. - - -refresh=false Skip checking for external changes to remote objects - while creating the plan. This can potentially make - planning faster, but at the expense of possibly planning - against a stale record of the remote system state. - - -replace=resource Force replacement of a particular resource instance using - its resource address. If the plan would've normally - produced an update or no-op action for this instance, - Terraform will plan to replace it instead. You can use - this option multiple times to replace more than one object. 
- - -target=resource Limit the planning operation to only the given module, - resource, or resource instance and all of its - dependencies. You can use this option multiple times to - include more than one object. This is for exceptional - use only. - - -var 'foo=bar' Set a value for one of the input variables in the root - module of the configuration. Use this option more than - once to set more than one variable. - - -var-file=filename Load variable values from the given file, in addition - to the default files terraform.tfvars and *.auto.tfvars. - Use this option more than once to include more than one - variables file. - -Other Options: - - -compact-warnings If Terraform produces any warnings that are not - accompanied by errors, shows them in a more compact form - that includes only the summary messages. - - -detailed-exitcode Return detailed exit codes when the command exits. This - will change the meaning of exit codes to: - 0 - Succeeded, diff is empty (no changes) - 1 - Errored - 2 - Succeeded, there is a diff - - -input=true Ask for input for variables if not directly set. - - -lock=false Don't hold a state lock during the operation. This is - dangerous if others might concurrently run commands - against the same workspace. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -out=path Write a plan file to the given path. This can be used as - input to the "apply" command. - - -parallelism=n Limit the number of concurrent operations. Defaults to 10. - - -state=statefile A legacy option used for the local backend only. See the - local backend's documentation for more information. 
-` - return strings.TrimSpace(helpText) -} - -func (c *PlanCommand) Synopsis() string { - return "Show changes required by the current configuration" -} diff --git a/internal/command/plan_test.go b/internal/command/plan_test.go deleted file mode 100644 index 7ab3324dc39e..000000000000 --- a/internal/command/plan_test.go +++ /dev/null @@ -1,1622 +0,0 @@ -package command - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - backendinit "github.com/hashicorp/terraform/internal/backend/init" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestPlan(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } -} - -func TestPlan_lockedState(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - unlock, err := testLockState(t, testDataDir, filepath.Join(td, DefaultStateFilename)) - if err != nil { - t.Fatal(err) - } - defer unlock() - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := 
c.Run(args) - if code == 0 { - t.Fatal("expected error", done(t).Stdout()) - } - - output := done(t).Stderr() - if !strings.Contains(output, "lock") { - t.Fatal("command output does not look like a lock error:", output) - } -} - -func TestPlan_plan(t *testing.T) { - testCwd(t) - - planPath := testPlanFileNoop(t) - - p := testProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{planPath} - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("wrong exit status %d; want 1\nstderr: %s", code, output.Stderr()) - } -} - -func TestPlan_destroy(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - outPath := testTempFile(t) - statePath := testStateFile(t, originalState) - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-destroy", - "-out", outPath, - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - plan := testReadPlan(t, outPath) - for _, rc := range plan.Changes.Resources { - if got, want := rc.Action, plans.Delete; got != want { - t.Fatalf("wrong action %s for %s; want %s\nplanned change: %s", got, rc.Addr, want, spew.Sdump(rc)) - } - } -} - -func TestPlan_noState(t *testing.T) { - td := 
t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify that refresh was called - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - - // Verify that the provider was called with the existing state - actual := p.PlanResourceChangeRequest.PriorState - expected := cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_instance"].Block.ImpliedType()) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestPlan_outPath(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - outPath := filepath.Join(td, "test.plan") - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ - PlannedState: cty.NullVal(cty.EmptyObject), - } - - args := []string{ - "-out", outPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read -} - -func TestPlan_outPathNoChange(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - // 
Aside from "id" (which is computed) the values here must - // exactly match the values in the "plan" test fixture in order - // to produce the empty plan we need for this test. - AttrsJSON: []byte(`{"id":"bar","ami":"bar","network_interface":[{"description":"Main network interface","device_index":"0"}]}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - - outPath := filepath.Join(td, "test.plan") - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-out", outPath, - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - plan := testReadPlan(t, outPath) - if !plan.Changes.Empty() { - t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) - } -} - -func TestPlan_outPathWithError(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-fail-condition"), td) - defer testChdir(t, td)() - - outPath := filepath.Join(td, "test.plan") - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ - PlannedState: cty.NullVal(cty.EmptyObject), - } - - args := []string{ - "-out", outPath, - } - code := c.Run(args) - output := done(t) - if code == 0 { - t.Fatal("expected non-zero exit status", output) - } - - plan := testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read - if !plan.Errored { - t.Fatal("plan should be marked with Errored") - } - - if plan.Checks == nil { - t.Fatal("plan contains no checks") - } - - // the checks should only contain one failure - 
results := plan.Checks.ConfigResults.Elements() - if len(results) != 1 { - t.Fatal("incorrect number of check results", len(results)) - } - if results[0].Value.Status != checks.StatusFail { - t.Errorf("incorrect status, got %s", results[0].Value.Status) - } -} - -// When using "-out" with a backend, the plan should encode the backend config -func TestPlan_outBackend(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-out-backend"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar","ami":"bar"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - // Set up our backend state - dataState, srv := testBackendState(t, originalState, 200) - defer srv.Close() - testStateFileRemote(t, dataState) - - outPath := "foo" - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "ami": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-out", outPath, - } - code := c.Run(args) - output := 
done(t) - if code != 0 { - t.Logf("stdout: %s", output.Stdout()) - t.Fatalf("plan command failed with exit code %d\n\n%s", code, output.Stderr()) - } - - plan := testReadPlan(t, outPath) - if !plan.Changes.Empty() { - t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) - } - - if got, want := plan.Backend.Type, "http"; got != want { - t.Errorf("wrong backend type %q; want %q", got, want) - } - if got, want := plan.Backend.Workspace, "default"; got != want { - t.Errorf("wrong backend workspace %q; want %q", got, want) - } - { - httpBackend := backendinit.Backend("http")() - schema := httpBackend.ConfigSchema() - got, err := plan.Backend.Config.Decode(schema.ImpliedType()) - if err != nil { - t.Fatalf("failed to decode backend config in plan: %s", err) - } - want, err := dataState.Backend.Config(schema) - if err != nil { - t.Fatalf("failed to decode cached config: %s", err) - } - if !want.RawEquals(got) { - t.Errorf("wrong backend config\ngot: %#v\nwant: %#v", got, want) - } - } -} - -func TestPlan_refreshFalse(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-refresh=false", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if p.ReadResourceCalled { - t.Fatal("ReadResource should not have been called") - } -} - -func TestPlan_state(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - originalState := testState() - statePath := testStateFile(t, originalState) - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: 
Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify that the provider was called with the existing state - actual := p.PlanResourceChangeRequest.PriorState - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.NullVal(cty.String), - "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "device_index": cty.String, - "description": cty.String, - })), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestPlan_stateDefault(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - // Generate state and move it to the default path - originalState := testState() - statePath := testStateFile(t, originalState) - os.Rename(statePath, path.Join(td, "terraform.tfstate")) - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Verify that the provider was called with the existing state - actual := p.PlanResourceChangeRequest.PriorState - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.NullVal(cty.String), - "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "device_index": cty.String, - "description": cty.String, - })), - }) - if !expected.RawEquals(actual) { - t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestPlan_validate(t *testing.T) { - // This is triggered by not asking for input so 
we have to set this to false - test = false - defer func() { test = true }() - - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-invalid"), td) - defer testChdir(t, td)() - - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{"-no-color"} - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - actual := output.Stderr() - if want := "Error: Invalid count argument"; !strings.Contains(actual, want) { - t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) - } - if want := "9: count = timestamp()"; !strings.Contains(actual, want) { - t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) - } -} - -func TestPlan_vars(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - actual = req.ProposedNewState.GetAttr("value").AsString() - resp.PlannedState = req.ProposedNewState - return - } - - args := []string{ - 
"-var", "foo=bar", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestPlan_varsInvalid(t *testing.T) { - testCases := []struct { - args []string - wantErr string - }{ - { - []string{"-var", "foo"}, - `The given -var option "foo" is not correctly specified.`, - }, - { - []string{"-var", "foo = bar"}, - `Variable name "foo " is invalid due to trailing space.`, - }, - } - - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - for _, tc := range testCases { - t.Run(strings.Join(tc.args, " "), func(t *testing.T) { - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - code := c.Run(tc.args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) - } - - got := output.Stderr() - if !strings.Contains(got, tc.wantErr) { - t.Fatalf("bad error output, want %q, got:\n%s", tc.wantErr, got) - } - }) - } -} - -func TestPlan_varsUnset(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - // The plan command will prompt for interactive input of var.foo. - // We'll answer "bar" to that prompt, which should then allow this - // configuration to apply even though var.foo doesn't have a - // default value and there are no -var arguments on our command line. 
- - // This will (helpfully) panic if more than one variable is requested during plan: - // https://github.com/hashicorp/terraform/issues/26027 - close := testInteractiveInput(t, []string{"bar"}) - defer close() - - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } -} - -// This test adds a required argument to the test provider to validate -// processing of user input: -// https://github.com/hashicorp/terraform/issues/26035 -func TestPlan_providerArgumentUnset(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - // The plan command will prompt for interactive input of provider.test.region - defaultInputReader = bytes.NewBufferString("us-east-1\n") - - p := planFixtureProvider() - // override the planFixtureProvider schema to include a required provider argument - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Required: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true, Computed: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: 
true}, - "description": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Required: true, - }, - "valid": { - Type: cty.Bool, - Computed: true, - }, - }, - }, - }, - }, - } - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } -} - -// Test that terraform properly merges provider configuration that's split -// between config files and interactive input variables. -// https://github.com/hashicorp/terraform/issues/28956 -func TestPlan_providerConfigMerge(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-provider-input"), td) - defer testChdir(t, td)() - - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - // The plan command will prompt for interactive input of provider.test.region - defaultInputReader = bytes.NewBufferString("us-east-1\n") - - p := planFixtureProvider() - // override the planFixtureProvider schema to include a required provider argument and a nested block - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Required: true}, - "url": {Type: cty.String, Required: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "auth": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "user": {Type: cty.String, Required: true}, - "password": {Type: cty.String, Required: true}, - }, - }, - }, - }, - }, - }, - ResourceTypes: 
map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - }, - }, - }, - }, - } - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("configure provider not called") - } - - // For this test, we want to confirm that we've sent the expected config - // value *to* the provider. - got := p.ConfigureProviderRequest.Config - want := cty.ObjectVal(map[string]cty.Value{ - "auth": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("one"), - "password": cty.StringVal("onepw"), - }), - cty.ObjectVal(map[string]cty.Value{ - "user": cty.StringVal("two"), - "password": cty.StringVal("twopw"), - }), - }), - "region": cty.StringVal("us-east-1"), - "url": cty.StringVal("example.com"), - }) - - if !got.RawEquals(want) { - t.Fatal("wrong provider config") - } - -} - -func TestPlan_varFile(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - varFilePath := testTempFile(t) - if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - actual = req.ProposedNewState.GetAttr("value").AsString() - resp.PlannedState = req.ProposedNewState - return - } - - args := []string{ - "-var-file", 
varFilePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestPlan_varFileDefault(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - varFilePath := filepath.Join(td, "terraform.tfvars") - if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - actual := "" - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - actual = req.ProposedNewState.GetAttr("value").AsString() - resp.PlannedState = req.ProposedNewState - return - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if actual != "bar" { - t.Fatal("didn't work") - } -} - -func TestPlan_varFileWithDecls(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-vars"), td) - defer testChdir(t, td)() - - varFilePath := testTempFile(t) - if err := ioutil.WriteFile(varFilePath, []byte(planVarFileWithDecl), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - p := planVarsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-var-file", varFilePath, - } - code := c.Run(args) - output := done(t) - if code == 0 { - t.Fatalf("succeeded; want failure\n\n%s", output.Stdout()) - } - - msg := output.Stderr() - if got, want := msg, "Variable declaration in .tfvars file"; !strings.Contains(got, 
want) { - t.Fatalf("missing expected error message\nwant message containing %q\ngot:\n%s", want, got) - } -} - -func TestPlan_detailedExitcode(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - t.Run("return 1", func(t *testing.T) { - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - // Running plan without setting testingOverrides is similar to plan without init - View: view, - }, - } - code := c.Run([]string{"-detailed-exitcode"}) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - }) - - t.Run("return 2", func(t *testing.T) { - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - code := c.Run([]string{"-detailed-exitcode"}) - output := done(t) - if code != 2 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - }) -} - -func TestPlan_detailedExitcode_emptyDiff(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-emptydiff"), td) - defer testChdir(t, td)() - - p := testProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{"-detailed-exitcode"} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } -} - -func TestPlan_shutdown(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-shutdown"), td) - defer testChdir(t, td)() - - cancelled := make(chan struct{}) - shutdownCh := make(chan struct{}) - - p := testProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - ShutdownCh: shutdownCh, - }, - } - - p.StopFn = func() error { - close(cancelled) - return nil - } - - var once sync.Once - - 
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - once.Do(func() { - shutdownCh <- struct{}{} - }) - - // Because of the internal lock in the MockProvider, we can't - // coordinate directly with the calling of Stop, and making the - // MockProvider concurrent is disruptive to a lot of existing tests. - // Wait here a moment to help make sure the main goroutine gets to the - // Stop call before we exit, or the plan may finish before it can be - // canceled. - time.Sleep(200 * time.Millisecond) - - s := req.ProposedNewState.AsValueMap() - s["ami"] = cty.StringVal("bar") - resp.PlannedState = cty.ObjectVal(s) - return - } - - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - code := c.Run([]string{}) - output := done(t) - if code != 1 { - t.Errorf("wrong exit code %d; want 1\noutput:\n%s", code, output.Stdout()) - } - - select { - case <-cancelled: - default: - t.Error("command not cancelled") - } -} - -func TestPlan_init_required(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - // Running plan without setting testingOverrides is similar to plan without init - View: view, - }, - } - - args := []string{"-no-color"} - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("expected error, got success") - } - got := output.Stderr() - if !(strings.Contains(got, "terraform init") && strings.Contains(got, "provider registry.terraform.io/hashicorp/test: required by this configuration but no version is selected")) { - t.Fatal("wrong error message in output:", got) - } -} - -// Config with multiple resources, targeting plan of a subset 
-func TestPlan_targeted(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("apply-targeted"), td) - defer testChdir(t, td)() - - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-target", "test_instance.foo", - "-target", "test_instance.baz", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if got, want := output.Stdout(), "3 to add, 0 to change, 0 to destroy"; !strings.Contains(got, want) { - t.Fatalf("bad change summary, want %q, got:\n%s", want, got) - } -} - -// Diagnostics for invalid -target flags -func TestPlan_targetFlagsDiags(t *testing.T) { - testCases := map[string]string{ - "test_instance.": "Dot must be followed by attribute name.", - "test_instance": "Resource specification must include a resource type and name.", - } - - for target, wantDiag := range testCases { - t.Run(target, func(t *testing.T) { - td := testTempDir(t) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - View: view, - }, - } - - args := []string{ - "-target", target, - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) - } - - got := output.Stderr() - if !strings.Contains(got, target) { - t.Fatalf("bad error output, want %q, got:\n%s", target, got) - } - if 
!strings.Contains(got, wantDiag) { - t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) - } - }) - } -} - -func TestPlan_replace(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan-replace"), td) - defer testChdir(t, td)() - - originalState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "a", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"hello"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - statePath := testStateFile(t, originalState) - - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-state", statePath, - "-no-color", - "-replace", "test_instance.a", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) - } - - stdout := output.Stdout() - if got, want := stdout, "1 to add, 0 to change, 1 to destroy"; !strings.Contains(got, want) { - t.Errorf("wrong plan summary\ngot output:\n%s\n\nwant substring: %s", got, want) - } - if got, want := stdout, "test_instance.a will be replaced, as requested"; !strings.Contains(got, want) { - t.Errorf("missing 
replace explanation\ngot output:\n%s\n\nwant substring: %s", got, want) - } -} - -// Verify that the parallelism flag allows no more than the desired number of -// concurrent calls to PlanResourceChange. -func TestPlan_parallelism(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("parallelism"), td) - defer testChdir(t, td)() - - par := 4 - - // started is a semaphore that we use to ensure that we never have more - // than "par" plan operations happening concurrently - started := make(chan struct{}, par) - - // beginCtx is used as a starting gate to hold back PlanResourceChange - // calls until we reach the desired concurrency. The cancel func "begin" is - // called once we reach the desired concurrency, allowing all apply calls - // to proceed in unison. - beginCtx, begin := context.WithCancel(context.Background()) - - // Since our mock provider has its own mutex preventing concurrent calls - // to ApplyResourceChange, we need to use a number of separate providers - // here. They will all have the same mock implementation function assigned - // but crucially they will each have their own mutex. - providerFactories := map[addrs.Provider]providers.Factory{} - for i := 0; i < 10; i++ { - name := fmt.Sprintf("test%d", i) - provider := &terraform.MockProvider{} - provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - name + "_instance": {Block: &configschema.Block{}}, - }, - } - provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - // If we ever have more than our intended parallelism number of - // plan operations running concurrently, the semaphore will fail. 
- select { - case started <- struct{}{}: - defer func() { - <-started - }() - default: - t.Fatal("too many concurrent apply operations") - } - - // If we never reach our intended parallelism, the context will - // never be canceled and the test will time out. - if len(started) >= par { - begin() - } - <-beginCtx.Done() - - // do some "work" - // Not required for correctness, but makes it easier to spot a - // failure when there is more overlap. - time.Sleep(10 * time.Millisecond) - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider) - } - testingOverrides := &testingOverrides{ - Providers: providerFactories, - } - - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: testingOverrides, - View: view, - }, - } - - args := []string{ - fmt.Sprintf("-parallelism=%d", par), - } - - res := c.Run(args) - output := done(t) - if res != 0 { - t.Fatal(output.Stdout()) - } -} - -func TestPlan_warnings(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - t.Run("full warnings", func(t *testing.T) { - p := planWarningsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - code := c.Run([]string{}) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) - wantWarnings := []string{ - "warning 1", - "warning 2", - "warning 3", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) - - t.Run("compact warnings", func(t *testing.T) { - p := planWarningsFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: 
metaOverridesForProvider(p), - View: view, - }, - } - code := c.Run([]string{"-compact-warnings"}) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) - // and the message that plan was run with -compact-warnings - wantWarnings := []string{ - "warning 1", - "warning 2", - "warning 3", - "To see the full warning notes, run Terraform without -compact-warnings.", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) -} - -func TestPlan_jsonGoldenReference(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("plan"), td) - defer testChdir(t, td)() - - p := planFixtureProvider() - view, done := testView(t) - c := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-json", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - checkGoldenReference(t, output, "plan") -} - -// planFixtureSchema returns a schema suitable for processing the -// configuration in testdata/plan . This schema should be -// assigned to a mock provider named "test". 
-func planFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Required: true, - }, - "valid": { - Type: cty.Bool, - Computed: true, - }, - }, - }, - }, - }, - } -} - -// planFixtureProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/plan. This mock has -// GetSchemaResponse and PlanResourceChangeFn populated, with the plan -// step just passing through the new object proposed by Terraform Core. 
-func planFixtureProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = planFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("zzzzz"), - "valid": cty.BoolVal(true), - }), - } - } - return p -} - -// planVarsFixtureSchema returns a schema suitable for processing the -// configuration in testdata/plan-vars . This schema should be -// assigned to a mock provider named "test". -func planVarsFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } -} - -// planVarsFixtureProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/plan-vars. This mock has -// GetSchemaResponse and PlanResourceChangeFn populated, with the plan -// step just passing through the new object proposed by Terraform Core. 
-func planVarsFixtureProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = planVarsFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("zzzzz"), - "valid": cty.BoolVal(true), - }), - } - } - return p -} - -// planFixtureProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/plan. This mock has -// GetSchemaResponse and PlanResourceChangeFn populated, returning 3 warnings. -func planWarningsFixtureProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = planFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics{ - tfdiags.SimpleWarning("warning 1"), - tfdiags.SimpleWarning("warning 2"), - tfdiags.SimpleWarning("warning 3"), - }, - PlannedState: req.ProposedNewState, - } - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("zzzzz"), - "valid": cty.BoolVal(true), - }), - } - } - return p -} - -const planVarFile = ` -foo = "bar" -` - -const planVarFileWithDecl = ` -foo = "bar" - -variable "nope" { -} -` diff --git a/internal/command/refresh.go b/internal/command/refresh.go deleted file mode 100644 index 825981f142e9..000000000000 --- a/internal/command/refresh.go +++ /dev/null @@ -1,227 +0,0 @@ -package command - -import ( - "fmt" - "strings" - - 
"github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// RefreshCommand is a cli.Command implementation that refreshes the state -// file. -type RefreshCommand struct { - Meta -} - -func (c *RefreshCommand) Run(rawArgs []string) int { - var diags tfdiags.Diagnostics - - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Propagate -no-color for legacy use of Ui. The remote backend and - // cloud package use this; it should be removed when/if they are - // migrated to views. - c.Meta.color = !common.NoColor - c.Meta.Color = c.Meta.color - - // Parse and validate flags - args, diags := arguments.ParseRefresh(rawArgs) - - // Instantiate the view, even if there are flag errors, so that we render - // diagnostics according to the desired view - view := views.NewRefresh(args.ViewType, c.View) - - if diags.HasErrors() { - view.Diagnostics(diags) - view.HelpPrompt() - return 1 - } - - // Check for user-supplied plugin path - var err error - if c.pluginPath, err = c.loadPluginPath(); err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - // FIXME: the -input flag value is needed to initialize the backend and the - // operation, but there is no clear path to pass this value down, so we - // continue to mutate the Meta object state for now. - c.Meta.input = args.InputEnabled - - // FIXME: the -parallelism flag is used to control the concurrency of - // Terraform operations. At the moment, this value is used both to - // initialize the backend via the ContextOpts field inside CLIOpts, and to - // set a largely unused field on the Operation request. Again, there is no - // clear path to pass this value down, so we continue to mutate the Meta - // object state for now. 
- c.Meta.parallelism = args.Operation.Parallelism - - // Prepare the backend with the backend-specific arguments - be, beDiags := c.PrepareBackend(args.State, args.ViewType) - diags = diags.Append(beDiags) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Build the operation request - opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation) - diags = diags.Append(opDiags) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Collect variable value and add them to the operation request - diags = diags.Append(c.GatherVariables(opReq, args.Vars)) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Before we delegate to the backend, we'll print any warning diagnostics - // we've accumulated here, since the backend will start fresh with its own - // diagnostics. - view.Diagnostics(diags) - diags = nil - - // Perform the operation - op, err := c.RunOperation(be, opReq) - if err != nil { - diags = diags.Append(err) - view.Diagnostics(diags) - return 1 - } - - if op.State != nil { - view.Outputs(op.State.RootModule().OutputValues) - } - - return op.Result.ExitStatus() -} - -func (c *RefreshCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType) (backend.Enhanced, tfdiags.Diagnostics) { - // FIXME: we need to apply the state arguments to the meta object here - // because they are later used when initializing the backend. Carving a - // path to pass these arguments to the functions that need them is - // difficult but would make their use easier to understand. 
- c.Meta.applyStateArguments(args) - - backendConfig, diags := c.loadBackendConfig(".") - if diags.HasErrors() { - return nil, diags - } - - // Load the backend - be, beDiags := c.Backend(&BackendOpts{ - Config: backendConfig, - ViewType: viewType, - }) - diags = diags.Append(beDiags) - if beDiags.HasErrors() { - return nil, diags - } - - return be, diags -} - -func (c *RefreshCommand) OperationRequest(be backend.Enhanced, view views.Refresh, viewType arguments.ViewType, args *arguments.Operation, -) (*backend.Operation, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Build the operation - opReq := c.Operation(be, viewType) - opReq.ConfigDir = "." - opReq.Hooks = view.Hooks() - opReq.Targets = args.Targets - opReq.Type = backend.OperationTypeRefresh - opReq.View = view.Operation() - - var err error - opReq.ConfigLoader, err = c.initConfigLoader() - if err != nil { - diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %s", err)) - return nil, diags - } - - return opReq, diags -} - -func (c *RefreshCommand) GatherVariables(opReq *backend.Operation, args *arguments.Vars) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // FIXME the arguments package currently trivially gathers variable related - // arguments in a heterogenous slice, in order to minimize the number of - // code paths gathering variables during the transition to this structure. - // Once all commands that gather variables have been converted to this - // structure, we could move the variable gathering code to the arguments - // package directly, removing this shim layer. 
- - varArgs := args.All() - items := make([]rawFlag, len(varArgs)) - for i := range varArgs { - items[i].Name = varArgs[i].Name - items[i].Value = varArgs[i].Value - } - c.Meta.variableArgs = rawFlags{items: &items} - opReq.Variables, diags = c.collectVariableValues() - - return diags -} - -func (c *RefreshCommand) Help() string { - helpText := ` -Usage: terraform [global options] refresh [options] - - Update the state file of your infrastructure with metadata that matches - the physical resources they are tracking. - - This will not modify your infrastructure, but it can modify your - state file to update metadata. This metadata might cause new changes - to occur when you generate a plan or call apply next. - -Options: - - -compact-warnings If Terraform produces any warnings that are not - accompanied by errors, show them in a more compact form - that includes only the summary messages. - - -input=true Ask for input for variables if not directly set. - - -lock=false Don't hold a state lock during the operation. This is - dangerous if others might concurrently run commands - against the same workspace. - - -lock-timeout=0s Duration to retry a state lock. - - -no-color If specified, output won't contain any color. - - -parallelism=n Limit the number of concurrent operations. Defaults to 10. - - -target=resource Resource to target. Operation will be limited to this - resource and its dependencies. This flag can be used - multiple times. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. - - -state, state-out, and -backup are legacy options supported for the local - backend only. For more information, see the local backend's documentation. 
-` - return strings.TrimSpace(helpText) -} - -func (c *RefreshCommand) Synopsis() string { - return "Update the state to match remote systems" -} diff --git a/internal/command/refresh_test.go b/internal/command/refresh_test.go deleted file mode 100644 index 1598bbd299c7..000000000000 --- a/internal/command/refresh_test.go +++ /dev/null @@ -1,975 +0,0 @@ -package command - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -var equateEmpty = cmpopts.EquateEmpty() - -func TestRefresh(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should have been called") - } - - f, err := 
os.Open(statePath) - if err != nil { - t.Fatalf("err: %s", err) - } - - newStateFile, err := statefile.Read(f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(newStateFile.State.String()) - expected := strings.TrimSpace(testRefreshStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestRefresh_empty(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-empty"), td) - defer testChdir(t, td)() - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if p.ReadResourceCalled { - t.Fatal("ReadResource should not have been called") - } -} - -func TestRefresh_lockedState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - unlock, err := testLockState(t, testDataDir, statePath) - if err != nil { - t.Fatal(err) - } - defer unlock() - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - } - - code := c.Run(args) - output := done(t) - if code == 0 { - 
t.Fatal("expected error") - } - - got := output.Stderr() - if !strings.Contains(got, "lock") { - t.Fatal("command output does not look like a lock error:", got) - } -} - -func TestRefresh_cwd(t *testing.T) { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(testFixturePath("refresh")); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should have been called") - } - - f, err := os.Open(statePath) - if err != nil { - t.Fatalf("err: %s", err) - } - - newStateFile, err := statefile.Read(f) - f.Close() - if err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(newStateFile.State.String()) - expected := strings.TrimSpace(testRefreshCwdStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestRefresh_defaultState(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - originalState := testState() - - // Write the state file in a temporary directory with the - // default filename. 
- statePath := testStateFile(t, originalState) - - localState := statemgr.NewFilesystem(statePath) - if err := localState.RefreshState(); err != nil { - t.Fatal(err) - } - s := localState.State() - if s == nil { - t.Fatal("empty test state") - } - - // Change to that directory - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(filepath.Dir(statePath)); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should have been called") - } - - newState := testStateRead(t, statePath) - - actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - expected := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"yes\"\n }"), - Dependencies: []addrs.ConfigResource{}, - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) - } - - backupState := testStateRead(t, statePath+DefaultBackupExtension) - - actual = backupState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - expected = originalState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) - } -} - -func 
TestRefresh_outPath(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - // Output path - outf, err := ioutil.TempFile(td, "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - outPath := outf.Name() - outf.Close() - os.Remove(outPath) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - "-state-out", outPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - newState := testStateRead(t, statePath) - if !reflect.DeepEqual(newState, state) { - t.Fatalf("bad: %#v", newState) - } - - newState = testStateRead(t, outPath) - actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - expected := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"yes\"\n }"), - Dependencies: []addrs.ConfigResource{}, - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) - } - - if _, err := os.Stat(outPath + DefaultBackupExtension); !os.IsNotExist(err) { - if err != nil { - t.Fatalf("failed to test for backup file: %s", err) - } - t.Fatalf("backup file exists, but it should not because output file did not initially exist") - } -} - -func TestRefresh_var(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, 
testFixturePath("refresh-var"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - p.GetProviderSchemaResponse = refreshVarFixtureSchema() - - args := []string{ - "-var", "foo=bar", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("configure should be called") - } - if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { - t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestRefresh_varFile(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-var"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - p.GetProviderSchemaResponse = refreshVarFixtureSchema() - - varFilePath := testTempFile(t) - if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - args := []string{ - "-var-file", varFilePath, - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("configure should be called") - } - if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { - t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestRefresh_varFileDefault(t *testing.T) { - // Create a temporary working 
directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-var"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - p.GetProviderSchemaResponse = refreshVarFixtureSchema() - - varFilePath := filepath.Join(td, "terraform.tfvars") - if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { - t.Fatalf("err: %s", err) - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("configure should be called") - } - if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { - t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestRefresh_varsUnset(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-unset-var"), td) - defer testChdir(t, td)() - - // Disable test mode so input would be asked - test = false - defer func() { test = true }() - - defaultInputReader = bytes.NewBufferString("bar\n") - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - ui := new(cli.MockUi) - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: ui, - View: view, - }, - } - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - args 
:= []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } -} - -func TestRefresh_backup(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - // Output path - outf, err := ioutil.TempFile(td, "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - outPath := outf.Name() - defer outf.Close() - - // Need to put some state content in the output file so that there's - // something to back up. - err = statefile.Write(statefile.New(state, "baz", 0), outf) - if err != nil { - t.Fatalf("error writing initial output state file %s", err) - } - - // Backup path - backupf, err := ioutil.TempFile(td, "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - backupPath := backupf.Name() - backupf.Close() - os.Remove(backupPath) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("changed"), - }), - } - - args := []string{ - "-state", statePath, - "-state-out", outPath, - "-backup", backupPath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - newState := testStateRead(t, statePath) - if !cmp.Equal(newState, state, cmpopts.EquateEmpty()) { - t.Fatalf("got:\n%s\nexpected:\n%s\n", newState, state) - } - - newState = testStateRead(t, outPath) - actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - expected := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - 
AttrsJSON: []byte("{\n \"ami\": null,\n \"id\": \"changed\"\n }"), - Dependencies: []addrs.ConfigResource{}, - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) - } - - backupState := testStateRead(t, backupPath) - actualStr := strings.TrimSpace(backupState.String()) - expectedStr := strings.TrimSpace(state.String()) - if actualStr != expectedStr { - t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) - } -} - -func TestRefresh_disableBackup(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - // Output path - outf, err := ioutil.TempFile(td, "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - outPath := outf.Name() - outf.Close() - os.Remove(outPath) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.ReadResourceFn = nil - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yes"), - }), - } - - args := []string{ - "-state", statePath, - "-state-out", outPath, - "-backup", "-", - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - newState := testStateRead(t, statePath) - if !cmp.Equal(state, newState, equateEmpty) { - spew.Config.DisableMethods = true - fmt.Println(cmp.Diff(state, newState, equateEmpty)) - t.Fatalf("bad: %s", newState) - } - - newState = testStateRead(t, outPath) - actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current - expected := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte("{\n 
\"ami\": null,\n \"id\": \"yes\"\n }"), - Dependencies: []addrs.ConfigResource{}, - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) - } - - // Ensure there is no backup - _, err = os.Stat(outPath + DefaultBackupExtension) - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } - _, err = os.Stat("-") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("backup should not exist") - } -} - -func TestRefresh_displaysOutputs(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-output"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - args := []string{ - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - // Test that outputs were displayed - outputValue := "foo.example.com" - actual := output.Stdout() - if !strings.Contains(actual, outputValue) { - t.Fatalf("Expected:\n%s\n\nTo include: %q", actual, outputValue) - } -} - -// Config with multiple resources, targeting refresh of a subset -func TestRefresh_targeted(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("refresh-targeted"), td) - defer testChdir(t, td)() - - state := testState() - statePath := testStateFile(t, state) - - p := testProvider() - 
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args := []string{ - "-target", "test_instance.foo", - "-state", statePath, - } - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - got := output.Stdout() - if want := "test_instance.foo: Refreshing"; !strings.Contains(got, want) { - t.Fatalf("expected output to contain %q, got:\n%s", want, got) - } - if doNotWant := "test_instance.bar: Refreshing"; strings.Contains(got, doNotWant) { - t.Fatalf("expected output not to contain %q, got:\n%s", doNotWant, got) - } -} - -// Diagnostics for invalid -target flags -func TestRefresh_targetFlagsDiags(t *testing.T) { - testCases := map[string]string{ - "test_instance.": "Dot must be followed by attribute name.", - "test_instance": "Resource specification must include a resource type and name.", - } - - for target, wantDiag := range testCases { - t.Run(target, func(t *testing.T) { - td := testTempDir(t) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - View: view, - }, - } - - args := []string{ - "-target", target, - } - code := c.Run(args) - output := done(t) - if code != 1 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - - got := output.Stderr() - if !strings.Contains(got, target) { - t.Fatalf("bad error output, want %q, got:\n%s", target, got) - } - if 
!strings.Contains(got, wantDiag) { - t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) - } - }) - } -} - -func TestRefresh_warnings(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("apply"), td) - defer testChdir(t, td)() - - p := testProvider() - p.GetProviderSchemaResponse = refreshFixtureSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - Diagnostics: tfdiags.Diagnostics{ - tfdiags.SimpleWarning("warning 1"), - tfdiags.SimpleWarning("warning 2"), - }, - } - } - - t.Run("full warnings", func(t *testing.T) { - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - code := c.Run([]string{}) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - wantWarnings := []string{ - "warning 1", - "warning 2", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) - - t.Run("compact warnings", func(t *testing.T) { - view, done := testView(t) - c := &RefreshCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - code := c.Run([]string{"-compact-warnings"}) - output := done(t) - if code != 0 { - t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) - } - // the output should contain 2 warnings and a message about -compact-warnings - wantWarnings := []string{ - "warning 1", - "warning 2", - "To see the full warning notes, run Terraform without -compact-warnings.", - } - for _, want := range wantWarnings { - if !strings.Contains(output.Stdout(), want) { - t.Errorf("missing warning %s", want) - } - } - }) -} - -// configuration in testdata/refresh . 
This schema should be -// assigned to a mock provider named "test". -func refreshFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } -} - -// refreshVarFixtureSchema returns a schema suitable for processing the -// configuration in testdata/refresh-var . This schema should be -// assigned to a mock provider named "test". -func refreshVarFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - }, - }, - }, - }, - } -} - -const refreshVarFile = ` -foo = "bar" -` - -const testRefreshStr = ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] -` -const testRefreshCwdStr = ` -test_instance.foo: - ID = yes - provider = provider["registry.terraform.io/hashicorp/test"] -` diff --git a/internal/command/show.go b/internal/command/show.go deleted file mode 100644 index 3e58a979a870..000000000000 --- a/internal/command/show.go +++ /dev/null @@ -1,238 +0,0 @@ -package command - -import ( - "fmt" - "os" - "strings" - - "github.com/hashicorp/terraform/internal/backend" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - 
"github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ShowCommand is a Command implementation that reads and outputs the -// contents of a Terraform plan or state file. -type ShowCommand struct { - Meta -} - -func (c *ShowCommand) Run(rawArgs []string) int { - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Parse and validate flags - args, diags := arguments.ParseShow(rawArgs) - if diags.HasErrors() { - c.View.Diagnostics(diags) - c.View.HelpPrompt("show") - return 1 - } - - // Set up view - view := views.NewShow(args.ViewType, c.View) - - // Check for user-supplied plugin path - var err error - if c.pluginPath, err = c.loadPluginPath(); err != nil { - diags = diags.Append(fmt.Errorf("error loading plugin path: %s", err)) - view.Diagnostics(diags) - return 1 - } - - // Get the data we need to display - plan, stateFile, config, schemas, showDiags := c.show(args.Path) - diags = diags.Append(showDiags) - if showDiags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - // Display the data - return view.Display(config, plan, stateFile, schemas) -} - -func (c *ShowCommand) Help() string { - helpText := ` -Usage: terraform [global options] show [options] [path] - - Reads and outputs a Terraform state or plan file in a human-readable - form. If no path is specified, the current state will be shown. - -Options: - - -no-color If specified, output won't contain any color. - -json If specified, output the Terraform plan or state in - a machine-readable form. 
- -` - return strings.TrimSpace(helpText) -} - -func (c *ShowCommand) Synopsis() string { - return "Show the current state or a saved plan" -} - -func (c *ShowCommand) show(path string) (*plans.Plan, *statefile.File, *configs.Config, *terraform.Schemas, tfdiags.Diagnostics) { - var diags, showDiags tfdiags.Diagnostics - var plan *plans.Plan - var stateFile *statefile.File - var config *configs.Config - var schemas *terraform.Schemas - - // No plan file or state file argument provided, - // so get the latest state snapshot - if path == "" { - stateFile, showDiags = c.showFromLatestStateSnapshot() - diags = diags.Append(showDiags) - if showDiags.HasErrors() { - return plan, stateFile, config, schemas, diags - } - } - - // Plan file or state file argument provided, - // so try to load the argument as a plan file first. - // If that fails, try to load it as a statefile. - if path != "" { - plan, stateFile, config, showDiags = c.showFromPath(path) - diags = diags.Append(showDiags) - if showDiags.HasErrors() { - return plan, stateFile, config, schemas, diags - } - } - - // Get schemas, if possible - if config != nil || stateFile != nil { - schemas, diags = c.MaybeGetSchemas(stateFile.State, config) - if diags.HasErrors() { - return plan, stateFile, config, schemas, diags - } - } - - return plan, stateFile, config, schemas, diags -} -func (c *ShowCommand) showFromLatestStateSnapshot() (*statefile.File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Load the backend - b, backendDiags := c.Backend(nil) - diags = diags.Append(backendDiags) - if backendDiags.HasErrors() { - return nil, diags - } - c.ignoreRemoteVersionConflict(b) - - // Load the workspace - workspace, err := c.Workspace() - if err != nil { - diags = diags.Append(fmt.Errorf("error selecting workspace: %s", err)) - return nil, diags - } - - // Get the latest state snapshot from the backend for the current workspace - stateFile, stateErr := getStateFromBackend(b, workspace) - if stateErr != nil { 
- diags = diags.Append(stateErr) - return nil, diags - } - - return stateFile, diags -} - -func (c *ShowCommand) showFromPath(path string) (*plans.Plan, *statefile.File, *configs.Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var planErr, stateErr error - var plan *plans.Plan - var stateFile *statefile.File - var config *configs.Config - - // Try to get the plan file and associated data from - // the path argument. If that fails, try to get the - // statefile from the path argument. - plan, stateFile, config, planErr = getPlanFromPath(path) - if planErr != nil { - stateFile, stateErr = getStateFromPath(path) - if stateErr != nil { - diags = diags.Append( - tfdiags.Sourceless( - tfdiags.Error, - "Failed to read the given file as a state or plan file", - fmt.Sprintf("State read error: %s\n\nPlan read error: %s", stateErr, planErr), - ), - ) - return nil, nil, nil, diags - } - } - return plan, stateFile, config, diags -} - -// getPlanFromPath returns a plan, statefile, and config if the user-supplied -// path points to a plan file. If both plan and error are nil, the path is likely -// a directory. An error could suggest that the given path points to a statefile. -func getPlanFromPath(path string) (*plans.Plan, *statefile.File, *configs.Config, error) { - planReader, err := planfile.Open(path) - if err != nil { - return nil, nil, nil, err - } - - // Get plan - plan, err := planReader.ReadPlan() - if err != nil { - return nil, nil, nil, err - } - - // Get statefile - stateFile, err := planReader.ReadStateFile() - if err != nil { - return nil, nil, nil, err - } - - // Get config - config, diags := planReader.ReadConfig() - if diags.HasErrors() { - return nil, nil, nil, diags.Err() - } - - return plan, stateFile, config, err -} - -// getStateFromPath returns a statefile if the user-supplied path points to a statefile. 
-func getStateFromPath(path string) (*statefile.File, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("Error loading statefile: %s", err) - } - defer file.Close() - - var stateFile *statefile.File - stateFile, err = statefile.Read(file) - if err != nil { - return nil, fmt.Errorf("Error reading %s as a statefile: %s", path, err) - } - return stateFile, nil -} - -// getStateFromBackend returns the State for the current workspace, if available. -func getStateFromBackend(b backend.Backend, workspace string) (*statefile.File, error) { - // Get the state store for the given workspace - stateStore, err := b.StateMgr(workspace) - if err != nil { - return nil, fmt.Errorf("Failed to load state manager: %s", err) - } - - // Refresh the state store with the latest state snapshot from persistent storage - if err := stateStore.RefreshState(); err != nil { - return nil, fmt.Errorf("Failed to load state: %s", err) - } - - // Get the latest state snapshot and return it - stateFile := statemgr.Export(stateStore) - return stateFile, nil -} diff --git a/internal/command/show_test.go b/internal/command/show_test.go deleted file mode 100644 index 169b75dc1b4e..000000000000 --- a/internal/command/show_test.go +++ /dev/null @@ -1,1156 +0,0 @@ -package command - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" -) - -func TestShow_badArgs(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ 
- Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "bad", - "bad", - "-no-color", - } - - code := c.Run(args) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) - } -} - -func TestShow_noArgsNoState(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - code := c.Run([]string{}) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `No state.` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } -} - -func TestShow_noArgsWithState(t *testing.T) { - // Get a temp cwd - testCwd(t) - // Create the default state - testStateFileDefault(t, testState()) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - code := c.Run([]string{}) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `# test_instance.foo:` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } -} - -func TestShow_argsWithState(t *testing.T) { - // Create the default state - statePath := testStateFile(t, testState()) - stateDir := filepath.Dir(statePath) - defer os.RemoveAll(stateDir) - defer testChdir(t, stateDir)() - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - path := filepath.Base(statePath) - args := []string{ - path, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status 
%d; want 0\ngot: %s", code, output.Stderr()) - } -} - -// https://github.com/hashicorp/terraform/issues/21462 -func TestShow_argsWithStateAliasedProvider(t *testing.T) { - // Create the default state with aliased resource - testState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - // The weird whitespace here is reflective of how this would - // get written out in a real state file, due to the indentation - // of all of the containing wrapping objects and arrays. - AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"), - Status: states.ObjectReady, - Dependencies: []addrs.ConfigResource{}, - }, - addrs.RootModuleInstance.ProviderConfigAliased(addrs.NewDefaultProvider("test"), "alias"), - ) - }) - - statePath := testStateFile(t, testState) - stateDir := filepath.Dir(statePath) - defer os.RemoveAll(stateDir) - defer testChdir(t, stateDir)() - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - path := filepath.Base(statePath) - args := []string{ - path, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `# missing schema for provider \"test.alias\"` - if strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s", got) - } -} - -func TestShow_argsPlanFileDoesNotExist(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "doesNotExist.tfplan", - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; 
want 1\ngot: %s", code, output.Stdout()) - } - - got := output.Stderr() - want := `Plan read error: open doesNotExist.tfplan:` - if !strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -func TestShow_argsStatefileDoesNotExist(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "doesNotExist.tfstate", - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) - } - - got := output.Stderr() - want := `State read error: Error loading statefile:` - if !strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -func TestShow_json_argsPlanFileDoesNotExist(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-json", - "doesNotExist.tfplan", - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) - } - - got := output.Stderr() - want := `Plan read error: open doesNotExist.tfplan:` - if !strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -func TestShow_json_argsStatefileDoesNotExist(t *testing.T) { - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - "-json", - "doesNotExist.tfstate", - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) - } - - got := output.Stderr() - want := `State read error: Error loading statefile:` - if 
!strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -func TestShow_planNoop(t *testing.T) { - planPath := testPlanFileNoop(t) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - planPath, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `No changes. Your infrastructure matches the configuration.` - if !strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -func TestShow_planWithChanges(t *testing.T) { - planPathWithChanges := showFixturePlanFile(t, plans.DeleteThenCreate) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - args := []string{ - planPathWithChanges, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `test_instance.foo must be replaced` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } -} - -func TestShow_planWithForceReplaceChange(t *testing.T) { - // The main goal of this test is to see that the "replace by request" - // resource instance action reason can round-trip through a plan file and - // be reflected correctly in the "terraform show" output, the same way - // as it would appear in "terraform plan" output. 
- - _, snap := testModuleWithSnapshot(t, "show") - plannedVal := cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("bar"), - }) - priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plan := testPlan(t) - plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - Before: priorValRaw, - After: plannedValRaw, - }, - ActionReason: plans.ResourceInstanceReplaceByRequest, - }) - planFilePath := testPlanFile( - t, - snap, - states.NewState(), - plan, - ) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - args := []string{ - planFilePath, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `test_instance.foo will be replaced, as requested` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } - - want = `Plan: 1 to add, 0 to change, 1 to destroy.` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } - -} - -func TestShow_plan_json(t *testing.T) { - planPath := showFixturePlanFile(t, plans.Create) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: 
metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - args := []string{ - "-json", - planPath, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } -} - -func TestShow_state(t *testing.T) { - originalState := testState() - root := originalState.RootModule() - root.SetOutputValue("test", cty.ObjectVal(map[string]cty.Value{ - "attr": cty.NullVal(cty.DynamicPseudoType), - "null": cty.NullVal(cty.String), - "list": cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), - }), false) - - statePath := testStateFile(t, originalState) - defer os.RemoveAll(filepath.Dir(statePath)) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(showFixtureProvider()), - View: view, - }, - } - - args := []string{ - statePath, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } -} - -func TestShow_json_output(t *testing.T) { - fixtureDir := "testdata/show-json" - testDirs, err := ioutil.ReadDir(fixtureDir) - if err != nil { - t.Fatal(err) - } - - for _, entry := range testDirs { - if !entry.IsDir() { - continue - } - - t.Run(entry.Name(), func(t *testing.T) { - td := t.TempDir() - inputDir := filepath.Join(fixtureDir, entry.Name()) - testCopyDir(t, inputDir, td) - defer testChdir(t, td)() - - expectError := strings.Contains(entry.Name(), "error") - - providerSource, close := newMockProviderSource(t, map[string][]string{ - "test": {"1.2.3"}, - "hashicorp2/test": {"1.2.3"}, - }) - defer close() - - p := showFixtureProvider() - - // init - ui := new(cli.MockUi) - ic := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: ui, - ProviderSource: providerSource, - }, - } - if code := ic.Run([]string{}); code != 0 { - if expectError { - // this should error, but not panic. 
- return - } - t.Fatalf("init failed\n%s", ui.ErrorWriter) - } - - // plan - planView, planDone := testView(t) - pc := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: planView, - ProviderSource: providerSource, - }, - } - - args := []string{ - "-out=terraform.plan", - } - - code := pc.Run(args) - planOutput := planDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) - } - - // show - showView, showDone := testView(t) - sc := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: showView, - ProviderSource: providerSource, - }, - } - - args = []string{ - "-json", - "terraform.plan", - } - defer os.Remove("terraform.plan") - code = sc.Run(args) - showOutput := showDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) - } - - // compare view output to wanted output - var got, want plan - - gotString := showOutput.Stdout() - json.Unmarshal([]byte(gotString), &got) - - wantFile, err := os.Open("output.json") - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - defer wantFile.Close() - byteValue, err := ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - json.Unmarshal([]byte(byteValue), &want) - - // Disregard format version to reduce needless test fixture churn - want.FormatVersion = got.FormatVersion - - if !cmp.Equal(got, want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) - } - }) - } -} - -func TestShow_json_output_sensitive(t *testing.T) { - td := t.TempDir() - inputDir := "testdata/show-json-sensitive" - testCopyDir(t, inputDir, td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) - defer close() - - p := showFixtureSensitiveProvider() - - // init - ui := new(cli.MockUi) - ic := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - 
Ui: ui, - ProviderSource: providerSource, - }, - } - if code := ic.Run([]string{}); code != 0 { - t.Fatalf("init failed\n%s", ui.ErrorWriter) - } - - // plan - planView, planDone := testView(t) - pc := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: planView, - ProviderSource: providerSource, - }, - } - - args := []string{ - "-out=terraform.plan", - } - code := pc.Run(args) - planOutput := planDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) - } - - // show - showView, showDone := testView(t) - sc := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: showView, - ProviderSource: providerSource, - }, - } - - args = []string{ - "-json", - "terraform.plan", - } - defer os.Remove("terraform.plan") - code = sc.Run(args) - showOutput := showDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) - } - - // compare ui output to wanted output - var got, want plan - - gotString := showOutput.Stdout() - json.Unmarshal([]byte(gotString), &got) - - wantFile, err := os.Open("output.json") - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - defer wantFile.Close() - byteValue, err := ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - json.Unmarshal([]byte(byteValue), &want) - - // Disregard format version to reduce needless test fixture churn - want.FormatVersion = got.FormatVersion - - if !cmp.Equal(got, want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) - } -} - -// Failing conditions are only present in JSON output for refresh-only plans, -// so we test that separately here. 
-func TestShow_json_output_conditions_refresh_only(t *testing.T) { - td := t.TempDir() - inputDir := "testdata/show-json/conditions" - testCopyDir(t, inputDir, td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) - defer close() - - p := showFixtureSensitiveProvider() - - // init - ui := new(cli.MockUi) - ic := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: ui, - ProviderSource: providerSource, - }, - } - if code := ic.Run([]string{}); code != 0 { - t.Fatalf("init failed\n%s", ui.ErrorWriter) - } - - // plan - planView, planDone := testView(t) - pc := &PlanCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: planView, - ProviderSource: providerSource, - }, - } - - args := []string{ - "-refresh-only", - "-out=terraform.plan", - "-var=ami=bad-ami", - "-state=for-refresh.tfstate", - } - code := pc.Run(args) - planOutput := planDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) - } - - // show - showView, showDone := testView(t) - sc := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: showView, - ProviderSource: providerSource, - }, - } - - args = []string{ - "-json", - "terraform.plan", - } - defer os.Remove("terraform.plan") - code = sc.Run(args) - showOutput := showDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) - } - - // compare JSON output to wanted output - var got, want plan - - gotString := showOutput.Stdout() - json.Unmarshal([]byte(gotString), &got) - - wantFile, err := os.Open("output-refresh-only.json") - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - defer wantFile.Close() - byteValue, err := ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - json.Unmarshal([]byte(byteValue), &want) - - // Disregard format version to 
reduce needless test fixture churn - want.FormatVersion = got.FormatVersion - - if !cmp.Equal(got, want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) - } -} - -// similar test as above, without the plan -func TestShow_json_output_state(t *testing.T) { - fixtureDir := "testdata/show-json-state" - testDirs, err := ioutil.ReadDir(fixtureDir) - if err != nil { - t.Fatal(err) - } - - for _, entry := range testDirs { - if !entry.IsDir() { - continue - } - - t.Run(entry.Name(), func(t *testing.T) { - td := t.TempDir() - inputDir := filepath.Join(fixtureDir, entry.Name()) - testCopyDir(t, inputDir, td) - defer testChdir(t, td)() - - providerSource, close := newMockProviderSource(t, map[string][]string{ - "test": {"1.2.3"}, - }) - defer close() - - p := showFixtureProvider() - - // init - ui := new(cli.MockUi) - ic := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - Ui: ui, - ProviderSource: providerSource, - }, - } - if code := ic.Run([]string{}); code != 0 { - t.Fatalf("init failed\n%s", ui.ErrorWriter) - } - - // show - showView, showDone := testView(t) - sc := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: showView, - ProviderSource: providerSource, - }, - } - - code := sc.Run([]string{"-json"}) - showOutput := showDone(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) - } - - // compare ui output to wanted output - type state struct { - FormatVersion string `json:"format_version,omitempty"` - TerraformVersion string `json:"terraform_version"` - Values map[string]interface{} `json:"values,omitempty"` - SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` - } - var got, want state - - gotString := showOutput.Stdout() - json.Unmarshal([]byte(gotString), &got) - - wantFile, err := os.Open("output.json") - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer wantFile.Close() - byteValue, err := 
ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("unexpected err: %s", err) - } - json.Unmarshal([]byte(byteValue), &want) - - if !cmp.Equal(got, want) { - t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) - } - }) - } -} - -func TestShow_planWithNonDefaultStateLineage(t *testing.T) { - // Create a temporary working directory that is empty - td := t.TempDir() - testCopyDir(t, testFixturePath("show"), td) - defer testChdir(t, td)() - - // Write default state file with a testing lineage ("fake-for-testing") - testStateFileDefault(t, testState()) - - // Create a plan with a different lineage, which we should still be able - // to show - _, snap := testModuleWithSnapshot(t, "show") - state := testState() - plan := testPlan(t) - stateMeta := statemgr.SnapshotMeta{ - Lineage: "fake-for-plan", - Serial: 1, - TerraformVersion: version.SemVer, - } - planPath := testPlanFileMatchState(t, snap, state, plan, stateMeta) - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{ - planPath, - "-no-color", - } - code := c.Run(args) - output := done(t) - - if code != 0 { - t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) - } - - got := output.Stdout() - want := `No changes. 
Your infrastructure matches the configuration.` - if !strings.Contains(got, want) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } -} - -func TestShow_corruptStatefile(t *testing.T) { - td := t.TempDir() - inputDir := "testdata/show-corrupt-statefile" - testCopyDir(t, inputDir, td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &ShowCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - code := c.Run([]string{}) - output := done(t) - - if code != 1 { - t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) - } - - got := output.Stderr() - want := `Unsupported state file format` - if !strings.Contains(got, want) { - t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) - } -} - -// showFixtureSchema returns a schema suitable for processing the configuration -// in testdata/show. This schema should be assigned to a mock provider -// named "test". -func showFixtureSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } -} - -// showFixtureSensitiveSchema returns a schema suitable for processing the configuration -// in testdata/show. This schema should be assigned to a mock provider -// named "test". It includes a sensitive attribute. 
-func showFixtureSensitiveSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "password": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - }, - }, - } -} - -// showFixtureProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/show. This mock has -// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, -// with the plan/apply steps just passing through the data determined by -// Terraform Core. -func showFixtureProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = showFixtureSchema() - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - idVal := req.PriorState.GetAttr("id") - amiVal := req.PriorState.GetAttr("ami") - if amiVal.RawEquals(cty.StringVal("refresh-me")) { - amiVal = cty.StringVal("refreshed") - } - return providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": idVal, - "ami": amiVal, - }), - Private: req.Private, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // this is a destroy plan, - if req.ProposedNewState.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - idVal := req.ProposedNewState.GetAttr("id") - amiVal := req.ProposedNewState.GetAttr("ami") - if idVal.IsNull() { - idVal = cty.UnknownVal(cty.String) - } - var reqRep []cty.Path - if 
amiVal.RawEquals(cty.StringVal("force-replace")) { - reqRep = append(reqRep, cty.GetAttrPath("ami")) - } - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "id": idVal, - "ami": amiVal, - }), - RequiresReplace: reqRep, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - idVal := req.PlannedState.GetAttr("id") - amiVal := req.PlannedState.GetAttr("ami") - if !idVal.IsKnown() { - idVal = cty.StringVal("placeholder") - } - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": idVal, - "ami": amiVal, - }), - } - } - return p -} - -// showFixtureSensitiveProvider returns a mock provider that is configured for basic -// operation with the configuration in testdata/show. This mock has -// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, -// with the plan/apply steps just passing through the data determined by -// Terraform Core. It also has a sensitive attribute in the provider schema. 
-func showFixtureSensitiveProvider() *terraform.MockProvider { - p := testProvider() - p.GetProviderSchemaResponse = showFixtureSensitiveSchema() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - idVal := req.ProposedNewState.GetAttr("id") - if idVal.IsNull() { - idVal = cty.UnknownVal(cty.String) - } - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "id": idVal, - "ami": req.ProposedNewState.GetAttr("ami"), - "password": req.ProposedNewState.GetAttr("password"), - }), - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - idVal := req.PlannedState.GetAttr("id") - if !idVal.IsKnown() { - idVal = cty.StringVal("placeholder") - } - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": idVal, - "ami": req.PlannedState.GetAttr("ami"), - "password": req.PlannedState.GetAttr("password"), - }), - } - } - return p -} - -// showFixturePlanFile creates a plan file at a temporary location containing a -// single change to create or update the test_instance.foo that is included in the "show" -// test fixture, returning the location of that plan file. 
-// `action` is the planned change you would like to elicit -func showFixturePlanFile(t *testing.T, action plans.Action) string { - _, snap := testModuleWithSnapshot(t, "show") - plannedVal := cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "ami": cty.StringVal("bar"), - }) - priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plan := testPlan(t) - plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: action, - Before: priorValRaw, - After: plannedValRaw, - }, - }) - return testPlanFile( - t, - snap, - states.NewState(), - plan, - ) -} - -// this simplified plan struct allows us to preserve field order when marshaling -// the command output. NOTE: we are leaving "terraform_version" out of this test -// to avoid needing to constantly update the expected output; as a potential -// TODO we could write a jsonplan compare function. 
-type plan struct { - FormatVersion string `json:"format_version,omitempty"` - Variables map[string]interface{} `json:"variables,omitempty"` - PlannedValues map[string]interface{} `json:"planned_values,omitempty"` - ResourceDrift []interface{} `json:"resource_drift,omitempty"` - ResourceChanges []interface{} `json:"resource_changes,omitempty"` - OutputChanges map[string]interface{} `json:"output_changes,omitempty"` - PriorState priorState `json:"prior_state,omitempty"` - Config map[string]interface{} `json:"configuration,omitempty"` -} - -type priorState struct { - FormatVersion string `json:"format_version,omitempty"` - Values map[string]interface{} `json:"values,omitempty"` - SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` -} diff --git a/internal/command/state_test.go b/internal/command/state_test.go deleted file mode 100644 index cd2e830125db..000000000000 --- a/internal/command/state_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package command - -import ( - "path/filepath" - "regexp" - "sort" - "testing" - - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// testStateBackups returns the list of backups in order of creation -// (oldest first) in the given directory. 
-func testStateBackups(t *testing.T, dir string) []string { - // Find all the backups - list, err := filepath.Glob(filepath.Join(dir, "*"+DefaultBackupExtension)) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Sort them which will put them naturally in the right order - sort.Strings(list) - - return list -} - -func TestStateDefaultBackupExtension(t *testing.T) { - testCwd(t) - - s, err := (&StateMeta{}).State() - if err != nil { - t.Fatal(err) - } - - backupPath := s.(*statemgr.Filesystem).BackupPath() - match := regexp.MustCompile(`terraform\.tfstate\.\d+\.backup$`).MatchString - if !match(backupPath) { - t.Fatal("Bad backup path:", backupPath) - } -} diff --git a/internal/command/test.go b/internal/command/test.go deleted file mode 100644 index 1f18689f1bc5..000000000000 --- a/internal/command/test.go +++ /dev/null @@ -1,730 +0,0 @@ -package command - -import ( - "context" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/moduletest" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providercache" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// TestCommand is the implementation of "terraform test". 
-type TestCommand struct { - Meta -} - -func (c *TestCommand) Run(rawArgs []string) int { - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - args, diags := arguments.ParseTest(rawArgs) - view := views.NewTest(c.View, args.Output) - if diags.HasErrors() { - view.Diagnostics(diags) - return 1 - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - `The "terraform test" command is experimental`, - "We'd like to invite adventurous module authors to write integration tests for their modules using this command, but all of the behaviors of this command are currently experimental and may change based on feedback.\n\nFor more information on the testing experiment, including ongoing research goals and avenues for feedback, see:\n https://www.terraform.io/docs/language/modules/testing-experiment.html", - )) - - ctx, cancel := c.InterruptibleContext() - defer cancel() - - results, moreDiags := c.run(ctx, args) - diags = diags.Append(moreDiags) - - initFailed := diags.HasErrors() - view.Diagnostics(diags) - diags = view.Results(results) - resultsFailed := diags.HasErrors() - view.Diagnostics(diags) // possible additional errors from saving the results - - var testsFailed bool - for _, suite := range results { - for _, component := range suite.Components { - for _, assertion := range component.Assertions { - if !assertion.Outcome.SuiteCanPass() { - testsFailed = true - } - } - } - } - - // Lots of things can possibly have failed - if initFailed || resultsFailed || testsFailed { - return 1 - } - return 0 -} - -func (c *TestCommand) run(ctx context.Context, args arguments.Test) (results map[string]*moduletest.Suite, diags tfdiags.Diagnostics) { - suiteNames, err := c.collectSuiteNames() - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error while searching for test configurations", - fmt.Sprintf("While attempting to scan the 'tests' subdirectory for potential test 
configurations, Terraform encountered an error: %s.", err), - )) - return nil, diags - } - - ret := make(map[string]*moduletest.Suite, len(suiteNames)) - for _, suiteName := range suiteNames { - if ctx.Err() != nil { - // If the context has already failed in some way then we'll - // halt early and report whatever's already happened. - break - } - suite, moreDiags := c.runSuite(ctx, suiteName) - diags = diags.Append(moreDiags) - ret[suiteName] = suite - } - - return ret, diags -} - -func (c *TestCommand) runSuite(ctx context.Context, suiteName string) (*moduletest.Suite, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := moduletest.Suite{ - Name: suiteName, - Components: map[string]*moduletest.Component{}, - } - - // In order to make this initial round of "terraform test" pretty self - // contained while it's experimental, it's largely just mimicking what - // would happen when running the main Terraform workflow commands, which - // comes at the expense of a few irritants that we'll hopefully resolve - // in future iterations as the design solidifies: - // - We need to install remote modules separately for each of the - // test suites, because we don't have any sense of a shared cache - // of modules that multiple configurations can refer to at once. - // - We _do_ have a sense of a cache of remote providers, but it's fixed - // at being specifically a two-level cache (global vs. directory-specific) - // and so we can't easily capture a third level of "all of the test suites - // for this module" that sits between the two. Consequently, we need to - // dynamically choose between creating a directory-specific "global" - // cache or using the user's existing global cache, to avoid any - // situation were we'd be re-downloading the same providers for every - // one of the test suites. 
- // - We need to do something a bit horrid in order to have our test - // provider instance persist between the plan and apply steps, because - // normally that is the exact opposite of what we want. - // The above notes are here mainly as an aid to someone who might be - // planning a subsequent phase of this R&D effort, to help distinguish - // between things we're doing here because they are valuable vs. things - // we're doing just to make it work without doing any disruptive - // refactoring. - - suiteDirs, moreDiags := c.prepareSuiteDir(ctx, suiteName) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - // Generate a special failure representing the test initialization - // having failed, since we therefore won'tbe able to run the actual - // tests defined inside. - ret.Components["(init)"] = &moduletest.Component{ - Assertions: map[string]*moduletest.Assertion{ - "(init)": { - Outcome: moduletest.Error, - Description: "terraform init", - Message: "failed to install test suite dependencies", - Diagnostics: diags, - }, - }, - } - return &ret, nil - } - - // When we run the suite itself, we collect up diagnostics associated - // with individual components, so ret.Components may or may not contain - // failed/errored components after runTestSuite returns. - var finalState *states.State - ret.Components, finalState = c.runTestSuite(ctx, suiteDirs) - - // Regardless of the success or failure of the test suite, if there are - // any objects left in the state then we'll generate a top-level error - // about each one to minimize the chance of the user failing to notice - // that there are leftover objects that might continue to cost money - // unless manually deleted. 
- for _, ms := range finalState.Modules { - for _, rs := range ms.Resources { - for instanceKey, is := range rs.Instances { - var objs []*states.ResourceInstanceObjectSrc - if is.Current != nil { - objs = append(objs, is.Current) - } - for _, obj := range is.Deposed { - objs = append(objs, obj) - } - for _, obj := range objs { - // Unfortunately we don't have provider schemas out here - // and so we're limited in what we can achieve with these - // ResourceInstanceObjectSrc values, but we can try some - // heuristicy things to try to give some useful information - // in common cases. - var k, v string - if ty, err := ctyjson.ImpliedType(obj.AttrsJSON); err == nil { - if approxV, err := ctyjson.Unmarshal(obj.AttrsJSON, ty); err == nil { - k, v = format.ObjectValueIDOrName(approxV) - } - } - - var detail string - if k != "" { - // We can be more specific if we were able to infer - // an identifying attribute for this object. - detail = fmt.Sprintf( - "Due to errors during destroy, test suite %q has left behind an object for %s, with the following identity:\n %s = %q\n\nYou will need to delete this object manually in the remote system, or else it may have an ongoing cost.", - suiteName, - rs.Addr.Instance(instanceKey), - k, v, - ) - } else { - // If our heuristics for finding a suitable identifier - // failed then unfortunately we must be more vague. - // (We can't just print the entire object, because it - // might be overly large and it might contain sensitive - // values.) - detail = fmt.Sprintf( - "Due to errors during destroy, test suite %q has left behind an object for %s. 
You will need to delete this object manually in the remote system, or else it may have an ongoing cost.", - suiteName, - rs.Addr.Instance(instanceKey), - ) - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to clean up after tests", - detail, - )) - } - } - } - } - - return &ret, diags -} - -func (c *TestCommand) prepareSuiteDir(ctx context.Context, suiteName string) (testCommandSuiteDirs, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - configDir := filepath.Join("tests", suiteName) - log.Printf("[TRACE] terraform test: Prepare directory for suite %q in %s", suiteName, configDir) - - suiteDirs := testCommandSuiteDirs{ - SuiteName: suiteName, - ConfigDir: configDir, - } - - // Before we can run a test suite we need to make sure that we have all of - // its dependencies available, so the following is essentially an - // abbreviated form of what happens during "terraform init", with some - // extra trickery in places. - - // First, module installation. This will include linking in the module - // under test, but also includes grabbing the dependencies of that module - // if it has any. - suiteDirs.ModulesDir = filepath.Join(configDir, ".terraform", "modules") - os.MkdirAll(suiteDirs.ModulesDir, 0755) // if this fails then we'll ignore it and let InstallModules below fail instead - reg := c.registryClient() - moduleInst := initwd.NewModuleInstaller(suiteDirs.ModulesDir, reg) - _, moreDiags := moduleInst.InstallModules(ctx, configDir, true, nil) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - return suiteDirs, diags - } - - // The installer puts the files in a suitable place on disk, but we - // still need to actually load the configuration. We need to do this - // with a separate config loader because the Meta.configLoader instance - // is intended for interacting with the current working directory, not - // with the test suite subdirectories. 
- loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: suiteDirs.ModulesDir, - Services: c.Services, - }) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create test configuration loader", - fmt.Sprintf("Failed to prepare loader for test configuration %s: %s.", configDir, err), - )) - return suiteDirs, diags - } - cfg, hclDiags := loader.LoadConfig(configDir) - diags = diags.Append(hclDiags) - if diags.HasErrors() { - return suiteDirs, diags - } - suiteDirs.Config = cfg - - // With the full configuration tree available, we can now install - // the necessary providers. We'll use a separate local cache directory - // here, because the test configuration might have additional requirements - // compared to the module itself. - suiteDirs.ProvidersDir = filepath.Join(configDir, ".terraform", "providers") - os.MkdirAll(suiteDirs.ProvidersDir, 0755) // if this fails then we'll ignore it and operations below fail instead - localCacheDir := providercache.NewDir(suiteDirs.ProvidersDir) - providerInst := c.providerInstaller().Clone(localCacheDir) - if !providerInst.HasGlobalCacheDir() { - // If the user already configured a global cache directory then we'll - // just use it for caching the test providers too, because then we - // can potentially reuse cache entries they already have. However, - // if they didn't configure one then we'll still establish one locally - // in the working directory, which we'll then share across all tests - // to avoid downloading the same providers repeatedly. - cachePath := filepath.Join(c.DataDir(), "testing-providers") // note this is _not_ under the suite dir - err := os.MkdirAll(cachePath, 0755) - // If we were unable to create the directory for any reason then we'll - // just proceed without a cache, at the expense of repeated downloads. - // (With that said, later installing might end up failing for the - // same reason anyway...) 
- if err == nil || os.IsExist(err) { - cacheDir := providercache.NewDir(cachePath) - providerInst.SetGlobalCacheDir(cacheDir) - } - } - reqs, hclDiags := cfg.ProviderRequirements() - diags = diags.Append(hclDiags) - if diags.HasErrors() { - return suiteDirs, diags - } - - // For test suites we only retain the "locks" in memory for the duration - // for one run, just to make sure that we use the same providers when we - // eventually run the test suite. - locks := depsfile.NewLocks() - evts := &providercache.InstallerEvents{ - QueryPackagesFailure: func(provider addrs.Provider, err error) { - if err != nil && addrs.IsDefaultProvider(provider) && provider.Type == "test" { - // This is some additional context for the failure error - // we'll generate afterwards. Not the most ideal UX but - // good enough for this prototype implementation, to help - // hint about the special builtin provider we use here. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Probably-unintended reference to \"hashicorp/test\" provider", - "For the purposes of this experimental implementation of module test suites, you must use the built-in test provider terraform.io/builtin/test, which requires an explicit required_providers declaration.", - )) - } - }, - } - ctx = evts.OnContext(ctx) - locks, err = providerInst.EnsureProviderVersions(ctx, locks, reqs, providercache.InstallUpgrades) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to install required providers", - fmt.Sprintf("Couldn't install necessary providers for test configuration %s: %s.", configDir, err), - )) - return suiteDirs, diags - } - suiteDirs.ProviderLocks = locks - suiteDirs.ProviderCache = localCacheDir - - return suiteDirs, diags -} - -func (c *TestCommand) runTestSuite(ctx context.Context, suiteDirs testCommandSuiteDirs) (map[string]*moduletest.Component, *states.State) { - log.Printf("[TRACE] terraform test: Run test suite %q", suiteDirs.SuiteName) - - ret := 
make(map[string]*moduletest.Component) - - // To collect test results we'll use an instance of the special "test" - // provider, which records the intention to make a test assertion during - // planning and then hopefully updates that to an actual assertion result - // during apply, unless an apply error causes the graph walk to exit early. - // For this to work correctly, we must ensure we're using the same provider - // instance for both plan and apply. - testProvider := moduletest.NewProvider() - - // synthError is a helper to return early with a synthetic failing - // component, for problems that prevent us from even discovering what an - // appropriate component and assertion name might be. - state := states.NewState() - synthError := func(name string, desc string, msg string, diags tfdiags.Diagnostics) (map[string]*moduletest.Component, *states.State) { - key := "(" + name + ")" // parens ensure this can't conflict with an actual component/assertion key - ret[key] = &moduletest.Component{ - Assertions: map[string]*moduletest.Assertion{ - key: { - Outcome: moduletest.Error, - Description: desc, - Message: msg, - Diagnostics: diags, - }, - }, - } - return ret, state - } - - // NOTE: This function intentionally deviates from the usual pattern of - // gradually appending more diagnostics to the same diags, because - // here we're associating each set of diagnostics with the specific - // operation it belongs to. - - providerFactories, diags := c.testSuiteProviders(suiteDirs, testProvider) - if diags.HasErrors() { - // It should be unusual to get in here, because testSuiteProviders - // should rely only on things guaranteed by prepareSuiteDir, but - // since we're doing external I/O here there is always the risk that - // the filesystem changes or fails between setting up and using the - // providers. 
- return synthError( - "init", - "terraform init", - "failed to resolve the required providers", - diags, - ) - } - - plan, diags := c.testSuitePlan(ctx, suiteDirs, providerFactories) - if diags.HasErrors() { - // It should be unusual to get in here, because testSuitePlan - // should rely only on things guaranteed by prepareSuiteDir, but - // since we're doing external I/O here there is always the risk that - // the filesystem changes or fails between setting up and using the - // providers. - return synthError( - "plan", - "terraform plan", - "failed to create a plan", - diags, - ) - } - - // Now we'll apply the plan. Once we try to apply, we might've created - // real remote objects, and so we must try to run destroy even if the - // apply returns errors, and we must return whatever state we end up - // with so the caller can generate additional loud errors if anything - // is left in it. - - state, diags = c.testSuiteApply(ctx, plan, suiteDirs, providerFactories) - if diags.HasErrors() { - // We don't return here, unlike the others above, because we want to - // continue to the destroy below even if there are apply errors. - synthError( - "apply", - "terraform apply", - "failed to apply the created plan", - diags, - ) - } - - // By the time we get here, the test provider will have gathered up all - // of the planned assertions and the final results for any assertions that - // were not blocked by an error. This also resets the provider so that - // the destroy operation below won't get tripped up on stale results. 
- ret = testProvider.Reset() - - state, diags = c.testSuiteDestroy(ctx, state, suiteDirs, providerFactories) - if diags.HasErrors() { - synthError( - "destroy", - "terraform destroy", - "failed to destroy objects created during test (NOTE: leftover remote objects may still exist)", - diags, - ) - } - - return ret, state -} - -func (c *TestCommand) testSuiteProviders(suiteDirs testCommandSuiteDirs, testProvider *moduletest.Provider) (map[addrs.Provider]providers.Factory, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := make(map[addrs.Provider]providers.Factory) - - // We can safely use the internal providers returned by Meta here because - // the built-in provider versions can never vary based on the configuration - // and thus we don't need to worry about potential version differences - // between main module and test suite modules. - for name, factory := range c.internalProviders() { - ret[addrs.NewBuiltInProvider(name)] = factory - } - - // For the remaining non-builtin providers, we'll just take whatever we - // recorded earlier in the in-memory-only "lock file". All of these should - // typically still be available because we would've only just installed - // them, but this could fail if e.g. the filesystem has been somehow - // damaged in the meantime. 
- for provider, lock := range suiteDirs.ProviderLocks.AllProviders() { - version := lock.Version() - cached := suiteDirs.ProviderCache.ProviderVersion(provider, version) - if cached == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Required provider not found", - fmt.Sprintf("Although installation previously succeeded for %s v%s, it no longer seems to be present in the cache directory.", provider.ForDisplay(), version.String()), - )) - continue // potentially collect up multiple errors - } - - // NOTE: We don't consider the checksums for test suite dependencies, - // because we're creating a fresh "lock file" each time we run anyway - // and so they wouldn't actually guarantee anything useful. - - ret[provider] = providerFactory(cached) - } - - // We'll replace the test provider instance with the one our caller - // provided, so it'll be able to interrogate the test results directly. - ret[addrs.NewBuiltInProvider("test")] = func() (providers.Interface, error) { - return testProvider, nil - } - - return ret, diags -} - -type testSuiteRunContext struct { - Core *terraform.Context - - PlanMode plans.Mode - Config *configs.Config - InputState *states.State - Changes *plans.Changes -} - -func (c *TestCommand) testSuiteContext(suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory, state *states.State, plan *plans.Plan, destroy bool) (*testSuiteRunContext, tfdiags.Diagnostics) { - var changes *plans.Changes - if plan != nil { - changes = plan.Changes - } - - planMode := plans.NormalMode - if destroy { - planMode = plans.DestroyMode - } - - tfCtx, diags := terraform.NewContext(&terraform.ContextOpts{ - Providers: providerFactories, - - // We just use the provisioners from the main Meta here, because - // unlike providers provisioner plugins are not automatically - // installable anyway, and so we'll need to hunt for them in the same - // legacy way that normal Terraform operations do. 
- Provisioners: c.provisionerFactories(), - - Meta: &terraform.ContextMeta{ - Env: "test_" + suiteDirs.SuiteName, - }, - }) - if diags.HasErrors() { - return nil, diags - } - return &testSuiteRunContext{ - Core: tfCtx, - - PlanMode: planMode, - Config: suiteDirs.Config, - InputState: state, - Changes: changes, - }, diags -} - -func (c *TestCommand) testSuitePlan(ctx context.Context, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*plans.Plan, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform test: create plan for suite %q", suiteDirs.SuiteName) - runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, nil, false) - if diags.HasErrors() { - return nil, diags - } - - // We'll also validate as part of planning, to ensure that the test - // configuration would pass "terraform validate". This is actually - // largely redundant with the runCtx.Core.Plan call below, but was - // included here originally because Plan did _originally_ assume that - // an earlier Validate had already passed, but now does its own - // validation work as (mostly) a superset of validate. - moreDiags := runCtx.Core.Validate(runCtx.Config) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - return nil, diags - } - - plan, moreDiags := runCtx.Core.Plan( - runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode}, - ) - diags = diags.Append(moreDiags) - return plan, diags -} - -func (c *TestCommand) testSuiteApply(ctx context.Context, plan *plans.Plan, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform test: apply plan for suite %q", suiteDirs.SuiteName) - runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, plan, false) - if diags.HasErrors() { - // To make things easier on the caller, we'll return a valid empty - // state even in this case. 
- return states.NewState(), diags - } - - state, moreDiags := runCtx.Core.Apply(plan, runCtx.Config) - diags = diags.Append(moreDiags) - return state, diags -} - -func (c *TestCommand) testSuiteDestroy(ctx context.Context, state *states.State, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform test: plan to destroy any existing objects for suite %q", suiteDirs.SuiteName) - runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, state, nil, true) - if diags.HasErrors() { - return state, diags - } - - plan, moreDiags := runCtx.Core.Plan( - runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode}, - ) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - return state, diags - } - - log.Printf("[TRACE] terraform test: apply the plan to destroy any existing objects for suite %q", suiteDirs.SuiteName) - runCtx, moreDiags = c.testSuiteContext(suiteDirs, providerFactories, state, plan, true) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - return state, diags - } - - state, moreDiags = runCtx.Core.Apply(plan, runCtx.Config) - diags = diags.Append(moreDiags) - return state, diags -} - -func (c *TestCommand) collectSuiteNames() ([]string, error) { - items, err := ioutil.ReadDir("tests") - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - ret := make([]string, 0, len(items)) - for _, item := range items { - if !item.IsDir() { - continue - } - name := item.Name() - suitePath := filepath.Join("tests", name) - tfFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf")) - if err != nil { - // We'll just ignore it and treat it like a dir with no .tf files - tfFiles = nil - } - tfJSONFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf.json")) - if err != nil { - // We'll just ignore it and treat it like a dir with no .tf.json files - tfJSONFiles = nil - } - if (len(tfFiles) + 
len(tfJSONFiles)) == 0 { - // Not a test suite, then. - continue - } - ret = append(ret, name) - } - - return ret, nil -} - -func (c *TestCommand) Help() string { - helpText := ` -Usage: terraform test [options] - - This is an experimental command to help with automated integration - testing of shared modules. The usage and behavior of this command is - likely to change in breaking ways in subsequent releases, as we - are currently using this command primarily for research purposes. - - In its current experimental form, "test" will look under the current - working directory for a subdirectory called "tests", and then within - that directory search for one or more subdirectories that contain - ".tf" or ".tf.json" files. For any that it finds, it will perform - Terraform operations similar to the following sequence of commands - in each of those directories: - terraform validate - terraform apply - terraform destroy - - The test configurations should not declare any input variables and - should at least contain a call to the module being tested, which - will always be available at the path ../.. due to the expected - filesystem layout. - - The tests are considered to be successful if all of the above steps - succeed. - - Test configurations may optionally include uses of the special - built-in test provider terraform.io/builtin/test, which allows - writing explicit test assertions which must also all pass in order - for the test run to be considered successful. - - This initial implementation is intended as a minimally-viable - product to use for further research and experimentation, and in - particular it currently lacks the following capabilities that we - expect to consider in later iterations, based on feedback: - - Testing of subsequent updates to existing infrastructure, - where currently it only supports initial creation and - then destruction. 
- - Testing top-level modules that are intended to be used for - "real" environments, which typically have hard-coded values - that don't permit creating a separate "copy" for testing. - - Some sort of support for unit test runs that don't interact - with remote systems at all, e.g. for use in checking pull - requests from untrusted contributors. - - In the meantime, we'd like to hear feedback from module authors - who have tried writing some experimental tests for their modules - about what sorts of tests you were able to write, what sorts of - tests you weren't able to write, and any tests that you were - able to write but that were difficult to model in some way. - -Options: - - -compact-warnings Use a more compact representation for warnings, if - this command produces only warnings and no errors. - - -junit-xml=FILE In addition to the usual output, also write test - results to the given file path in JUnit XML format. - This format is commonly supported by CI systems, and - they typically expect to be given a filename to search - for in the test workspace after the test run finishes. - - -no-color Don't include virtual terminal formatting sequences in - the output. 
-` - return strings.TrimSpace(helpText) -} - -func (c *TestCommand) Synopsis() string { - return "Experimental support for module integration testing" -} - -type testCommandSuiteDirs struct { - SuiteName string - - ConfigDir string - ModulesDir string - ProvidersDir string - - Config *configs.Config - ProviderCache *providercache.Dir - ProviderLocks *depsfile.Locks -} diff --git a/internal/command/test_test.go b/internal/command/test_test.go deleted file mode 100644 index 17ae6da67ab1..000000000000 --- a/internal/command/test_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package command - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/terminal" -) - -// These are the main tests for the "terraform test" command. -func TestTest(t *testing.T) { - t.Run("passes", func(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("test-passes"), td) - defer testChdir(t, td)() - - streams, close := terminal.StreamsForTesting(t) - cmd := &TestCommand{ - Meta: Meta{ - Streams: streams, - View: views.NewView(streams), - }, - } - exitStatus := cmd.Run([]string{"-junit-xml=junit.xml", "-no-color"}) - outp := close(t) - if got, want := exitStatus, 0; got != want { - t.Fatalf("wrong exit status %d; want %d\nstderr:\n%s", got, want, outp.Stderr()) - } - - gotStdout := strings.TrimSpace(outp.Stdout()) - wantStdout := strings.TrimSpace(` -Warning: The "terraform test" command is experimental - -We'd like to invite adventurous module authors to write integration tests for -their modules using this command, but all of the behaviors of this command -are currently experimental and may change based on feedback. 
- -For more information on the testing experiment, including ongoing research -goals and avenues for feedback, see: - https://www.terraform.io/docs/language/modules/testing-experiment.html -`) - if diff := cmp.Diff(wantStdout, gotStdout); diff != "" { - t.Errorf("wrong stdout\n%s", diff) - } - - gotStderr := strings.TrimSpace(outp.Stderr()) - wantStderr := strings.TrimSpace(` -Success! All of the test assertions passed. -`) - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong stderr\n%s", diff) - } - - gotXMLSrc, err := ioutil.ReadFile("junit.xml") - if err != nil { - t.Fatal(err) - } - gotXML := string(bytes.TrimSpace(gotXMLSrc)) - wantXML := strings.TrimSpace(` - - 0 - 0 - 1 - - hello - 1 - 0 - 0 - 0 - - output - foo - - - -`) - if diff := cmp.Diff(wantXML, gotXML); diff != "" { - t.Errorf("wrong JUnit XML\n%s", diff) - } - }) - t.Run("fails", func(t *testing.T) { - td := t.TempDir() - testCopyDir(t, testFixturePath("test-fails"), td) - defer testChdir(t, td)() - - streams, close := terminal.StreamsForTesting(t) - cmd := &TestCommand{ - Meta: Meta{ - Streams: streams, - View: views.NewView(streams), - }, - } - exitStatus := cmd.Run([]string{"-junit-xml=junit.xml", "-no-color"}) - outp := close(t) - if got, want := exitStatus, 1; got != want { - t.Fatalf("wrong exit status %d; want %d\nstderr:\n%s", got, want, outp.Stderr()) - } - - gotStdout := strings.TrimSpace(outp.Stdout()) - wantStdout := strings.TrimSpace(` -Warning: The "terraform test" command is experimental - -We'd like to invite adventurous module authors to write integration tests for -their modules using this command, but all of the behaviors of this command -are currently experimental and may change based on feedback. 
- -For more information on the testing experiment, including ongoing research -goals and avenues for feedback, see: - https://www.terraform.io/docs/language/modules/testing-experiment.html -`) - if diff := cmp.Diff(wantStdout, gotStdout); diff != "" { - t.Errorf("wrong stdout\n%s", diff) - } - - gotStderr := strings.TrimSpace(outp.Stderr()) - wantStderr := strings.TrimSpace(` -─── Failed: hello.foo.output (output "foo" value) ─────────────────────────── -wrong value - got: "foo value boop" - want: "foo not boop" - -───────────────────────────────────────────────────────────────────────────── -`) - if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { - t.Errorf("wrong stderr\n%s", diff) - } - - gotXMLSrc, err := ioutil.ReadFile("junit.xml") - if err != nil { - t.Fatal(err) - } - gotXML := string(bytes.TrimSpace(gotXMLSrc)) - wantXML := strings.TrimSpace(` - - 0 - 1 - 1 - - hello - 1 - 0 - 0 - 1 - - output - foo - - wrong value got: "foo value boop" want: "foo not boop" - - - - -`) - if diff := cmp.Diff(wantXML, gotXML); diff != "" { - t.Errorf("wrong JUnit XML\n%s", diff) - } - }) - -} diff --git a/internal/command/testdata/login-oauth-server/main.go b/internal/command/testdata/login-oauth-server/main.go deleted file mode 100644 index 105936c4f8e6..000000000000 --- a/internal/command/testdata/login-oauth-server/main.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build ignore -// +build ignore - -// This file is a helper for those doing _manual_ testing of "terraform login" -// and/or "terraform logout" and want to start up a test OAuth server in a -// separate process for convenience: -// -// go run ./command/testdata/login-oauth-server/main.go :8080 -// -// This is _not_ the main way to use this oauthserver package. 
For automated -// test code, import it as a normal Go package instead: -// -// import oauthserver "github.com/hashicorp/terraform/internal/command/testdata/login-oauth-server" - -package main - -import ( - "fmt" - "net" - "net/http" - "os" - - oauthserver "github.com/hashicorp/terraform/internal/command/testdata/login-oauth-server" -) - -func main() { - if len(os.Args) < 2 { - fmt.Fprintln(os.Stderr, "Usage: go run ./command/testdata/login-oauth-server/main.go ") - os.Exit(1) - } - - host, port, err := net.SplitHostPort(os.Args[1]) - if err != nil { - fmt.Fprintln(os.Stderr, "Invalid address: %s", err) - os.Exit(1) - } - - if host == "" { - host = "127.0.0.1" - } - addr := fmt.Sprintf("%s:%s", host, port) - - fmt.Printf("Will listen on %s...\n", addr) - fmt.Printf( - configExampleFmt, - fmt.Sprintf("http://%s:%s/authz", host, port), - fmt.Sprintf("http://%s:%s/token", host, port), - fmt.Sprintf("http://%s:%s/revoke", host, port), - ) - - server := &http.Server{ - Addr: addr, - Handler: oauthserver.Handler, - } - err = server.ListenAndServe() - fmt.Fprintln(os.Stderr, err.Error()) -} - -const configExampleFmt = ` -host "login-test.example.com" { - services = { - "login.v1" = { - authz = %q - token = %q - client = "placeholder" - grant_types = ["code", "password"] - } - "logout.v1" = %q - } -} - -` diff --git a/internal/command/ui_input.go b/internal/command/ui_input.go deleted file mode 100644 index 071982dec283..000000000000 --- a/internal/command/ui_input.go +++ /dev/null @@ -1,191 +0,0 @@ -package command - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "os" - "os/signal" - "strings" - "sync" - "sync/atomic" - "unicode" - - "github.com/bgentry/speakeasy" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/mattn/go-isatty" - "github.com/mitchellh/colorstring" -) - -var defaultInputReader io.Reader -var defaultInputWriter io.Writer -var testInputResponse []string -var testInputResponseMap map[string]string - -// 
UIInput is an implementation of terraform.UIInput that asks the CLI -// for input stdin. -type UIInput struct { - // Colorize will color the output. - Colorize *colorstring.Colorize - - // Reader and Writer for IO. If these aren't set, they will default to - // Stdin and Stdout respectively. - Reader io.Reader - Writer io.Writer - - listening int32 - result chan string - err chan string - - interrupted bool - l sync.Mutex - once sync.Once -} - -func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - i.once.Do(i.init) - - r := i.Reader - w := i.Writer - if r == nil { - r = defaultInputReader - } - if w == nil { - w = defaultInputWriter - } - if r == nil { - r = os.Stdin - } - if w == nil { - w = os.Stdout - } - - // Make sure we only ask for input once at a time. Terraform - // should enforce this, but it doesn't hurt to verify. - i.l.Lock() - defer i.l.Unlock() - - // If we're interrupted, then don't ask for input - if i.interrupted { - return "", errors.New("interrupted") - } - - // If we have test results, return those. testInputResponse is the - // "old" way of doing it and we should remove that. - if testInputResponse != nil { - v := testInputResponse[0] - testInputResponse = testInputResponse[1:] - return v, nil - } - - // testInputResponseMap is the new way for test responses, based on - // the query ID. 
- if testInputResponseMap != nil { - v, ok := testInputResponseMap[opts.Id] - if !ok { - return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) - } - - delete(testInputResponseMap, opts.Id) - return v, nil - } - - log.Printf("[DEBUG] command: asking for input: %q", opts.Query) - - // Listen for interrupts so we can cancel the input ask - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Build the output format for asking - var buf bytes.Buffer - buf.WriteString("[reset]") - buf.WriteString(fmt.Sprintf("[bold]%s[reset]\n", opts.Query)) - if opts.Description != "" { - s := bufio.NewScanner(strings.NewReader(opts.Description)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - buf.WriteString("\n") - } - if opts.Default != "" { - buf.WriteString(" [bold]Default:[reset] ") - buf.WriteString(opts.Default) - buf.WriteString("\n") - } - buf.WriteString(" [bold]Enter a value:[reset] ") - - // Ask the user for their input - if _, err := fmt.Fprint(w, i.Colorize.Color(buf.String())); err != nil { - return "", err - } - - // Listen for the input in a goroutine. This will allow us to - // interrupt this if we are interrupted (SIGINT). - go func() { - if !atomic.CompareAndSwapInt32(&i.listening, 0, 1) { - return // We are already listening for input. 
- } - defer atomic.CompareAndSwapInt32(&i.listening, 1, 0) - - var line string - var err error - if opts.Secret && isatty.IsTerminal(os.Stdin.Fd()) { - line, err = speakeasy.Ask("") - } else { - buf := bufio.NewReader(r) - line, err = buf.ReadString('\n') - } - if err != nil { - log.Printf("[ERR] UIInput scan err: %s", err) - i.err <- string(err.Error()) - } else { - i.result <- strings.TrimRightFunc(line, unicode.IsSpace) - } - }() - - select { - case err := <-i.err: - return "", errors.New(err) - - case line := <-i.result: - fmt.Fprint(w, "\n") - - if line == "" { - line = opts.Default - } - - return line, nil - case <-ctx.Done(): - // Print a newline so that any further output starts properly - // on a new line. - fmt.Fprintln(w) - - return "", ctx.Err() - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. - fmt.Fprintln(w) - - // Mark that we were interrupted so future Ask calls fail. - i.interrupted = true - - return "", errors.New("interrupted") - } -} - -func (i *UIInput) init() { - i.result = make(chan string) - i.err = make(chan string) - - if i.Colorize == nil { - i.Colorize = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - } - } -} diff --git a/internal/command/ui_input_test.go b/internal/command/ui_input_test.go deleted file mode 100644 index d08cb0a2450b..000000000000 --- a/internal/command/ui_input_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package command - -import ( - "bytes" - "context" - "fmt" - "io" - "sync/atomic" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestUIInput_impl(t *testing.T) { - var _ terraform.UIInput = new(UIInput) -} - -func TestUIInputInput(t *testing.T) { - i := &UIInput{ - Reader: bytes.NewBufferString("foo\n"), - Writer: bytes.NewBuffer(nil), - } - - v, err := i.Input(context.Background(), &terraform.InputOpts{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if v != "foo" { - 
t.Fatalf("unexpected input: %s", v) - } -} - -func TestUIInputInput_canceled(t *testing.T) { - r, w := io.Pipe() - i := &UIInput{ - Reader: r, - Writer: bytes.NewBuffer(nil), - } - - // Make a context that can be canceled. - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - // Cancel the context after 2 seconds. - time.Sleep(2 * time.Second) - cancel() - }() - - // Get input until the context is canceled. - v, err := i.Input(ctx, &terraform.InputOpts{}) - if err != context.Canceled { - t.Fatalf("expected a context.Canceled error, got: %v", err) - } - - // As the context was canceled v should be empty. - if v != "" { - t.Fatalf("unexpected input: %s", v) - } - - // As the context was canceled we should still be listening. - listening := atomic.LoadInt32(&i.listening) - if listening != 1 { - t.Fatalf("expected listening to be 1, got: %d", listening) - } - - go func() { - // Fake input is given after 1 second. - time.Sleep(time.Second) - fmt.Fprint(w, "foo\n") - w.Close() - }() - - v, err = i.Input(context.Background(), &terraform.InputOpts{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if v != "foo" { - t.Fatalf("unexpected input: %s", v) - } -} - -func TestUIInputInput_spaces(t *testing.T) { - i := &UIInput{ - Reader: bytes.NewBufferString("foo bar\n"), - Writer: bytes.NewBuffer(nil), - } - - v, err := i.Input(context.Background(), &terraform.InputOpts{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if v != "foo bar" { - t.Fatalf("unexpected input: %s", v) - } -} - -func TestUIInputInput_Error(t *testing.T) { - i := &UIInput{ - Reader: bytes.NewBuffer(nil), - Writer: bytes.NewBuffer(nil), - } - - v, err := i.Input(context.Background(), &terraform.InputOpts{}) - if err == nil { - t.Fatalf("Error is not 'nil'") - } - - if err.Error() != "EOF" { - t.Fatalf("unexpected error: %v", err) - } - - if v != "" { - t.Fatalf("input must be empty") - } -} diff --git a/internal/command/validate.go 
b/internal/command/validate.go deleted file mode 100644 index 110fcec8c32e..000000000000 --- a/internal/command/validate.go +++ /dev/null @@ -1,130 +0,0 @@ -package command - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ValidateCommand is a Command implementation that validates the terraform files -type ValidateCommand struct { - Meta -} - -func (c *ValidateCommand) Run(rawArgs []string) int { - // Parse and apply global view arguments - common, rawArgs := arguments.ParseView(rawArgs) - c.View.Configure(common) - - // Parse and validate flags - args, diags := arguments.ParseValidate(rawArgs) - if diags.HasErrors() { - c.View.Diagnostics(diags) - c.View.HelpPrompt("validate") - return 1 - } - - view := views.NewValidate(args.ViewType, c.View) - - // After this point, we must only produce JSON output if JSON mode is - // enabled, so all errors should be accumulated into diags and we'll - // print out a suitable result at the end, depending on the format - // selection. All returns from this point on must be tail-calls into - // view.Results in order to produce the expected output. 
- - dir, err := filepath.Abs(args.Path) - if err != nil { - diags = diags.Append(fmt.Errorf("unable to locate module: %s", err)) - return view.Results(diags) - } - - // Check for user-supplied plugin path - if c.pluginPath, err = c.loadPluginPath(); err != nil { - diags = diags.Append(fmt.Errorf("error loading plugin path: %s", err)) - return view.Results(diags) - } - - validateDiags := c.validate(dir) - diags = diags.Append(validateDiags) - - // Validating with dev overrides in effect means that the result might - // not be valid for a stable release, so we'll warn about that in case - // the user is trying to use "terraform validate" as a sort of pre-flight - // check before submitting a change. - diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) - - return view.Results(diags) -} - -func (c *ValidateCommand) validate(dir string) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg, cfgDiags := c.loadConfig(dir) - diags = diags.Append(cfgDiags) - - if diags.HasErrors() { - return diags - } - - opts, err := c.contextOpts() - if err != nil { - diags = diags.Append(err) - return diags - } - - tfCtx, ctxDiags := terraform.NewContext(opts) - diags = diags.Append(ctxDiags) - if ctxDiags.HasErrors() { - return diags - } - - validateDiags := tfCtx.Validate(cfg) - diags = diags.Append(validateDiags) - return diags -} - -func (c *ValidateCommand) Synopsis() string { - return "Check whether the configuration is valid" -} - -func (c *ValidateCommand) Help() string { - helpText := ` -Usage: terraform [global options] validate [options] - - Validate the configuration files in a directory, referring only to the - configuration and not accessing any remote services such as remote state, - provider APIs, etc. - - Validate runs checks that verify whether a configuration is syntactically - valid and internally consistent, regardless of any provided variables or - existing state. 
It is thus primarily useful for general verification of - reusable modules, including correctness of attribute names and value types. - - It is safe to run this command automatically, for example as a post-save - check in a text editor or as a test step for a re-usable module in a CI - system. - - Validation requires an initialized working directory with any referenced - plugins and modules installed. To initialize a working directory for - validation without accessing any configured remote backend, use: - terraform init -backend=false - - To verify configuration in the context of a particular run (a particular - target workspace, input variable values, etc), use the 'terraform plan' - command instead, which includes an implied validation check. - -Options: - - -json Produce output in a machine-readable JSON format, suitable for - use in text editor integrations and other automated systems. - Always disables color. - - -no-color If specified, output won't contain any color. -` - return strings.TrimSpace(helpText) -} diff --git a/internal/command/validate_test.go b/internal/command/validate_test.go deleted file mode 100644 index 969e79683fab..000000000000 --- a/internal/command/validate_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package command - -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/terminal" -) - -func setupTest(t *testing.T, fixturepath string, args ...string) (*terminal.TestOutput, int) { - view, done := testView(t) - p := testProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: 
true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, - "name": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - }, - }, - } - c := &ValidateCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(p), - View: view, - }, - } - - args = append(args, "-no-color") - args = append(args, testFixturePath(fixturepath)) - - code := c.Run(args) - return done(t), code -} - -func TestValidateCommand(t *testing.T) { - if output, code := setupTest(t, "validate-valid"); code != 0 { - t.Fatalf("unexpected non-successful exit code %d\n\n%s", code, output.Stderr()) - } -} - -func TestValidateCommandWithTfvarsFile(t *testing.T) { - // Create a temporary working directory that is empty because this test - // requires scanning the current working directory by validate command. 
- td := t.TempDir() - testCopyDir(t, testFixturePath("validate-valid/with-tfvars-file"), td) - defer testChdir(t, td)() - - view, done := testView(t) - c := &ValidateCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - View: view, - }, - } - - args := []string{} - code := c.Run(args) - output := done(t) - if code != 0 { - t.Fatalf("bad %d\n\n%s", code, output.Stderr()) - } -} - -func TestValidateFailingCommand(t *testing.T) { - if output, code := setupTest(t, "validate-invalid"); code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } -} - -func TestValidateFailingCommandMissingQuote(t *testing.T) { - output, code := setupTest(t, "validate-invalid/missing_quote") - - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - wantError := "Error: Invalid reference" - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestValidateFailingCommandMissingVariable(t *testing.T) { - output, code := setupTest(t, "validate-invalid/missing_var") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - wantError := "Error: Reference to undeclared input variable" - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestSameProviderMutipleTimesShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/multiple_providers") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - wantError := "Error: Duplicate provider configuration" - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestSameModuleMultipleTimesShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/multiple_modules") - if code != 1 { - t.Fatalf("Should have failed: 
%d\n\n%s", code, output.Stderr()) - } - wantError := "Error: Duplicate module call" - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestSameResourceMultipleTimesShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/multiple_resources") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - wantError := `Error: Duplicate resource "aws_instance" configuration` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestOutputWithoutValueShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/outputs") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - wantError := `The argument "value" is required, but no definition was found.` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } - wantError = `An argument named "values" is not expected here. 
Did you mean "value"?` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestModuleWithIncorrectNameShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/incorrectmodulename") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - - wantError := `Error: Invalid module instance name` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } - wantError = `Error: Variables not allowed` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestWronglyUsedInterpolationShouldFail(t *testing.T) { - output, code := setupTest(t, "validate-invalid/interpolation") - if code != 1 { - t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) - } - - wantError := `Error: Variables not allowed` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } - wantError = `A single static variable reference is required` - if !strings.Contains(output.Stderr(), wantError) { - t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) - } -} - -func TestMissingDefinedVar(t *testing.T) { - output, code := setupTest(t, "validate-invalid/missing_defined_var") - // This is allowed because validate tests only that variables are referenced - // correctly, not that they all have defined values. 
- if code != 0 { - t.Fatalf("Should have passed: %d\n\n%s", code, output.Stderr()) - } -} - -func TestValidate_json(t *testing.T) { - tests := []struct { - path string - valid bool - }{ - {"validate-valid", true}, - {"validate-invalid", false}, - {"validate-invalid/missing_quote", false}, - {"validate-invalid/missing_var", false}, - {"validate-invalid/multiple_providers", false}, - {"validate-invalid/multiple_modules", false}, - {"validate-invalid/multiple_resources", false}, - {"validate-invalid/outputs", false}, - {"validate-invalid/incorrectmodulename", false}, - {"validate-invalid/interpolation", false}, - {"validate-invalid/missing_defined_var", true}, - } - - for _, tc := range tests { - t.Run(tc.path, func(t *testing.T) { - var want, got map[string]interface{} - - wantFile, err := os.Open(path.Join(testFixturePath(tc.path), "output.json")) - if err != nil { - t.Fatalf("failed to open output file: %s", err) - } - defer wantFile.Close() - wantBytes, err := ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("failed to read output file: %s", err) - } - err = json.Unmarshal([]byte(wantBytes), &want) - if err != nil { - t.Fatalf("failed to unmarshal expected JSON: %s", err) - } - - output, code := setupTest(t, tc.path, "-json") - - gotString := output.Stdout() - err = json.Unmarshal([]byte(gotString), &got) - if err != nil { - t.Fatalf("failed to unmarshal actual JSON: %s", err) - } - - if !cmp.Equal(got, want) { - t.Errorf("wrong output:\n %v\n", cmp.Diff(got, want)) - t.Errorf("raw output:\n%s\n", gotString) - } - - if tc.valid && code != 0 { - t.Errorf("wrong exit code: want 0, got %d", code) - } else if !tc.valid && code != 1 { - t.Errorf("wrong exit code: want 1, got %d", code) - } - - if errorOutput := output.Stderr(); errorOutput != "" { - t.Errorf("unexpected error output:\n%s", errorOutput) - } - }) - } -} diff --git a/internal/command/version_test.go b/internal/command/version_test.go deleted file mode 100644 index 3ec5a4b86399..000000000000 --- 
a/internal/command/version_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package command - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/mitchellh/cli" -) - -func TestVersionCommand_implements(t *testing.T) { - var _ cli.Command = &VersionCommand{} -} - -func TestVersion(t *testing.T) { - td := t.TempDir() - defer testChdir(t, td)() - - // We'll create a fixed dependency lock file in our working directory - // so we can verify that the version command shows the information - // from it. - locks := depsfile.NewLocks() - locks.SetProvider( - addrs.NewDefaultProvider("test2"), - getproviders.MustParseVersion("1.2.3"), - nil, - nil, - ) - locks.SetProvider( - addrs.NewDefaultProvider("test1"), - getproviders.MustParseVersion("7.8.9-beta.2"), - nil, - nil, - ) - - ui := cli.NewMockUi() - c := &VersionCommand{ - Meta: Meta{ - Ui: ui, - }, - Version: "4.5.6", - VersionPrerelease: "foo", - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - if err := c.replaceLockedDependencies(locks); err != nil { - t.Fatal(err) - } - if code := c.Run([]string{}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6-foo\non aros_riscv64\n+ provider registry.terraform.io/hashicorp/test1 v7.8.9-beta.2\n+ provider registry.terraform.io/hashicorp/test2 v1.2.3" - if actual != expected { - t.Fatalf("wrong output\ngot:\n%s\nwant:\n%s", actual, expected) - } - -} - -func TestVersion_flags(t *testing.T) { - ui := new(cli.MockUi) - m := Meta{ - Ui: ui, - } - - // `terraform version` - c := &VersionCommand{ - Meta: m, - Version: "4.5.6", - VersionPrerelease: "foo", - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - - if code := c.Run([]string{"-v", "-version"}); code != 0 { - 
t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6-foo\non aros_riscv64" - if actual != expected { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestVersion_outdated(t *testing.T) { - ui := new(cli.MockUi) - m := Meta{ - Ui: ui, - } - - c := &VersionCommand{ - Meta: m, - Version: "4.5.6", - CheckFunc: mockVersionCheckFunc(true, "4.5.7"), - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - - if code := c.Run([]string{}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6\non aros_riscv64\n\nYour version of Terraform is out of date! The latest version\nis 4.5.7. You can update by downloading from https://www.terraform.io/downloads.html" - if actual != expected { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestVersion_json(t *testing.T) { - td := t.TempDir() - defer testChdir(t, td)() - - ui := cli.NewMockUi() - meta := Meta{ - Ui: ui, - } - - // `terraform version -json` without prerelease - c := &VersionCommand{ - Meta: meta, - Version: "4.5.6", - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - if code := c.Run([]string{"-json"}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := strings.TrimSpace(` -{ - "terraform_version": "4.5.6", - "platform": "aros_riscv64", - "provider_selections": {}, - "terraform_outdated": false -} -`) - if diff := cmp.Diff(expected, actual); diff != "" { - t.Fatalf("wrong output\n%s", diff) - } - - // flush the output from the mock ui - ui.OutputWriter.Reset() - - // Now we'll create a fixed dependency lock file in our working directory - // so we can verify that the version command shows the information - // from it. 
- locks := depsfile.NewLocks() - locks.SetProvider( - addrs.NewDefaultProvider("test2"), - getproviders.MustParseVersion("1.2.3"), - nil, - nil, - ) - locks.SetProvider( - addrs.NewDefaultProvider("test1"), - getproviders.MustParseVersion("7.8.9-beta.2"), - nil, - nil, - ) - - // `terraform version -json` with prerelease and provider dependencies - c = &VersionCommand{ - Meta: meta, - Version: "4.5.6", - VersionPrerelease: "foo", - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - if err := c.replaceLockedDependencies(locks); err != nil { - t.Fatal(err) - } - if code := c.Run([]string{"-json"}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual = strings.TrimSpace(ui.OutputWriter.String()) - expected = strings.TrimSpace(` -{ - "terraform_version": "4.5.6-foo", - "platform": "aros_riscv64", - "provider_selections": { - "registry.terraform.io/hashicorp/test1": "7.8.9-beta.2", - "registry.terraform.io/hashicorp/test2": "1.2.3" - }, - "terraform_outdated": false -} -`) - if diff := cmp.Diff(expected, actual); diff != "" { - t.Fatalf("wrong output\n%s", diff) - } - -} - -func TestVersion_jsonoutdated(t *testing.T) { - ui := new(cli.MockUi) - m := Meta{ - Ui: ui, - } - - c := &VersionCommand{ - Meta: m, - Version: "4.5.6", - CheckFunc: mockVersionCheckFunc(true, "4.5.7"), - Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, - } - - if code := c.Run([]string{"-json"}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "{\n \"terraform_version\": \"4.5.6\",\n \"platform\": \"aros_riscv64\",\n \"provider_selections\": {},\n \"terraform_outdated\": true\n}" - if actual != expected { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func mockVersionCheckFunc(outdated bool, latest string) VersionCheckFunc { - return func() (VersionCheckInfo, error) { - return VersionCheckInfo{ - Outdated: outdated, - Latest: 
latest, - // Alerts is not used by version command - }, nil - } -} diff --git a/internal/command/views/apply.go b/internal/command/views/apply.go deleted file mode 100644 index ec07f6ad9ad0..000000000000 --- a/internal/command/views/apply.go +++ /dev/null @@ -1,162 +0,0 @@ -package views - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// The Apply view is used for the apply command. -type Apply interface { - ResourceCount(stateOutPath string) - Outputs(outputValues map[string]*states.OutputValue) - - Operation() Operation - Hooks() []terraform.Hook - - Diagnostics(diags tfdiags.Diagnostics) - HelpPrompt() -} - -// NewApply returns an initialized Apply implementation for the given ViewType. -func NewApply(vt arguments.ViewType, destroy bool, view *View) Apply { - switch vt { - case arguments.ViewJSON: - return &ApplyJSON{ - view: NewJSONView(view), - destroy: destroy, - countHook: &countHook{}, - } - case arguments.ViewHuman: - return &ApplyHuman{ - view: view, - destroy: destroy, - inAutomation: view.RunningInAutomation(), - countHook: &countHook{}, - } - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -// The ApplyHuman implementation renders human-readable text logs, suitable for -// a scrolling terminal. -type ApplyHuman struct { - view *View - - destroy bool - inAutomation bool - - countHook *countHook -} - -var _ Apply = (*ApplyHuman)(nil) - -func (v *ApplyHuman) ResourceCount(stateOutPath string) { - if v.destroy { - v.view.streams.Printf( - v.view.colorize.Color("[reset][bold][green]\nDestroy complete! 
Resources: %d destroyed.\n"), - v.countHook.Removed, - ) - } else { - v.view.streams.Printf( - v.view.colorize.Color("[reset][bold][green]\nApply complete! Resources: %d added, %d changed, %d destroyed.\n"), - v.countHook.Added, - v.countHook.Changed, - v.countHook.Removed, - ) - } - if (v.countHook.Added > 0 || v.countHook.Changed > 0) && stateOutPath != "" { - v.view.streams.Printf("\n%s\n\n", format.WordWrap(stateOutPathPostApply, v.view.outputColumns())) - v.view.streams.Printf("State path: %s\n", stateOutPath) - } -} - -func (v *ApplyHuman) Outputs(outputValues map[string]*states.OutputValue) { - if len(outputValues) > 0 { - v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) - NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) - } -} - -func (v *ApplyHuman) Operation() Operation { - return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) -} - -func (v *ApplyHuman) Hooks() []terraform.Hook { - return []terraform.Hook{ - v.countHook, - NewUiHook(v.view), - } -} - -func (v *ApplyHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *ApplyHuman) HelpPrompt() { - command := "apply" - if v.destroy { - command = "destroy" - } - v.view.HelpPrompt(command) -} - -const stateOutPathPostApply = "The state of your infrastructure has been saved to the path below. This state is required to modify and destroy your infrastructure, so keep it safe. To inspect the complete state use the `terraform show` command." - -// The ApplyJSON implementation renders streaming JSON logs, suitable for -// integrating with other software. 
-type ApplyJSON struct { - view *JSONView - - destroy bool - - countHook *countHook -} - -var _ Apply = (*ApplyJSON)(nil) - -func (v *ApplyJSON) ResourceCount(stateOutPath string) { - operation := json.OperationApplied - if v.destroy { - operation = json.OperationDestroyed - } - v.view.ChangeSummary(&json.ChangeSummary{ - Add: v.countHook.Added, - Change: v.countHook.Changed, - Remove: v.countHook.Removed, - Operation: operation, - }) -} - -func (v *ApplyJSON) Outputs(outputValues map[string]*states.OutputValue) { - outputs, diags := json.OutputsFromMap(outputValues) - if diags.HasErrors() { - v.Diagnostics(diags) - } else { - v.view.Outputs(outputs) - } -} - -func (v *ApplyJSON) Operation() Operation { - return &OperationJSON{view: v.view} -} - -func (v *ApplyJSON) Hooks() []terraform.Hook { - return []terraform.Hook{ - v.countHook, - newJSONHook(v.view), - } -} - -func (v *ApplyJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *ApplyJSON) HelpPrompt() { -} diff --git a/internal/command/views/apply_test.go b/internal/command/views/apply_test.go deleted file mode 100644 index d8bc71c80aab..000000000000 --- a/internal/command/views/apply_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package views - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/zclconf/go-cty/cty" -) - -// This test is mostly because I am paranoid about having two consecutive -// boolean arguments. 
-func TestApply_new(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)) - hv, ok := v.(*ApplyHuman) - if !ok { - t.Fatalf("unexpected return type %t", v) - } - - if hv.destroy != false { - t.Fatalf("unexpected destroy value") - } - - if hv.inAutomation != true { - t.Fatalf("unexpected inAutomation value") - } -} - -// Basic test coverage of Outputs, since most of its functionality is tested -// elsewhere. -func TestApplyHuman_outputs(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(arguments.ViewHuman, false, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{ - "foo": {Value: cty.StringVal("secret")}, - }) - - got := done(t).Stdout() - for _, want := range []string{"Outputs:", `foo = "secret"`} { - if !strings.Contains(got, want) { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - } -} - -// Outputs should do nothing if there are no outputs to render. -func TestApplyHuman_outputsEmpty(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(arguments.ViewHuman, false, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{}) - - got := done(t).Stdout() - if got != "" { - t.Errorf("output should be empty, but got: %q", got) - } -} - -// Ensure that the correct view type and in-automation settings propagate to the -// Operation view. -func TestApplyHuman_operation(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)).Operation() - if hv, ok := v.(*OperationHuman); !ok { - t.Fatalf("unexpected return type %t", v) - } else if hv.inAutomation != true { - t.Fatalf("unexpected inAutomation value on Operation view") - } -} - -// This view is used for both apply and destroy commands, so the help output -// needs to cover both. 
-func TestApplyHuman_help(t *testing.T) { - testCases := map[string]bool{ - "apply": false, - "destroy": true, - } - - for name, destroy := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(arguments.ViewHuman, destroy, NewView(streams)) - v.HelpPrompt() - got := done(t).Stderr() - if !strings.Contains(got, name) { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, name) - } - }) - } -} - -// Hooks and ResourceCount are tangled up and easiest to test together. -func TestApply_resourceCount(t *testing.T) { - testCases := map[string]struct { - destroy bool - want string - }{ - "apply": { - false, - "Apply complete! Resources: 1 added, 2 changed, 3 destroyed.", - }, - "destroy": { - true, - "Destroy complete! Resources: 3 destroyed.", - }, - } - - // For compatibility reasons, these tests should hold true for both human - // and JSON output modes - views := []arguments.ViewType{arguments.ViewHuman, arguments.ViewJSON} - - for name, tc := range testCases { - for _, viewType := range views { - t.Run(fmt.Sprintf("%s (%s view)", name, viewType), func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(viewType, tc.destroy, NewView(streams)) - hooks := v.Hooks() - - var count *countHook - for _, hook := range hooks { - if ch, ok := hook.(*countHook); ok { - count = ch - } - } - if count == nil { - t.Fatalf("expected Hooks to include a countHook: %#v", hooks) - } - - count.Added = 1 - count.Changed = 2 - count.Removed = 3 - - v.ResourceCount("") - - got := done(t).Stdout() - if !strings.Contains(got, tc.want) { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) - } - }) - } - } -} - -func TestApplyHuman_resourceCountStatePath(t *testing.T) { - testCases := map[string]struct { - added int - changed int - removed int - statePath string - wantContains bool - }{ - "default state path": { - added: 1, - changed: 2, - removed: 3, - statePath: "", - wantContains: false, - }, - 
"only removed": { - added: 0, - changed: 0, - removed: 5, - statePath: "foo.tfstate", - wantContains: false, - }, - "added": { - added: 5, - changed: 0, - removed: 0, - statePath: "foo.tfstate", - wantContains: true, - }, - "changed": { - added: 0, - changed: 5, - removed: 0, - statePath: "foo.tfstate", - wantContains: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(arguments.ViewHuman, false, NewView(streams)) - hooks := v.Hooks() - - var count *countHook - for _, hook := range hooks { - if ch, ok := hook.(*countHook); ok { - count = ch - } - } - if count == nil { - t.Fatalf("expected Hooks to include a countHook: %#v", hooks) - } - - count.Added = tc.added - count.Changed = tc.changed - count.Removed = tc.removed - - v.ResourceCount(tc.statePath) - - got := done(t).Stdout() - want := "State path: " + tc.statePath - contains := strings.Contains(got, want) - if contains && !tc.wantContains { - t.Errorf("wrong result\ngot: %q\nshould not contain: %q", got, want) - } else if !contains && tc.wantContains { - t.Errorf("wrong result\ngot: %q\nshould contain: %q", got, want) - } - }) - } -} - -// Basic test coverage of Outputs, since most of its functionality is tested -// elsewhere. 
-func TestApplyJSON_outputs(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewApply(arguments.ViewJSON, false, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{ - "boop_count": {Value: cty.NumberIntVal(92)}, - "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, - }) - - want := []map[string]interface{}{ - { - "@level": "info", - "@message": "Outputs: 2", - "@module": "terraform.ui", - "type": "outputs", - "outputs": map[string]interface{}{ - "boop_count": map[string]interface{}{ - "sensitive": false, - "value": float64(92), - "type": "number", - }, - "password": map[string]interface{}{ - "sensitive": true, - "type": "string", - }, - }, - }, - } - testJSONViewOutputEquals(t, done(t).Stdout(), want) -} diff --git a/internal/command/views/json/change.go b/internal/command/views/json/change.go deleted file mode 100644 index 60439e509017..000000000000 --- a/internal/command/views/json/change.go +++ /dev/null @@ -1,122 +0,0 @@ -package json - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/plans" -) - -func NewResourceInstanceChange(change *plans.ResourceInstanceChangeSrc) *ResourceInstanceChange { - c := &ResourceInstanceChange{ - Resource: newResourceAddr(change.Addr), - Action: changeAction(change.Action), - Reason: changeReason(change.ActionReason), - } - if !change.Addr.Equal(change.PrevRunAddr) { - if c.Action == ActionNoOp { - c.Action = ActionMove - } - pr := newResourceAddr(change.PrevRunAddr) - c.PreviousResource = &pr - } - - return c -} - -type ResourceInstanceChange struct { - Resource ResourceAddr `json:"resource"` - PreviousResource *ResourceAddr `json:"previous_resource,omitempty"` - Action ChangeAction `json:"action"` - Reason ChangeReason `json:"reason,omitempty"` -} - -func (c *ResourceInstanceChange) String() string { - return fmt.Sprintf("%s: Plan to %s", c.Resource.Addr, c.Action) -} - -type ChangeAction string - -const ( - ActionNoOp ChangeAction = "noop" 
- ActionMove ChangeAction = "move" - ActionCreate ChangeAction = "create" - ActionRead ChangeAction = "read" - ActionUpdate ChangeAction = "update" - ActionReplace ChangeAction = "replace" - ActionDelete ChangeAction = "delete" -) - -func changeAction(action plans.Action) ChangeAction { - switch action { - case plans.NoOp: - return ActionNoOp - case plans.Create: - return ActionCreate - case plans.Read: - return ActionRead - case plans.Update: - return ActionUpdate - case plans.DeleteThenCreate, plans.CreateThenDelete: - return ActionReplace - case plans.Delete: - return ActionDelete - default: - return ActionNoOp - } -} - -type ChangeReason string - -const ( - ReasonNone ChangeReason = "" - ReasonTainted ChangeReason = "tainted" - ReasonRequested ChangeReason = "requested" - ReasonReplaceTriggeredBy ChangeReason = "replace_triggered_by" - ReasonCannotUpdate ChangeReason = "cannot_update" - ReasonUnknown ChangeReason = "unknown" - - ReasonDeleteBecauseNoResourceConfig ChangeReason = "delete_because_no_resource_config" - ReasonDeleteBecauseWrongRepetition ChangeReason = "delete_because_wrong_repetition" - ReasonDeleteBecauseCountIndex ChangeReason = "delete_because_count_index" - ReasonDeleteBecauseEachKey ChangeReason = "delete_because_each_key" - ReasonDeleteBecauseNoModule ChangeReason = "delete_because_no_module" - ReasonDeleteBecauseNoMoveTarget ChangeReason = "delete_because_no_move_target" - ReasonReadBecauseConfigUnknown ChangeReason = "read_because_config_unknown" - ReasonReadBecauseDependencyPending ChangeReason = "read_because_dependency_pending" -) - -func changeReason(reason plans.ResourceInstanceChangeActionReason) ChangeReason { - switch reason { - case plans.ResourceInstanceChangeNoReason: - return ReasonNone - case plans.ResourceInstanceReplaceBecauseTainted: - return ReasonTainted - case plans.ResourceInstanceReplaceByRequest: - return ReasonRequested - case plans.ResourceInstanceReplaceBecauseCannotUpdate: - return ReasonCannotUpdate - case 
plans.ResourceInstanceReplaceByTriggers: - return ReasonReplaceTriggeredBy - case plans.ResourceInstanceDeleteBecauseNoResourceConfig: - return ReasonDeleteBecauseNoResourceConfig - case plans.ResourceInstanceDeleteBecauseWrongRepetition: - return ReasonDeleteBecauseWrongRepetition - case plans.ResourceInstanceDeleteBecauseCountIndex: - return ReasonDeleteBecauseCountIndex - case plans.ResourceInstanceDeleteBecauseEachKey: - return ReasonDeleteBecauseEachKey - case plans.ResourceInstanceDeleteBecauseNoModule: - return ReasonDeleteBecauseNoModule - case plans.ResourceInstanceReadBecauseConfigUnknown: - return ReasonReadBecauseConfigUnknown - case plans.ResourceInstanceDeleteBecauseNoMoveTarget: - return ReasonDeleteBecauseNoMoveTarget - case plans.ResourceInstanceReadBecauseDependencyPending: - return ReasonReadBecauseDependencyPending - default: - // This should never happen, but there's no good way to guarantee - // exhaustive handling of the enum, so a generic fall back is better - // than a misleading result or a panic - return ReasonUnknown - } -} diff --git a/internal/command/views/json/diagnostic.go b/internal/command/views/json/diagnostic.go deleted file mode 100644 index 1175792c72a2..000000000000 --- a/internal/command/views/json/diagnostic.go +++ /dev/null @@ -1,490 +0,0 @@ -package json - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcled" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// These severities map to the tfdiags.Severity values, plus an explicit -// unknown in case that enum grows without us noticing here. 
-const ( - DiagnosticSeverityUnknown = "unknown" - DiagnosticSeverityError = "error" - DiagnosticSeverityWarning = "warning" -) - -// Diagnostic represents any tfdiags.Diagnostic value. The simplest form has -// just a severity, single line summary, and optional detail. If there is more -// information about the source of the diagnostic, this is represented in the -// range field. -type Diagnostic struct { - Severity string `json:"severity"` - Summary string `json:"summary"` - Detail string `json:"detail"` - Address string `json:"address,omitempty"` - Range *DiagnosticRange `json:"range,omitempty"` - Snippet *DiagnosticSnippet `json:"snippet,omitempty"` -} - -// Pos represents a position in the source code. -type Pos struct { - // Line is a one-based count for the line in the indicated file. - Line int `json:"line"` - - // Column is a one-based count of Unicode characters from the start of the line. - Column int `json:"column"` - - // Byte is a zero-based offset into the indicated file. - Byte int `json:"byte"` -} - -// DiagnosticRange represents the filename and position of the diagnostic -// subject. This defines the range of the source to be highlighted in the -// output. Note that the snippet may include additional surrounding source code -// if the diagnostic has a context range. -// -// The Start position is inclusive, and the End position is exclusive. Exact -// positions are intended for highlighting for human interpretation only and -// are subject to change. -type DiagnosticRange struct { - Filename string `json:"filename"` - Start Pos `json:"start"` - End Pos `json:"end"` -} - -// DiagnosticSnippet represents source code information about the diagnostic. -// It is possible for a diagnostic to have a source (and therefore a range) but -// no source code can be found. In this case, the range field will be present and -// the snippet field will not. -type DiagnosticSnippet struct { - // Context is derived from HCL's hcled.ContextString output. 
This gives a - // high-level summary of the root context of the diagnostic: for example, - // the resource block in which an expression causes an error. - Context *string `json:"context"` - - // Code is a possibly-multi-line string of Terraform configuration, which - // includes both the diagnostic source and any relevant context as defined - // by the diagnostic. - Code string `json:"code"` - - // StartLine is the line number in the source file for the first line of - // the snippet code block. This is not necessarily the same as the value of - // Range.Start.Line, as it is possible to have zero or more lines of - // context source code before the diagnostic range starts. - StartLine int `json:"start_line"` - - // HighlightStartOffset is the character offset into Code at which the - // diagnostic source range starts, which ought to be highlighted as such by - // the consumer of this data. - HighlightStartOffset int `json:"highlight_start_offset"` - - // HighlightEndOffset is the character offset into Code at which the - // diagnostic source range ends. - HighlightEndOffset int `json:"highlight_end_offset"` - - // Values is a sorted slice of expression values which may be useful in - // understanding the source of an error in a complex expression. - Values []DiagnosticExpressionValue `json:"values"` - - // FunctionCall is information about a function call whose failure is - // being reported by this diagnostic, if any. - FunctionCall *DiagnosticFunctionCall `json:"function_call,omitempty"` -} - -// DiagnosticExpressionValue represents an HCL traversal string (e.g. -// "var.foo") and a statement about its value while the expression was -// evaluated (e.g. "is a string", "will be known only after apply"). These are -// intended to help the consumer diagnose why an expression caused a diagnostic -// to be emitted. 
-type DiagnosticExpressionValue struct { - Traversal string `json:"traversal"` - Statement string `json:"statement"` -} - -// DiagnosticFunctionCall represents a function call whose information is -// being included as part of a diagnostic snippet. -type DiagnosticFunctionCall struct { - // CalledAs is the full name that was used to call this function, - // potentially including namespace prefixes if the function does not belong - // to the default function namespace. - CalledAs string `json:"called_as"` - - // Signature is a description of the signature of the function that was - // called, if any. Might be omitted if we're reporting that a call failed - // because the given function name isn't known, for example. - Signature *Function `json:"signature,omitempty"` -} - -// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources, -// and returns a Diagnostic struct. -func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string][]byte) *Diagnostic { - var sev string - switch diag.Severity() { - case tfdiags.Error: - sev = DiagnosticSeverityError - case tfdiags.Warning: - sev = DiagnosticSeverityWarning - default: - sev = DiagnosticSeverityUnknown - } - - desc := diag.Description() - - diagnostic := &Diagnostic{ - Severity: sev, - Summary: desc.Summary, - Detail: desc.Detail, - Address: desc.Address, - } - - sourceRefs := diag.Source() - if sourceRefs.Subject != nil { - // We'll borrow HCL's range implementation here, because it has some - // handy features to help us produce a nice source code snippet. - highlightRange := sourceRefs.Subject.ToHCL() - - // Some diagnostic sources fail to set the end of the subject range. - if highlightRange.End == (hcl.Pos{}) { - highlightRange.End = highlightRange.Start - } - - snippetRange := highlightRange - if sourceRefs.Context != nil { - snippetRange = sourceRefs.Context.ToHCL() - } - - // Make sure the snippet includes the highlight. 
This should be true - // for any reasonable diagnostic, but we'll make sure. - snippetRange = hcl.RangeOver(snippetRange, highlightRange) - - // Empty ranges result in odd diagnostic output, so extend the end to - // ensure there's at least one byte in the snippet or highlight. - if snippetRange.Empty() { - snippetRange.End.Byte++ - snippetRange.End.Column++ - } - if highlightRange.Empty() { - highlightRange.End.Byte++ - highlightRange.End.Column++ - } - - diagnostic.Range = &DiagnosticRange{ - Filename: highlightRange.Filename, - Start: Pos{ - Line: highlightRange.Start.Line, - Column: highlightRange.Start.Column, - Byte: highlightRange.Start.Byte, - }, - End: Pos{ - Line: highlightRange.End.Line, - Column: highlightRange.End.Column, - Byte: highlightRange.End.Byte, - }, - } - - var src []byte - if sources != nil { - src = sources[highlightRange.Filename] - } - - // If we have a source file for the diagnostic, we can emit a code - // snippet. - if src != nil { - diagnostic.Snippet = &DiagnosticSnippet{ - StartLine: snippetRange.Start.Line, - - // Ensure that the default Values struct is an empty array, as this - // makes consuming the JSON structure easier in most languages. - Values: []DiagnosticExpressionValue{}, - } - - file, offset := parseRange(src, highlightRange) - - // Some diagnostics may have a useful top-level context to add to - // the code snippet output. - contextStr := hcled.ContextString(file, offset-1) - if contextStr != "" { - diagnostic.Snippet.Context = &contextStr - } - - // Build the string of the code snippet, tracking at which byte of - // the file the snippet starts. 
- var codeStartByte int - sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) - var code strings.Builder - for sc.Scan() { - lineRange := sc.Range() - if lineRange.Overlaps(snippetRange) { - if codeStartByte == 0 && code.Len() == 0 { - codeStartByte = lineRange.Start.Byte - } - code.Write(lineRange.SliceBytes(src)) - code.WriteRune('\n') - } - } - codeStr := strings.TrimSuffix(code.String(), "\n") - diagnostic.Snippet.Code = codeStr - - // Calculate the start and end byte of the highlight range relative - // to the code snippet string. - start := highlightRange.Start.Byte - codeStartByte - end := start + (highlightRange.End.Byte - highlightRange.Start.Byte) - - // We can end up with some quirky results here in edge cases like - // when a source range starts or ends at a newline character, - // so we'll cap the results at the bounds of the highlight range - // so that consumers of this data don't need to contend with - // out-of-bounds errors themselves. - if start < 0 { - start = 0 - } else if start > len(codeStr) { - start = len(codeStr) - } - if end < 0 { - end = 0 - } else if end > len(codeStr) { - end = len(codeStr) - } - - diagnostic.Snippet.HighlightStartOffset = start - diagnostic.Snippet.HighlightEndOffset = end - - if fromExpr := diag.FromExpr(); fromExpr != nil { - // We may also be able to generate information about the dynamic - // values of relevant variables at the point of evaluation, then. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. 
- expr := fromExpr.Expression - ctx := fromExpr.EvalContext - vars := expr.Variables() - values := make([]DiagnosticExpressionValue, 0, len(vars)) - seen := make(map[string]struct{}, len(vars)) - includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag) - includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag) - Traversals: - for _, traversal := range vars { - for len(traversal) > 1 { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // Skip anything that generates errors, since we probably - // already have the same error in our diagnostics set - // already. - traversal = traversal[:len(traversal)-1] - continue - } - - traversalStr := traversalStr(traversal) - if _, exists := seen[traversalStr]; exists { - continue Traversals // don't show duplicates when the same variable is referenced multiple times - } - value := DiagnosticExpressionValue{ - Traversal: traversalStr, - } - switch { - case val.HasMark(marks.Sensitive): - // We only mention a sensitive value if the diagnostic - // we're rendering is explicitly marked as being - // caused by sensitive values, because otherwise - // readers tend to be misled into thinking the error - // is caused by the sensitive value even when it isn't. - if !includeSensitive { - continue Traversals - } - // Even when we do mention one, we keep it vague - // in order to minimize the chance of giving away - // whatever was sensitive about it. - value.Statement = "has a sensitive value" - case !val.IsKnown(): - // We'll avoid saying anything about unknown or - // "known after apply" unless the diagnostic is - // explicitly marked as being caused by unknown - // values, because otherwise readers tend to be - // misled into thinking the error is caused by the - // unknown value even when it isn't. 
- if ty := val.Type(); ty != cty.DynamicPseudoType { - if includeUnknown { - value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName()) - } else { - value.Statement = fmt.Sprintf("is a %s", ty.FriendlyName()) - } - } else { - if !includeUnknown { - continue Traversals - } - value.Statement = "will be known only after apply" - } - default: - value.Statement = fmt.Sprintf("is %s", compactValueStr(val)) - } - values = append(values, value) - seen[traversalStr] = struct{}{} - } - } - sort.Slice(values, func(i, j int) bool { - return values[i].Traversal < values[j].Traversal - }) - diagnostic.Snippet.Values = values - - if callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag); callInfo != nil && callInfo.CalledFunctionName() != "" { - calledAs := callInfo.CalledFunctionName() - baseName := calledAs - if idx := strings.LastIndex(baseName, "::"); idx >= 0 { - baseName = baseName[idx+2:] - } - callInfo := &DiagnosticFunctionCall{ - CalledAs: calledAs, - } - if f, ok := ctx.Functions[calledAs]; ok { - callInfo.Signature = DescribeFunction(baseName, f) - } - diagnostic.Snippet.FunctionCall = callInfo - } - - } - - } - } - - return diagnostic -} - -func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { - filename := rng.Filename - offset := rng.Start.Byte - - // We need to re-parse here to get a *hcl.File we can interrogate. This - // is not awesome since we presumably already parsed the file earlier too, - // but this re-parsing is architecturally simpler than retaining all of - // the hcl.File objects and we only do this in the case of an error anyway - // so the overhead here is not a big problem. - parser := hclparse.NewParser() - var file *hcl.File - - // Ignore diagnostics here as there is nothing we can do with them. 
- if strings.HasSuffix(filename, ".json") { - file, _ = parser.ParseJSON(src, filename) - } else { - file, _ = parser.ParseHCL(src, filename) - } - - return file, offset -} - -// compactValueStr produces a compact, single-line summary of a given value -// that is suitable for display in the UI. -// -// For primitives it returns a full representation, while for more complex -// types it instead summarizes the type, size, etc to produce something -// that is hopefully still somewhat useful but not as verbose as a rendering -// of the entire data structure. -func compactValueStr(val cty.Value) string { - // This is a specialized subset of value rendering tailored to producing - // helpful but concise messages in diagnostics. It is not comprehensive - // nor intended to be used for other purposes. - - if val.HasMark(marks.Sensitive) { - // We check this in here just to make sure, but note that the caller - // of compactValueStr ought to have already checked this and skipped - // calling into compactValueStr anyway, so this shouldn't actually - // be reachable. - return "(sensitive value)" - } - - // WARNING: We've only checked that the value isn't sensitive _shallowly_ - // here, and so we must never show any element values from complex types - // in here. However, it's fine to show map keys and attribute names because - // those are never sensitive in isolation: the entire value would be - // sensitive in that case. - - ty := val.Type() - switch { - case val.IsNull(): - return "null" - case !val.IsKnown(): - // Should never happen here because we should filter before we get - // in here, but we'll do something reasonable rather than panic. 
- return "(not yet known)" - case ty == cty.Bool: - if val.True() { - return "true" - } - return "false" - case ty == cty.Number: - bf := val.AsBigFloat() - return bf.Text('g', 10) - case ty == cty.String: - // Go string syntax is not exactly the same as HCL native string syntax, - // but we'll accept the minor edge-cases where this is different here - // for now, just to get something reasonable here. - return fmt.Sprintf("%q", val.AsString()) - case ty.IsCollectionType() || ty.IsTupleType(): - l := val.LengthInt() - switch l { - case 0: - return "empty " + ty.FriendlyName() - case 1: - return ty.FriendlyName() + " with 1 element" - default: - return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) - } - case ty.IsObjectType(): - atys := ty.AttributeTypes() - l := len(atys) - switch l { - case 0: - return "object with no attributes" - case 1: - var name string - for k := range atys { - name = k - } - return fmt.Sprintf("object with 1 attribute %q", name) - default: - return fmt.Sprintf("object with %d attributes", l) - } - default: - return ty.FriendlyName() - } -} - -// traversalStr produces a representation of an HCL traversal that is compact, -// resembles HCL native syntax, and is suitable for display in the UI. -func traversalStr(traversal hcl.Traversal) string { - // This is a specialized subset of traversal rendering tailored to - // producing helpful contextual messages in diagnostics. It is not - // comprehensive nor intended to be used for other purposes. 
- - var buf bytes.Buffer - for _, step := range traversal { - switch tStep := step.(type) { - case hcl.TraverseRoot: - buf.WriteString(tStep.Name) - case hcl.TraverseAttr: - buf.WriteByte('.') - buf.WriteString(tStep.Name) - case hcl.TraverseIndex: - buf.WriteByte('[') - if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { - buf.WriteString(compactValueStr(tStep.Key)) - } else { - // We'll just use a placeholder for more complex values, - // since otherwise our result could grow ridiculously long. - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} diff --git a/internal/command/views/json/diagnostic_test.go b/internal/command/views/json/diagnostic_test.go deleted file mode 100644 index 422dade9b3cb..000000000000 --- a/internal/command/views/json/diagnostic_test.go +++ /dev/null @@ -1,951 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestNewDiagnostic(t *testing.T) { - // Common HCL for diags with source ranges. 
This does not have any real - // semantic errors, but we can synthesize fake HCL errors which will - // exercise the diagnostic rendering code using this - sources := map[string][]byte{ - "test.tf": []byte(`resource "test_resource" "test" { - foo = var.boop["hello!"] - bar = { - baz = maybe - } -} -`), - "short.tf": []byte("bad source code"), - "odd-comment.tf": []byte("foo\n\n#\n"), - "values.tf": []byte(`[ - var.a, - var.b, - var.c, - var.d, - var.e, - var.f, - var.g, - var.h, - var.i, - var.j, - var.k, -] -`), - } - testCases := map[string]struct { - diag interface{} // allow various kinds of diags - want *Diagnostic - }{ - "sourceless warning": { - tfdiags.Sourceless( - tfdiags.Warning, - "Oh no", - "Something is broken", - ), - &Diagnostic{ - Severity: "warning", - Summary: "Oh no", - Detail: "Something is broken", - }, - }, - "error with source code unavailable": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Bad news", - Detail: "It went wrong", - Subject: &hcl.Range{ - Filename: "modules/oops/missing.tf", - Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, - End: hcl.Pos{Line: 2, Column: 12, Byte: 33}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Bad news", - Detail: "It went wrong", - Range: &DiagnosticRange{ - Filename: "modules/oops/missing.tf", - Start: Pos{ - Line: 1, - Column: 6, - Byte: 5, - }, - End: Pos{ - Line: 2, - Column: 12, - Byte: 33, - }, - }, - }, - }, - "error with source code subject": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Tiny explosion", - Detail: "Unexpected detonation while parsing", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, - End: hcl.Pos{Line: 1, Column: 25, Byte: 24}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Tiny explosion", - Detail: "Unexpected detonation while parsing", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 1, - Column: 10, - Byte: 9, - }, - End: Pos{ - Line: 1, - Column: 25, - Byte: 
24, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: `resource "test_resource" "test" {`, - StartLine: 1, - HighlightStartOffset: 9, - HighlightEndOffset: 24, - Values: []DiagnosticExpressionValue{}, - }, - }, - }, - "error with source code subject but no context": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Nonsense input", - Detail: "What you wrote makes no sense", - Subject: &hcl.Range{ - Filename: "short.tf", - Start: hcl.Pos{Line: 1, Column: 5, Byte: 4}, - End: hcl.Pos{Line: 1, Column: 10, Byte: 9}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Nonsense input", - Detail: "What you wrote makes no sense", - Range: &DiagnosticRange{ - Filename: "short.tf", - Start: Pos{ - Line: 1, - Column: 5, - Byte: 4, - }, - End: Pos{ - Line: 1, - Column: 10, - Byte: 9, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: nil, - Code: (`bad source code`), - StartLine: (1), - HighlightStartOffset: (4), - HighlightEndOffset: (9), - Values: []DiagnosticExpressionValue{}, - }, - }, - }, - "error with multi-line snippet": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "In this house we respect booleans", - Detail: "True or false, there is no maybe", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 4, Column: 11, Byte: 81}, - End: hcl.Pos{Line: 4, Column: 16, Byte: 86}, - }, - Context: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 3, Column: 3, Byte: 63}, - End: hcl.Pos{Line: 5, Column: 4, Byte: 90}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "In this house we respect booleans", - Detail: "True or false, there is no maybe", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 4, - Column: 11, - Byte: 81, - }, - End: Pos{ - Line: 4, - Column: 16, - Byte: 86, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: " bar = {\n baz = maybe\n }", - StartLine: 3, - HighlightStartOffset: 
20, - HighlightEndOffset: 25, - Values: []DiagnosticExpressionValue{}, - }, - }, - }, - "error with empty highlight range at end of source code": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "You forgot something", - Detail: "Please finish your thought", - Subject: &hcl.Range{ - Filename: "short.tf", - Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, - End: hcl.Pos{Line: 1, Column: 16, Byte: 15}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "You forgot something", - Detail: "Please finish your thought", - Range: &DiagnosticRange{ - Filename: "short.tf", - Start: Pos{ - Line: 1, - Column: 16, - Byte: 15, - }, - End: Pos{ - Line: 1, - Column: 17, - Byte: 16, - }, - }, - Snippet: &DiagnosticSnippet{ - Code: ("bad source code"), - StartLine: (1), - HighlightStartOffset: (15), - HighlightEndOffset: (15), - Values: []DiagnosticExpressionValue{}, - }, - }, - }, - "error with unset highlight end position": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "There is no end", - Detail: "But there is a beginning", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, - End: hcl.Pos{Line: 0, Column: 0, Byte: 0}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "There is no end", - Detail: "But there is a beginning", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 1, - Column: 16, - Byte: 15, - }, - End: Pos{ - Line: 1, - Column: 17, - Byte: 16, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: `resource "test_resource" "test" {`, - StartLine: 1, - HighlightStartOffset: 15, - HighlightEndOffset: 16, - Values: []DiagnosticExpressionValue{}, - }, - }, - }, - "error whose range starts at a newline": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid newline", - Detail: "How awkward!", - Subject: &hcl.Range{ - Filename: "odd-comment.tf", - Start: hcl.Pos{Line: 2, Column: 5, Byte: 4}, - End: hcl.Pos{Line: 3, 
Column: 1, Byte: 6}, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Invalid newline", - Detail: "How awkward!", - Range: &DiagnosticRange{ - Filename: "odd-comment.tf", - Start: Pos{ - Line: 2, - Column: 5, - Byte: 4, - }, - End: Pos{ - Line: 3, - Column: 1, - Byte: 6, - }, - }, - Snippet: &DiagnosticSnippet{ - Code: `#`, - StartLine: 2, - Values: []DiagnosticExpressionValue{}, - - // Due to the range starting at a newline on a blank - // line, we end up stripping off the initial newline - // to produce only a one-line snippet. That would - // therefore cause the start offset to naturally be - // -1, just before the Code we returned, but then we - // force it to zero so that the result will still be - // in range for a byte-oriented slice of Code. - HighlightStartOffset: 0, - HighlightEndOffset: 1, - }, - }, - }, - "error with source code subject and known expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.StringVal("bleurgh"), - }), - }), - }, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - 
HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.boop["hello!"]`, - Statement: `is "bleurgh"`, - }, - }, - }, - }, - }, - "error with source code subject and expression referring to sensitive value": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), - }), - }), - }, - }, - Extra: diagnosticCausedBySensitive(true), - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.boop["hello!"]`, - Statement: `has a sensitive value`, - }, - }, - }, - }, - }, - "error with source code subject and expression referring to sensitive value when not caused by sensitive values": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, 
- Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), - }), - }), - }, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - // The sensitive value is filtered out because this is - // not a sensitive-value-related diagnostic message. 
- }, - }, - }, - }, - "error with source code subject and expression referring to a collection containing a sensitive value": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), - }), - }), - }, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.boop`, - Statement: `is map of string with 1 element`, - }, - }, - }, - }, - }, - "error with source code subject and unknown string expression": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": 
cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.UnknownVal(cty.String), - }), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.boop["hello!"]`, - Statement: `is a string, known only after apply`, - }, - }, - }, - }, - }, - "error with source code subject and unknown expression of unknown type": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.UnknownVal(cty.DynamicPseudoType), - }), - }), - }, - }, - Extra: diagnosticCausedByUnknown(true), - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = 
var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.boop["hello!"]`, - Statement: `will be known only after apply`, - }, - }, - }, - }, - }, - "error with source code subject and unknown expression of unknown type when not caused by unknown values": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Subject: &hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, - End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, - }, - Expression: hcltest.MockExprTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "boop"}, - hcl.TraverseIndex{Key: cty.StringVal("hello!")}, - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.MapVal(map[string]cty.Value{ - "hello!": cty.UnknownVal(cty.DynamicPseudoType), - }), - }), - }, - }, - }, - &Diagnostic{ - Severity: "error", - Summary: "Wrong noises", - Detail: "Biological sounds are not allowed", - Range: &DiagnosticRange{ - Filename: "test.tf", - Start: Pos{ - Line: 2, - Column: 9, - Byte: 42, - }, - End: Pos{ - Line: 2, - Column: 26, - Byte: 59, - }, - }, - Snippet: &DiagnosticSnippet{ - Context: strPtr(`resource "test_resource" "test"`), - Code: (` foo = var.boop["hello!"]`), - StartLine: (2), - HighlightStartOffset: (8), - HighlightEndOffset: (25), - Values: []DiagnosticExpressionValue{ - // The unknown value is filtered out because this is - // not an unknown-value-related diagnostic message. 
- }, - }, - }, - }, - "error with source code subject with multiple expression values": { - &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Catastrophic failure", - Detail: "Basically, everything went wrong", - Subject: &hcl.Range{ - Filename: "values.tf", - Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, - End: hcl.Pos{Line: 13, Column: 2, Byte: 102}, - }, - Expression: hcltest.MockExprList([]hcl.Expression{ - hcltest.MockExprTraversalSrc("var.a"), - hcltest.MockExprTraversalSrc("var.b"), - hcltest.MockExprTraversalSrc("var.c"), - hcltest.MockExprTraversalSrc("var.d"), - hcltest.MockExprTraversalSrc("var.e"), - hcltest.MockExprTraversalSrc("var.f"), - hcltest.MockExprTraversalSrc("var.g"), - hcltest.MockExprTraversalSrc("var.h"), - hcltest.MockExprTraversalSrc("var.i"), - hcltest.MockExprTraversalSrc("var.j"), - hcltest.MockExprTraversalSrc("var.k"), - }), - EvalContext: &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - "b": cty.NumberFloatVal(123.45), - "c": cty.NullVal(cty.String), - "d": cty.StringVal("secret").Mark(marks.Sensitive), - "e": cty.False, - "f": cty.ListValEmpty(cty.String), - "g": cty.MapVal(map[string]cty.Value{ - "boop": cty.StringVal("beep"), - }), - "h": cty.ListVal([]cty.Value{ - cty.StringVal("boop"), - cty.StringVal("beep"), - cty.StringVal("blorp"), - }), - "i": cty.EmptyObjectVal, - "j": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - "k": cty.ObjectVal(map[string]cty.Value{ - "a": cty.True, - "b": cty.False, - }), - }), - }, - }, - Extra: diagnosticCausedBySensitive(true), - }, - &Diagnostic{ - Severity: "error", - Summary: "Catastrophic failure", - Detail: "Basically, everything went wrong", - Range: &DiagnosticRange{ - Filename: "values.tf", - Start: Pos{ - Line: 1, - Column: 1, - Byte: 0, - }, - End: Pos{ - Line: 13, - Column: 2, - Byte: 102, - }, - }, - Snippet: &DiagnosticSnippet{ - Code: `[ - var.a, - var.b, - var.c, - var.d, - var.e, 
- var.f, - var.g, - var.h, - var.i, - var.j, - var.k, -]`, - StartLine: (1), - HighlightStartOffset: (0), - HighlightEndOffset: (102), - Values: []DiagnosticExpressionValue{ - { - Traversal: `var.a`, - Statement: `is true`, - }, - { - Traversal: `var.b`, - Statement: `is 123.45`, - }, - { - Traversal: `var.c`, - Statement: `is null`, - }, - { - Traversal: `var.d`, - Statement: `has a sensitive value`, - }, - { - Traversal: `var.e`, - Statement: `is false`, - }, - { - Traversal: `var.f`, - Statement: `is empty list of string`, - }, - { - Traversal: `var.g`, - Statement: `is map of string with 1 element`, - }, - { - Traversal: `var.h`, - Statement: `is list of string with 3 elements`, - }, - { - Traversal: `var.i`, - Statement: `is object with no attributes`, - }, - { - Traversal: `var.j`, - Statement: `is object with 1 attribute "foo"`, - }, - { - Traversal: `var.k`, - Statement: `is object with 2 attributes`, - }, - }, - }, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - // Convert the diag into a tfdiags.Diagnostic - var diags tfdiags.Diagnostics - diags = diags.Append(tc.diag) - - got := NewDiagnostic(diags[0], sources) - if !cmp.Equal(tc.want, got) { - t.Fatalf("wrong result\n:%s", cmp.Diff(tc.want, got)) - } - }) - - t.Run(fmt.Sprintf("golden test for %s", name), func(t *testing.T) { - // Convert the diag into a tfdiags.Diagnostic - var diags tfdiags.Diagnostics - diags = diags.Append(tc.diag) - - got := NewDiagnostic(diags[0], sources) - - // Render the diagnostic to indented JSON - gotBytes, err := json.MarshalIndent(got, "", " ") - if err != nil { - t.Fatal(err) - } - - // Compare against the golden reference - filename := path.Join( - "testdata", - "diagnostic", - fmt.Sprintf("%s.json", strings.ReplaceAll(name, " ", "-")), - ) - - // Generate golden reference by uncommenting the next two lines: - // gotBytes = append(gotBytes, '\n') - // os.WriteFile(filename, gotBytes, 0644) - - wantFile, err := os.Open(filename) - 
if err != nil { - t.Fatalf("failed to open golden file: %s", err) - } - defer wantFile.Close() - wantBytes, err := ioutil.ReadAll(wantFile) - if err != nil { - t.Fatalf("failed to read output file: %s", err) - } - - // Don't care about leading or trailing whitespace - gotString := strings.TrimSpace(string(gotBytes)) - wantString := strings.TrimSpace(string(wantBytes)) - - if !cmp.Equal(wantString, gotString) { - t.Fatalf("wrong result\n:%s", cmp.Diff(wantString, gotString)) - } - }) - } -} - -// Helper function to make constructing literal Diagnostics easier. There -// are fields which are pointer-to-string to ensure that the rendered JSON -// results in `null` for an empty value, rather than `""`. -func strPtr(s string) *string { return &s } - -// diagnosticCausedByUnknown is a testing helper for exercising our logic -// for selectively showing unknown values alongside our source snippets for -// diagnostics that are explicitly marked as being caused by unknown values. -type diagnosticCausedByUnknown bool - -var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) - -func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { - return bool(e) -} - -// diagnosticCausedBySensitive is a testing helper for exercising our logic -// for selectively showing sensitive values alongside our source snippets for -// diagnostics that are explicitly marked as being caused by sensitive values. 
-type diagnosticCausedBySensitive bool - -var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) - -func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { - return bool(e) -} diff --git a/internal/command/views/json/hook.go b/internal/command/views/json/hook.go deleted file mode 100644 index 142a4d1fd199..000000000000 --- a/internal/command/views/json/hook.go +++ /dev/null @@ -1,376 +0,0 @@ -package json - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -type Hook interface { - HookType() MessageType - String() string -} - -// ApplyStart: triggered by PreApply hook -type applyStart struct { - Resource ResourceAddr `json:"resource"` - Action ChangeAction `json:"action"` - IDKey string `json:"id_key,omitempty"` - IDValue string `json:"id_value,omitempty"` - actionVerb string -} - -var _ Hook = (*applyStart)(nil) - -func (h *applyStart) HookType() MessageType { - return MessageApplyStart -} - -func (h *applyStart) String() string { - var id string - if h.IDKey != "" && h.IDValue != "" { - id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) - } - return fmt.Sprintf("%s: %s...%s", h.Resource.Addr, h.actionVerb, id) -} - -func NewApplyStart(addr addrs.AbsResourceInstance, action plans.Action, idKey string, idValue string) Hook { - hook := &applyStart{ - Resource: newResourceAddr(addr), - Action: changeAction(action), - IDKey: idKey, - IDValue: idValue, - actionVerb: startActionVerb(action), - } - - return hook -} - -// ApplyProgress: currently triggered by a timer started on PreApply. In -// future, this might also be triggered by provider progress reporting. 
-type applyProgress struct { - Resource ResourceAddr `json:"resource"` - Action ChangeAction `json:"action"` - Elapsed float64 `json:"elapsed_seconds"` - actionVerb string - elapsed time.Duration -} - -var _ Hook = (*applyProgress)(nil) - -func (h *applyProgress) HookType() MessageType { - return MessageApplyProgress -} - -func (h *applyProgress) String() string { - return fmt.Sprintf("%s: Still %s... [%s elapsed]", h.Resource.Addr, h.actionVerb, h.elapsed) -} - -func NewApplyProgress(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { - return &applyProgress{ - Resource: newResourceAddr(addr), - Action: changeAction(action), - Elapsed: elapsed.Seconds(), - actionVerb: progressActionVerb(action), - elapsed: elapsed, - } -} - -// ApplyComplete: triggered by PostApply hook -type applyComplete struct { - Resource ResourceAddr `json:"resource"` - Action ChangeAction `json:"action"` - IDKey string `json:"id_key,omitempty"` - IDValue string `json:"id_value,omitempty"` - Elapsed float64 `json:"elapsed_seconds"` - actionNoun string - elapsed time.Duration -} - -var _ Hook = (*applyComplete)(nil) - -func (h *applyComplete) HookType() MessageType { - return MessageApplyComplete -} - -func (h *applyComplete) String() string { - var id string - if h.IDKey != "" && h.IDValue != "" { - id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) - } - return fmt.Sprintf("%s: %s complete after %s%s", h.Resource.Addr, h.actionNoun, h.elapsed, id) -} - -func NewApplyComplete(addr addrs.AbsResourceInstance, action plans.Action, idKey, idValue string, elapsed time.Duration) Hook { - return &applyComplete{ - Resource: newResourceAddr(addr), - Action: changeAction(action), - IDKey: idKey, - IDValue: idValue, - Elapsed: elapsed.Seconds(), - actionNoun: actionNoun(action), - elapsed: elapsed, - } -} - -// ApplyErrored: triggered by PostApply hook on failure. This will be followed -// by diagnostics when the apply finishes. 
-type applyErrored struct { - Resource ResourceAddr `json:"resource"` - Action ChangeAction `json:"action"` - Elapsed float64 `json:"elapsed_seconds"` - actionNoun string - elapsed time.Duration -} - -var _ Hook = (*applyErrored)(nil) - -func (h *applyErrored) HookType() MessageType { - return MessageApplyErrored -} - -func (h *applyErrored) String() string { - return fmt.Sprintf("%s: %s errored after %s", h.Resource.Addr, h.actionNoun, h.elapsed) -} - -func NewApplyErrored(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { - return &applyErrored{ - Resource: newResourceAddr(addr), - Action: changeAction(action), - Elapsed: elapsed.Seconds(), - actionNoun: actionNoun(action), - elapsed: elapsed, - } -} - -// ProvisionStart: triggered by PreProvisionInstanceStep hook -type provisionStart struct { - Resource ResourceAddr `json:"resource"` - Provisioner string `json:"provisioner"` -} - -var _ Hook = (*provisionStart)(nil) - -func (h *provisionStart) HookType() MessageType { - return MessageProvisionStart -} - -func (h *provisionStart) String() string { - return fmt.Sprintf("%s: Provisioning with '%s'...", h.Resource.Addr, h.Provisioner) -} - -func NewProvisionStart(addr addrs.AbsResourceInstance, provisioner string) Hook { - return &provisionStart{ - Resource: newResourceAddr(addr), - Provisioner: provisioner, - } -} - -// ProvisionProgress: triggered by ProvisionOutput hook -type provisionProgress struct { - Resource ResourceAddr `json:"resource"` - Provisioner string `json:"provisioner"` - Output string `json:"output"` -} - -var _ Hook = (*provisionProgress)(nil) - -func (h *provisionProgress) HookType() MessageType { - return MessageProvisionProgress -} - -func (h *provisionProgress) String() string { - return fmt.Sprintf("%s: (%s): %s", h.Resource.Addr, h.Provisioner, h.Output) -} - -func NewProvisionProgress(addr addrs.AbsResourceInstance, provisioner string, output string) Hook { - return &provisionProgress{ - Resource: 
newResourceAddr(addr), - Provisioner: provisioner, - Output: output, - } -} - -// ProvisionComplete: triggered by PostProvisionInstanceStep hook -type provisionComplete struct { - Resource ResourceAddr `json:"resource"` - Provisioner string `json:"provisioner"` -} - -var _ Hook = (*provisionComplete)(nil) - -func (h *provisionComplete) HookType() MessageType { - return MessageProvisionComplete -} - -func (h *provisionComplete) String() string { - return fmt.Sprintf("%s: (%s) Provisioning complete", h.Resource.Addr, h.Provisioner) -} - -func NewProvisionComplete(addr addrs.AbsResourceInstance, provisioner string) Hook { - return &provisionComplete{ - Resource: newResourceAddr(addr), - Provisioner: provisioner, - } -} - -// ProvisionErrored: triggered by PostProvisionInstanceStep hook on failure. -// This will be followed by diagnostics when the apply finishes. -type provisionErrored struct { - Resource ResourceAddr `json:"resource"` - Provisioner string `json:"provisioner"` -} - -var _ Hook = (*provisionErrored)(nil) - -func (h *provisionErrored) HookType() MessageType { - return MessageProvisionErrored -} - -func (h *provisionErrored) String() string { - return fmt.Sprintf("%s: (%s) Provisioning errored", h.Resource.Addr, h.Provisioner) -} - -func NewProvisionErrored(addr addrs.AbsResourceInstance, provisioner string) Hook { - return &provisionErrored{ - Resource: newResourceAddr(addr), - Provisioner: provisioner, - } -} - -// RefreshStart: triggered by PreRefresh hook -type refreshStart struct { - Resource ResourceAddr `json:"resource"` - IDKey string `json:"id_key,omitempty"` - IDValue string `json:"id_value,omitempty"` -} - -var _ Hook = (*refreshStart)(nil) - -func (h *refreshStart) HookType() MessageType { - return MessageRefreshStart -} - -func (h *refreshStart) String() string { - var id string - if h.IDKey != "" && h.IDValue != "" { - id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) - } - return fmt.Sprintf("%s: Refreshing state...%s", h.Resource.Addr, id) 
-} - -func NewRefreshStart(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { - return &refreshStart{ - Resource: newResourceAddr(addr), - IDKey: idKey, - IDValue: idValue, - } -} - -// RefreshComplete: triggered by PostRefresh hook -type refreshComplete struct { - Resource ResourceAddr `json:"resource"` - IDKey string `json:"id_key,omitempty"` - IDValue string `json:"id_value,omitempty"` -} - -var _ Hook = (*refreshComplete)(nil) - -func (h *refreshComplete) HookType() MessageType { - return MessageRefreshComplete -} - -func (h *refreshComplete) String() string { - var id string - if h.IDKey != "" && h.IDValue != "" { - id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) - } - return fmt.Sprintf("%s: Refresh complete%s", h.Resource.Addr, id) -} - -func NewRefreshComplete(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { - return &refreshComplete{ - Resource: newResourceAddr(addr), - IDKey: idKey, - IDValue: idValue, - } -} - -// Convert the subset of plans.Action values we expect to receive into a -// present-tense verb for the applyStart hook message. -func startActionVerb(action plans.Action) string { - switch action { - case plans.Create: - return "Creating" - case plans.Update: - return "Modifying" - case plans.Delete: - return "Destroying" - case plans.Read: - return "Refreshing" - case plans.CreateThenDelete, plans.DeleteThenCreate: - // This is not currently possible to reach, as we receive separate - // passes for create and delete - return "Replacing" - case plans.NoOp: - // This should never be possible: a no-op planned change should not - // be applied. We'll fall back to "Applying". - fallthrough - default: - return "Applying" - } -} - -// Convert the subset of plans.Action values we expect to receive into a -// present-tense verb for the applyProgress hook message. This will be -// prefixed with "Still ", so it is lower-case. 
-func progressActionVerb(action plans.Action) string { - switch action { - case plans.Create: - return "creating" - case plans.Update: - return "modifying" - case plans.Delete: - return "destroying" - case plans.Read: - return "refreshing" - case plans.CreateThenDelete, plans.DeleteThenCreate: - // This is not currently possible to reach, as we receive separate - // passes for create and delete - return "replacing" - case plans.NoOp: - // This should never be possible: a no-op planned change should not - // be applied. We'll fall back to "applying". - fallthrough - default: - return "applying" - } -} - -// Convert the subset of plans.Action values we expect to receive into a -// noun for the applyComplete and applyErrored hook messages. This will be -// combined into a phrase like "Creation complete after 1m4s". -func actionNoun(action plans.Action) string { - switch action { - case plans.Create: - return "Creation" - case plans.Update: - return "Modifications" - case plans.Delete: - return "Destruction" - case plans.Read: - return "Refresh" - case plans.CreateThenDelete, plans.DeleteThenCreate: - // This is not currently possible to reach, as we receive separate - // passes for create and delete - return "Replacement" - case plans.NoOp: - // This should never be possible: a no-op planned change should not - // be applied. We'll fall back to "Apply". 
- fallthrough - default: - return "Apply" - } -} diff --git a/internal/command/views/json/output.go b/internal/command/views/json/output.go deleted file mode 100644 index c9648c56260b..000000000000 --- a/internal/command/views/json/output.go +++ /dev/null @@ -1,75 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -type Output struct { - Sensitive bool `json:"sensitive"` - Type json.RawMessage `json:"type,omitempty"` - Value json.RawMessage `json:"value,omitempty"` - Action ChangeAction `json:"action,omitempty"` -} - -type Outputs map[string]Output - -func OutputsFromMap(outputValues map[string]*states.OutputValue) (Outputs, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - outputs := make(map[string]Output, len(outputValues)) - - for name, ov := range outputValues { - unmarked, _ := ov.Value.UnmarkDeep() - value, err := ctyjson.Marshal(unmarked, unmarked.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Error serializing output %q", name), - fmt.Sprintf("Error: %s", err), - )) - return nil, diags - } - valueType, err := ctyjson.MarshalType(unmarked.Type()) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - var redactedValue json.RawMessage - if !ov.Sensitive { - redactedValue = json.RawMessage(value) - } - - outputs[name] = Output{ - Sensitive: ov.Sensitive, - Type: json.RawMessage(valueType), - Value: redactedValue, - } - } - - return outputs, nil -} - -func OutputsFromChanges(changes []*plans.OutputChangeSrc) Outputs { - outputs := make(map[string]Output, len(changes)) - - for _, change := range changes { - outputs[change.Addr.OutputValue.Name] = Output{ - Sensitive: change.Sensitive, - Action: changeAction(change.Action), - } - } - - return outputs -} - -func (o 
Outputs) String() string { - return fmt.Sprintf("Outputs: %d", len(o)) -} diff --git a/internal/command/views/json/output_test.go b/internal/command/views/json/output_test.go deleted file mode 100644 index 0fa15e22d6dd..000000000000 --- a/internal/command/views/json/output_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package json - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestOutputsFromMap(t *testing.T) { - got, diags := OutputsFromMap(map[string]*states.OutputValue{ - // Normal non-sensitive output - "boop": { - Value: cty.NumberIntVal(1234), - }, - // Sensitive string output - "beep": { - Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), - Sensitive: true, - }, - // Sensitive object output which is marked at the leaf - "blorp": { - Value: cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "b": cty.ObjectVal(map[string]cty.Value{ - "c": cty.StringVal("oh, hi").Mark(marks.Sensitive), - }), - }), - }), - Sensitive: true, - }, - // Null value - "honk": { - Value: cty.NullVal(cty.Map(cty.Bool)), - }, - }) - if len(diags) > 0 { - t.Fatal(diags.Err()) - } - - want := Outputs{ - "boop": { - Sensitive: false, - Type: json.RawMessage(`"number"`), - Value: json.RawMessage(`1234`), - }, - "beep": { - Sensitive: true, - Type: json.RawMessage(`"string"`), - }, - "blorp": { - Sensitive: true, - Type: json.RawMessage(`["object",{"a":["object",{"b":["object",{"c":"string"}]}]}]`), - }, - "honk": { - Sensitive: false, - Type: json.RawMessage(`["map","bool"]`), - Value: json.RawMessage(`null`), - }, - } - - if !cmp.Equal(want, got) { - t.Fatalf("unexpected result\n%s", cmp.Diff(want, got)) - } -} - -func TestOutputsFromChanges(t *testing.T) { - root := 
addrs.RootModuleInstance - num, err := plans.NewDynamicValue(cty.NumberIntVal(1234), cty.Number) - if err != nil { - t.Fatalf("unexpected error creating dynamic value: %v", err) - } - str, err := plans.NewDynamicValue(cty.StringVal("1234"), cty.String) - if err != nil { - t.Fatalf("unexpected error creating dynamic value: %v", err) - } - - got := OutputsFromChanges([]*plans.OutputChangeSrc{ - // Unchanged output "boop", value 1234 - { - Addr: root.OutputValue("boop"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.NoOp, - Before: num, - After: num, - }, - Sensitive: false, - }, - // New output "beep", value 1234 - { - Addr: root.OutputValue("beep"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - Before: nil, - After: num, - }, - Sensitive: false, - }, - // Deleted output "blorp", prior value 1234 - { - Addr: root.OutputValue("blorp"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - Before: num, - After: nil, - }, - Sensitive: false, - }, - // Updated output "honk", prior value 1234, new value "1234" - { - Addr: root.OutputValue("honk"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - Before: num, - After: str, - }, - Sensitive: false, - }, - // New sensitive output "secret", value "1234" - { - Addr: root.OutputValue("secret"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - Before: nil, - After: str, - }, - Sensitive: true, - }, - }) - - want := Outputs{ - "boop": { - Action: "noop", - Sensitive: false, - }, - "beep": { - Action: "create", - Sensitive: false, - }, - "blorp": { - Action: "delete", - Sensitive: false, - }, - "honk": { - Action: "update", - Sensitive: false, - }, - "secret": { - Action: "create", - Sensitive: true, - }, - } - - if !cmp.Equal(want, got) { - t.Fatalf("unexpected result\n%s", cmp.Diff(want, got)) - } -} - -func TestOutputs_String(t *testing.T) { - outputs := Outputs{ - "boop": { - Sensitive: false, - Type: json.RawMessage(`"number"`), - Value: json.RawMessage(`1234`), - }, - "beep": { - Sensitive: true, - 
Type: json.RawMessage(`"string"`), - Value: json.RawMessage(`"horse-battery"`), - }, - } - if got, want := outputs.String(), "Outputs: 2"; got != want { - t.Fatalf("unexpected value\n got: %q\nwant: %q", got, want) - } -} diff --git a/internal/command/views/output.go b/internal/command/views/output.go deleted file mode 100644 index 6545aaceec9b..000000000000 --- a/internal/command/views/output.go +++ /dev/null @@ -1,285 +0,0 @@ -package views - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/repl" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// The Output view renders either one or all outputs, depending on whether or -// not the name argument is empty. -type Output interface { - Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics - Diagnostics(diags tfdiags.Diagnostics) -} - -// NewOutput returns an initialized Output implementation for the given ViewType. -func NewOutput(vt arguments.ViewType, view *View) Output { - switch vt { - case arguments.ViewJSON: - return &OutputJSON{view: view} - case arguments.ViewRaw: - return &OutputRaw{view: view} - case arguments.ViewHuman: - return &OutputHuman{view: view} - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -// The OutputHuman implementation renders outputs in a format equivalent to HCL -// source. This uses the same formatting logic as in the console REPL. 
-type OutputHuman struct { - view *View -} - -var _ Output = (*OutputHuman)(nil) - -func (v *OutputHuman) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if len(outputs) == 0 { - diags = diags.Append(noOutputsWarning()) - return diags - } - - if name != "" { - output, ok := outputs[name] - if !ok { - diags = diags.Append(missingOutputError(name)) - return diags - } - result := repl.FormatValue(output.Value, 0) - v.view.streams.Println(result) - return nil - } - - outputBuf := new(bytes.Buffer) - if len(outputs) > 0 { - // Output the outputs in alphabetical order - keyLen := 0 - ks := make([]string, 0, len(outputs)) - for key := range outputs { - ks = append(ks, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(ks) - - for _, k := range ks { - v := outputs[k] - if v.Sensitive { - outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) - continue - } - - result := repl.FormatValue(v.Value, 0) - outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) - } - } - - v.view.streams.Println(strings.TrimSpace(outputBuf.String())) - - return nil -} - -func (v *OutputHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -// The OutputRaw implementation renders single string, number, or boolean -// output values directly and without quotes or other formatting. This is -// intended for use in shell scripting or other environments where the exact -// type of an output value is not important. 
-type OutputRaw struct { - view *View -} - -var _ Output = (*OutputRaw)(nil) - -func (v *OutputRaw) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if len(outputs) == 0 { - diags = diags.Append(noOutputsWarning()) - return diags - } - - if name == "" { - diags = diags.Append(fmt.Errorf("Raw output format is only supported for single outputs")) - return diags - } - - output, ok := outputs[name] - if !ok { - diags = diags.Append(missingOutputError(name)) - return diags - } - - strV, err := convert.Convert(output.Value, cty.String) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported value for raw output", - fmt.Sprintf( - "The -raw option only supports strings, numbers, and boolean values, but output value %q is %s.\n\nUse the -json option for machine-readable representations of output values that have complex types.", - name, output.Value.Type().FriendlyName(), - ), - )) - return diags - } - if strV.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported value for raw output", - fmt.Sprintf( - "The value for output value %q is null, so -raw mode cannot print it.", - name, - ), - )) - return diags - } - if !strV.IsKnown() { - // Since we're working with values from the state it would be very - // odd to end up in here, but we'll handle it anyway to avoid a - // panic in case our rules somehow change in future. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported value for raw output", - fmt.Sprintf( - "The value for output value %q won't be known until after a successful terraform apply, so -raw mode cannot print it.", - name, - ), - )) - return diags - } - // If we get out here then we should have a valid string to print. - // We're writing it using Print here so that a shell caller will get - // exactly the value and no extra whitespace (including trailing newline). 
- v.view.streams.Print(strV.AsString()) - return nil -} - -func (v *OutputRaw) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -// The OutputJSON implementation renders outputs as JSON values. When rendering -// a single output, only the value is displayed. When rendering all outputs, -// the result is a JSON object with keys matching the output names and object -// values including type and sensitivity metadata. -type OutputJSON struct { - view *View -} - -var _ Output = (*OutputJSON)(nil) - -func (v *OutputJSON) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if name != "" { - output, ok := outputs[name] - if !ok { - diags = diags.Append(missingOutputError(name)) - return diags - } - value := output.Value - - jsonOutput, err := ctyjson.Marshal(value, value.Type()) - if err != nil { - diags = diags.Append(err) - return diags - } - - v.view.streams.Println(string(jsonOutput)) - - return nil - } - - // Due to a historical accident, the switch from state version 2 to - // 3 caused our JSON output here to be the full metadata about the - // outputs rather than just the output values themselves as we'd - // show in the single value case. We must now maintain that behavior - // for compatibility, so this is an emulation of the JSON - // serialization of outputs used in state format version 3. 
- type OutputMeta struct { - Sensitive bool `json:"sensitive"` - Type json.RawMessage `json:"type"` - Value json.RawMessage `json:"value"` - } - outputMetas := map[string]OutputMeta{} - - for n, os := range outputs { - jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) - if err != nil { - diags = diags.Append(err) - return diags - } - jsonType, err := ctyjson.MarshalType(os.Value.Type()) - if err != nil { - diags = diags.Append(err) - return diags - } - outputMetas[n] = OutputMeta{ - Sensitive: os.Sensitive, - Type: json.RawMessage(jsonType), - Value: json.RawMessage(jsonVal), - } - } - - jsonOutputs, err := json.MarshalIndent(outputMetas, "", " ") - if err != nil { - diags = diags.Append(err) - return diags - } - - v.view.streams.Println(string(jsonOutputs)) - - return nil -} - -func (v *OutputJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -// For text and raw output modes, an empty map of outputs is considered a -// separate and higher priority failure mode than an output not being present -// in a non-empty map. This warning diagnostic explains how this might have -// happened. -func noOutputsWarning() tfdiags.Diagnostic { - return tfdiags.Sourceless( - tfdiags.Warning, - "No outputs found", - "The state file either has no outputs defined, or all the defined "+ - "outputs are empty. Please define an output in your configuration "+ - "with the `output` keyword and run `terraform refresh` for it to "+ - "become available. If you are using interpolation, please verify "+ - "the interpolated value is not empty. You can use the "+ - "`terraform console` command to assist.", - ) -} - -// Attempting to display a missing output results in this failure, which -// includes suggestions on how to rectify the problem. 
-func missingOutputError(name string) tfdiags.Diagnostic { - return tfdiags.Sourceless( - tfdiags.Error, - fmt.Sprintf("Output %q not found", name), - "The output variable requested could not be found in the state "+ - "file. If you recently added this to your configuration, be "+ - "sure to run `terraform apply`, since the state won't be updated "+ - "with new output variables until that command is run.", - ) -} diff --git a/internal/command/views/output_test.go b/internal/command/views/output_test.go deleted file mode 100644 index 3307778e673d..000000000000 --- a/internal/command/views/output_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package views - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/zclconf/go-cty/cty" -) - -// Test various single output values for human-readable UI. Note that since -// OutputHuman defers to repl.FormatValue to render a single value, most of the -// test coverage should be in that package. 
-func TestOutputHuman_single(t *testing.T) { - testCases := map[string]struct { - value cty.Value - want string - wantErr bool - }{ - "string": { - value: cty.StringVal("hello"), - want: "\"hello\"\n", - }, - "list of maps": { - value: cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("value"), - "key2": cty.StringVal("value2"), - }), - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("value"), - }), - }), - want: `tolist([ - tomap({ - "key" = "value" - "key2" = "value2" - }), - tomap({ - "key" = "value" - }), -]) -`, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(arguments.ViewHuman, NewView(streams)) - - outputs := map[string]*states.OutputValue{ - "foo": {Value: tc.value}, - } - diags := v.Output("foo", outputs) - - if diags.HasErrors() { - if !tc.wantErr { - t.Fatalf("unexpected diagnostics: %s", diags) - } - } else if tc.wantErr { - t.Fatalf("succeeded, but want error") - } - - if got, want := done(t).Stdout(), tc.want; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - }) - } -} - -// Sensitive output values are rendered to the console intentionally when -// requesting a single output. 
-func TestOutput_sensitive(t *testing.T) { - testCases := map[string]arguments.ViewType{ - "human": arguments.ViewHuman, - "json": arguments.ViewJSON, - "raw": arguments.ViewRaw, - } - for name, vt := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(vt, NewView(streams)) - - outputs := map[string]*states.OutputValue{ - "foo": { - Value: cty.StringVal("secret"), - Sensitive: true, - }, - } - diags := v.Output("foo", outputs) - - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags) - } - - // Test for substring match here because we don't care about exact - // output format in this test, just the presence of the sensitive - // value. - if got, want := done(t).Stdout(), "secret"; !strings.Contains(got, want) { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - }) - } -} - -// Showing all outputs is supported by human and JSON output format. -func TestOutput_all(t *testing.T) { - outputs := map[string]*states.OutputValue{ - "foo": { - Value: cty.StringVal("secret"), - Sensitive: true, - }, - "bar": { - Value: cty.ListVal([]cty.Value{cty.True, cty.False, cty.True}), - }, - "baz": { - Value: cty.ObjectVal(map[string]cty.Value{ - "boop": cty.NumberIntVal(5), - "beep": cty.StringVal("true"), - }), - }, - } - - testCases := map[string]struct { - vt arguments.ViewType - want string - }{ - "human": { - arguments.ViewHuman, - `bar = tolist([ - true, - false, - true, -]) -baz = { - "beep" = "true" - "boop" = 5 -} -foo = -`, - }, - "json": { - arguments.ViewJSON, - `{ - "bar": { - "sensitive": false, - "type": [ - "list", - "bool" - ], - "value": [ - true, - false, - true - ] - }, - "baz": { - "sensitive": false, - "type": [ - "object", - { - "beep": "string", - "boop": "number" - } - ], - "value": { - "beep": "true", - "boop": 5 - } - }, - "foo": { - "sensitive": true, - "type": "string", - "value": "secret" - } -} -`, - }, - } - - for name, tc := range testCases { - t.Run(name, 
func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(tc.vt, NewView(streams)) - diags := v.Output("", outputs) - - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags) - } - - if got := done(t).Stdout(); got != tc.want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) - } - }) - } -} - -// JSON output format supports empty outputs by rendering an empty object -// without diagnostics. -func TestOutputJSON_empty(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(arguments.ViewJSON, NewView(streams)) - - diags := v.Output("", map[string]*states.OutputValue{}) - - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags) - } - - if got, want := done(t).Stdout(), "{}\n"; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } -} - -// Human and raw formats render a warning if there are no outputs. -func TestOutput_emptyWarning(t *testing.T) { - testCases := map[string]arguments.ViewType{ - "human": arguments.ViewHuman, - "raw": arguments.ViewRaw, - } - - for name, vt := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(vt, NewView(streams)) - - diags := v.Output("", map[string]*states.OutputValue{}) - - if got, want := done(t).Stdout(), ""; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - - if len(diags) != 1 { - t.Fatalf("expected 1 diagnostic, got %d", len(diags)) - } - - if diags.HasErrors() { - t.Fatalf("unexpected error diagnostics: %s", diags) - } - - if got, want := diags[0].Description().Summary, "No outputs found"; got != want { - t.Errorf("unexpected diagnostics: %s", diags) - } - }) - } -} - -// Raw output is a simple unquoted output format designed for shell scripts, -// which relies on the cty.AsString() implementation. This test covers -// formatting for supported value types. 
-func TestOutputRaw(t *testing.T) { - values := map[string]cty.Value{ - "str": cty.StringVal("bar"), - "multistr": cty.StringVal("bar\nbaz"), - "num": cty.NumberIntVal(2), - "bool": cty.True, - "obj": cty.EmptyObjectVal, - "null": cty.NullVal(cty.String), - "unknown": cty.UnknownVal(cty.String), - } - - tests := map[string]struct { - WantOutput string - WantErr bool - }{ - "str": {WantOutput: "bar"}, - "multistr": {WantOutput: "bar\nbaz"}, - "num": {WantOutput: "2"}, - "bool": {WantOutput: "true"}, - "obj": {WantErr: true}, - "null": {WantErr: true}, - "unknown": {WantErr: true}, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(arguments.ViewRaw, NewView(streams)) - - value := values[name] - outputs := map[string]*states.OutputValue{ - name: {Value: value}, - } - diags := v.Output(name, outputs) - - if diags.HasErrors() { - if !test.WantErr { - t.Fatalf("unexpected diagnostics: %s", diags) - } - } else if test.WantErr { - t.Fatalf("succeeded, but want error") - } - - if got, want := done(t).Stdout(), test.WantOutput; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - }) - } -} - -// Raw cannot render all outputs. 
-func TestOutputRaw_all(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(arguments.ViewRaw, NewView(streams)) - - outputs := map[string]*states.OutputValue{ - "foo": {Value: cty.StringVal("secret")}, - "bar": {Value: cty.True}, - } - diags := v.Output("", outputs) - - if got, want := done(t).Stdout(), ""; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - - if !diags.HasErrors() { - t.Fatalf("expected diagnostics, got %s", diags) - } - - if got, want := diags.Err().Error(), "Raw output format is only supported for single outputs"; got != want { - t.Errorf("unexpected diagnostics: %s", diags) - } -} - -// All outputs render an error if a specific output is requested which is -// missing from the map of outputs. -func TestOutput_missing(t *testing.T) { - testCases := map[string]arguments.ViewType{ - "human": arguments.ViewHuman, - "json": arguments.ViewJSON, - "raw": arguments.ViewRaw, - } - - for name, vt := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewOutput(vt, NewView(streams)) - - diags := v.Output("foo", map[string]*states.OutputValue{ - "bar": {Value: cty.StringVal("boop")}, - }) - - if len(diags) != 1 { - t.Fatalf("expected 1 diagnostic, got %d", len(diags)) - } - - if !diags.HasErrors() { - t.Fatalf("expected error diagnostics, got %s", diags) - } - - if got, want := diags[0].Description().Summary, `Output "foo" not found`; got != want { - t.Errorf("unexpected diagnostics: %s", diags) - } - - if got, want := done(t).Stdout(), ""; got != want { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - }) - } -} diff --git a/internal/command/views/plan.go b/internal/command/views/plan.go deleted file mode 100644 index 8db4d3204c5e..000000000000 --- a/internal/command/views/plan.go +++ /dev/null @@ -1,88 +0,0 @@ -package views - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/arguments" - 
"github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// The Plan view is used for the plan command. -type Plan interface { - Operation() Operation - Hooks() []terraform.Hook - - Diagnostics(diags tfdiags.Diagnostics) - HelpPrompt() -} - -// NewPlan returns an initialized Plan implementation for the given ViewType. -func NewPlan(vt arguments.ViewType, view *View) Plan { - switch vt { - case arguments.ViewJSON: - return &PlanJSON{ - view: NewJSONView(view), - } - case arguments.ViewHuman: - return &PlanHuman{ - view: view, - inAutomation: view.RunningInAutomation(), - } - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -// The PlanHuman implementation renders human-readable text logs, suitable for -// a scrolling terminal. -type PlanHuman struct { - view *View - - inAutomation bool -} - -var _ Plan = (*PlanHuman)(nil) - -func (v *PlanHuman) Operation() Operation { - return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) -} - -func (v *PlanHuman) Hooks() []terraform.Hook { - return []terraform.Hook{ - NewUiHook(v.view), - } -} - -func (v *PlanHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *PlanHuman) HelpPrompt() { - v.view.HelpPrompt("plan") -} - -// The PlanJSON implementation renders streaming JSON logs, suitable for -// integrating with other software. 
-type PlanJSON struct { - view *JSONView -} - -var _ Plan = (*PlanJSON)(nil) - -func (v *PlanJSON) Operation() Operation { - return &OperationJSON{view: v.view} -} - -func (v *PlanJSON) Hooks() []terraform.Hook { - return []terraform.Hook{ - newJSONHook(v.view), - } -} - -func (v *PlanJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *PlanJSON) HelpPrompt() { -} diff --git a/internal/command/views/plan_test.go b/internal/command/views/plan_test.go deleted file mode 100644 index 33205738ad77..000000000000 --- a/internal/command/views/plan_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package views - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/zclconf/go-cty/cty" -) - -// Ensure that the correct view type and in-automation settings propagate to the -// Operation view. 
-func TestPlanHuman_operation(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)).Operation() - if hv, ok := v.(*OperationHuman); !ok { - t.Fatalf("unexpected return type %t", v) - } else if hv.inAutomation != true { - t.Fatalf("unexpected inAutomation value on Operation view") - } -} - -// Verify that Hooks includes a UI hook -func TestPlanHuman_hooks(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation((true))) - hooks := v.Hooks() - - var uiHook *UiHook - for _, hook := range hooks { - if ch, ok := hook.(*UiHook); ok { - uiHook = ch - } - } - if uiHook == nil { - t.Fatalf("expected Hooks to include a UiHook: %#v", hooks) - } -} - -// Helper functions to build a trivial test plan, to exercise the plan -// renderer. -func testPlan(t *testing.T) *plans.Plan { - t.Helper() - - plannedVal := cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("bar"), - }) - priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) - if err != nil { - t.Fatal(err) - } - - changes := plans.NewChanges() - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: addr, - PrevRunAddr: addr, - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - Before: priorValRaw, - After: plannedValRaw, - }, - }) - - return &plans.Plan{ - Changes: changes, - } -} - -func 
testPlanWithDatasource(t *testing.T) *plans.Plan { - plan := testPlan(t) - - addr := addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_source", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - dataVal := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("C6743020-40BD-4591-81E6-CD08494341D3"), - "bar": cty.StringVal("foo"), - }) - priorValRaw, err := plans.NewDynamicValue(cty.NullVal(dataVal.Type()), dataVal.Type()) - if err != nil { - t.Fatal(err) - } - plannedValRaw, err := plans.NewDynamicValue(dataVal, dataVal.Type()) - if err != nil { - t.Fatal(err) - } - - plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: addr, - PrevRunAddr: addr, - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.Read, - Before: priorValRaw, - After: plannedValRaw, - }, - }) - - return plan -} - -func testSchemas() *terraform.Schemas { - provider := testProvider() - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.ProviderSchema(), - }, - } -} - -func testProvider() *terraform.MockProvider { - p := new(terraform.MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - - p.GetProviderSchemaResponse = testProviderSchema() - - return p -} - -func testProviderSchema() *providers.GetProviderSchemaResponse { - return &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{}, - }, - ResourceTypes: map[string]providers.Schema{ - "test_resource": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - 
DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Required: true}, - "bar": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } -} diff --git a/internal/command/views/refresh.go b/internal/command/views/refresh.go deleted file mode 100644 index c670fd2d2727..000000000000 --- a/internal/command/views/refresh.go +++ /dev/null @@ -1,112 +0,0 @@ -package views - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// The Refresh view is used for the refresh command. -type Refresh interface { - Outputs(outputValues map[string]*states.OutputValue) - - Operation() Operation - Hooks() []terraform.Hook - - Diagnostics(diags tfdiags.Diagnostics) - HelpPrompt() -} - -// NewRefresh returns an initialized Refresh implementation for the given ViewType. -func NewRefresh(vt arguments.ViewType, view *View) Refresh { - switch vt { - case arguments.ViewJSON: - return &RefreshJSON{ - view: NewJSONView(view), - } - case arguments.ViewHuman: - return &RefreshHuman{ - view: view, - inAutomation: view.RunningInAutomation(), - countHook: &countHook{}, - } - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -// The RefreshHuman implementation renders human-readable text logs, suitable for -// a scrolling terminal. 
-type RefreshHuman struct { - view *View - - inAutomation bool - - countHook *countHook -} - -var _ Refresh = (*RefreshHuman)(nil) - -func (v *RefreshHuman) Outputs(outputValues map[string]*states.OutputValue) { - if len(outputValues) > 0 { - v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) - NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) - } -} - -func (v *RefreshHuman) Operation() Operation { - return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) -} - -func (v *RefreshHuman) Hooks() []terraform.Hook { - return []terraform.Hook{ - v.countHook, - NewUiHook(v.view), - } -} - -func (v *RefreshHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *RefreshHuman) HelpPrompt() { - v.view.HelpPrompt("refresh") -} - -// The RefreshJSON implementation renders streaming JSON logs, suitable for -// integrating with other software. -type RefreshJSON struct { - view *JSONView -} - -var _ Refresh = (*RefreshJSON)(nil) - -func (v *RefreshJSON) Outputs(outputValues map[string]*states.OutputValue) { - outputs, diags := json.OutputsFromMap(outputValues) - if diags.HasErrors() { - v.Diagnostics(diags) - } else { - v.view.Outputs(outputs) - } -} - -func (v *RefreshJSON) Operation() Operation { - return &OperationJSON{view: v.view} -} - -func (v *RefreshJSON) Hooks() []terraform.Hook { - return []terraform.Hook{ - newJSONHook(v.view), - } -} - -func (v *RefreshJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -func (v *RefreshJSON) HelpPrompt() { -} diff --git a/internal/command/views/refresh_test.go b/internal/command/views/refresh_test.go deleted file mode 100644 index d68348e5fca4..000000000000 --- a/internal/command/views/refresh_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package views - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/lang/marks" - 
"github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/zclconf/go-cty/cty" -) - -// Ensure that the correct view type and in-automation settings propagate to the -// Operation view. -func TestRefreshHuman_operation(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewRefresh(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)).Operation() - if hv, ok := v.(*OperationHuman); !ok { - t.Fatalf("unexpected return type %t", v) - } else if hv.inAutomation != true { - t.Fatalf("unexpected inAutomation value on Operation view") - } -} - -// Verify that Hooks includes a UI hook -func TestRefreshHuman_hooks(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - defer done(t) - v := NewRefresh(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)) - hooks := v.Hooks() - - var uiHook *UiHook - for _, hook := range hooks { - if ch, ok := hook.(*UiHook); ok { - uiHook = ch - } - } - if uiHook == nil { - t.Fatalf("expected Hooks to include a UiHook: %#v", hooks) - } -} - -// Basic test coverage of Outputs, since most of its functionality is tested -// elsewhere. -func TestRefreshHuman_outputs(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewRefresh(arguments.ViewHuman, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{ - "foo": {Value: cty.StringVal("secret")}, - }) - - got := done(t).Stdout() - for _, want := range []string{"Outputs:", `foo = "secret"`} { - if !strings.Contains(got, want) { - t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) - } - } -} - -// Outputs should do nothing if there are no outputs to render. 
-func TestRefreshHuman_outputsEmpty(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewRefresh(arguments.ViewHuman, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{}) - - got := done(t).Stdout() - if got != "" { - t.Errorf("output should be empty, but got: %q", got) - } -} - -// Basic test coverage of Outputs, since most of its functionality is tested -// elsewhere. -func TestRefreshJSON_outputs(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - v := NewRefresh(arguments.ViewJSON, NewView(streams)) - - v.Outputs(map[string]*states.OutputValue{ - "boop_count": {Value: cty.NumberIntVal(92)}, - "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, - }) - - want := []map[string]interface{}{ - { - "@level": "info", - "@message": "Outputs: 2", - "@module": "terraform.ui", - "type": "outputs", - "outputs": map[string]interface{}{ - "boop_count": map[string]interface{}{ - "sensitive": false, - "value": float64(92), - "type": "number", - }, - "password": map[string]interface{}{ - "sensitive": true, - "type": "string", - }, - }, - }, - } - testJSONViewOutputEquals(t, done(t).Stdout(), want) -} diff --git a/internal/command/views/show.go b/internal/command/views/show.go deleted file mode 100644 index 4cd4e2b6707c..000000000000 --- a/internal/command/views/show.go +++ /dev/null @@ -1,138 +0,0 @@ -package views - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/jsonformat" - "github.com/hashicorp/terraform/internal/command/jsonplan" - "github.com/hashicorp/terraform/internal/command/jsonprovider" - "github.com/hashicorp/terraform/internal/command/jsonstate" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" - 
"github.com/hashicorp/terraform/internal/tfdiags" -) - -type Show interface { - // Display renders the plan, if it is available. If plan is nil, it renders the statefile. - Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int - - // Diagnostics renders early diagnostics, resulting from argument parsing. - Diagnostics(diags tfdiags.Diagnostics) -} - -func NewShow(vt arguments.ViewType, view *View) Show { - switch vt { - case arguments.ViewJSON: - return &ShowJSON{view: view} - case arguments.ViewHuman: - return &ShowHuman{view: view} - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -type ShowHuman struct { - view *View -} - -var _ Show = (*ShowHuman)(nil) - -func (v *ShowHuman) Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int { - renderer := jsonformat.Renderer{ - Colorize: v.view.colorize, - Streams: v.view.streams, - RunningInAutomation: v.view.runningInAutomation, - } - - if plan != nil { - outputs, changed, drift, attrs, err := jsonplan.MarshalForRenderer(plan, schemas) - if err != nil { - v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) - return 1 - } - - jplan := jsonformat.Plan{ - PlanFormatVersion: jsonplan.FormatVersion, - ProviderFormatVersion: jsonprovider.FormatVersion, - OutputChanges: outputs, - ResourceChanges: changed, - ResourceDrift: drift, - ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), - RelevantAttributes: attrs, - } - - var opts []jsonformat.PlanRendererOpt - if !plan.CanApply() { - opts = append(opts, jsonformat.CanNotApply) - } - if plan.Errored { - opts = append(opts, jsonformat.Errored) - } - - renderer.RenderHumanPlan(jplan, plan.UIMode, opts...) 
- } else { - if stateFile == nil { - v.view.streams.Println("No state.") - return 0 - } - - root, outputs, err := jsonstate.MarshalForRenderer(stateFile, schemas) - if err != nil { - v.view.streams.Eprintf("Failed to marshal state to json: %s", err) - return 1 - } - - jstate := jsonformat.State{ - StateFormatVersion: jsonstate.FormatVersion, - ProviderFormatVersion: jsonprovider.FormatVersion, - RootModule: root, - RootModuleOutputs: outputs, - ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), - } - - renderer.RenderHumanState(jstate) - } - return 0 -} - -func (v *ShowHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -type ShowJSON struct { - view *View -} - -var _ Show = (*ShowJSON)(nil) - -func (v *ShowJSON) Display(config *configs.Config, plan *plans.Plan, stateFile *statefile.File, schemas *terraform.Schemas) int { - if plan != nil { - jsonPlan, err := jsonplan.Marshal(config, plan, stateFile, schemas) - - if err != nil { - v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) - return 1 - } - v.view.streams.Println(string(jsonPlan)) - } else { - // It is possible that there is neither state nor a plan. - // That's ok, we'll just return an empty object. - jsonState, err := jsonstate.Marshal(stateFile, schemas) - if err != nil { - v.view.streams.Eprintf("Failed to marshal state to json: %s", err) - return 1 - } - v.view.streams.Println(string(jsonState)) - } - return 0 -} - -// Diagnostics should only be called if show cannot be executed. -// In this case, we choose to render human-readable diagnostic output, -// primarily for backwards compatibility. 
-func (v *ShowJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} diff --git a/internal/command/views/show_test.go b/internal/command/views/show_test.go deleted file mode 100644 index 29fff81ca259..000000000000 --- a/internal/command/views/show_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package views - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/terraform" - - "github.com/zclconf/go-cty/cty" -) - -func TestShowHuman(t *testing.T) { - testCases := map[string]struct { - plan *plans.Plan - stateFile *statefile.File - schemas *terraform.Schemas - wantExact bool - wantString string - }{ - "plan file": { - testPlan(t), - nil, - testSchemas(), - false, - "# test_resource.foo will be created", - }, - "statefile": { - nil, - &statefile.File{ - Serial: 0, - Lineage: "fake-for-testing", - State: testState(), - }, - testSchemas(), - false, - "# test_resource.foo:", - }, - "empty statefile": { - nil, - &statefile.File{ - Serial: 0, - Lineage: "fake-for-testing", - State: states.NewState(), - }, - testSchemas(), - true, - "The state file is empty. 
No resources are represented.\n", - }, - "nothing": { - nil, - nil, - nil, - true, - "No state.\n", - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - view := NewView(streams) - view.Configure(&arguments.View{NoColor: true}) - v := NewShow(arguments.ViewHuman, view) - - code := v.Display(nil, testCase.plan, testCase.stateFile, testCase.schemas) - if code != 0 { - t.Errorf("expected 0 return code, got %d", code) - } - - output := done(t) - got := output.Stdout() - want := testCase.wantString - if (testCase.wantExact && got != want) || (!testCase.wantExact && !strings.Contains(got, want)) { - t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) - } - }) - } -} - -func TestShowJSON(t *testing.T) { - testCases := map[string]struct { - plan *plans.Plan - stateFile *statefile.File - }{ - "plan file": { - testPlan(t), - nil, - }, - "statefile": { - nil, - &statefile.File{ - Serial: 0, - Lineage: "fake-for-testing", - State: testState(), - }, - }, - "empty statefile": { - nil, - &statefile.File{ - Serial: 0, - Lineage: "fake-for-testing", - State: states.NewState(), - }, - }, - "nothing": { - nil, - nil, - }, - } - - config, _, configCleanup := initwd.MustLoadConfigForTests(t, "./testdata/show") - defer configCleanup() - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - view := NewView(streams) - view.Configure(&arguments.View{NoColor: true}) - v := NewShow(arguments.ViewJSON, view) - - schemas := &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): { - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - } - - code := v.Display(config, testCase.plan, 
testCase.stateFile, schemas) - - if code != 0 { - t.Errorf("expected 0 return code, got %d", code) - } - - // Make sure the result looks like JSON; we comprehensively test - // the structure of this output in the command package tests. - var result map[string]interface{} - got := done(t).All() - t.Logf("output: %s", got) - if err := json.Unmarshal([]byte(got), &result); err != nil { - t.Fatal(err) - } - }) - } -} - -// testState returns a test State structure. -func testState() *states.State { - return states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"bar","foo":"value"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - // DeepCopy is used here to ensure our synthetic state matches exactly - // with a state that will have been copied during the command - // operation, and all fields have been copied correctly. - }).DeepCopy() -} diff --git a/internal/command/views/test.go b/internal/command/views/test.go deleted file mode 100644 index 18c32c747b7b..000000000000 --- a/internal/command/views/test.go +++ /dev/null @@ -1,373 +0,0 @@ -package views - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "sort" - "strings" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/moduletest" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/mitchellh/colorstring" -) - -// Test is the view interface for the "terraform test" command. -type Test interface { - // Results presents the given test results. 
- Results(map[string]*moduletest.Suite) tfdiags.Diagnostics - - // Diagnostics is for reporting warnings or errors that occurred with the - // mechanics of running tests. For this command in particular, some - // errors are considered to be test failures rather than mechanism failures, - // and so those will be reported via Results rather than via Diagnostics. - Diagnostics(tfdiags.Diagnostics) -} - -// NewTest returns an implementation of Test configured to respect the -// settings described in the given arguments. -func NewTest(base *View, args arguments.TestOutput) Test { - return &testHuman{ - streams: base.streams, - showDiagnostics: base.Diagnostics, - colorize: base.colorize, - junitXMLFile: args.JUnitXMLFile, - } -} - -type testHuman struct { - // This is the subset of functionality we need from the base view. - streams *terminal.Streams - showDiagnostics func(diags tfdiags.Diagnostics) - colorize *colorstring.Colorize - - // If junitXMLFile is not empty then results will be written to - // the given file path in addition to the usual output. - junitXMLFile string -} - -func (v *testHuman) Results(results map[string]*moduletest.Suite) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // FIXME: Due to how this prototype command evolved concurrently with - // establishing the idea of command views, the handling of JUnit output - // as part of the "human" view rather than as a separate view in its - // own right is a little odd and awkward. We should refactor this - // prior to making "terraform test" a real supported command to make - // it be structured more like the other commands that use the views - // package. 
- - v.humanResults(results) - - if v.junitXMLFile != "" { - moreDiags := v.junitXMLResults(results, v.junitXMLFile) - diags = diags.Append(moreDiags) - } - - return diags -} - -func (v *testHuman) Diagnostics(diags tfdiags.Diagnostics) { - if len(diags) == 0 { - return - } - v.showDiagnostics(diags) -} - -func (v *testHuman) humanResults(results map[string]*moduletest.Suite) { - failCount := 0 - width := v.streams.Stderr.Columns() - - suiteNames := make([]string, 0, len(results)) - for suiteName := range results { - suiteNames = append(suiteNames, suiteName) - } - sort.Strings(suiteNames) - for _, suiteName := range suiteNames { - suite := results[suiteName] - - componentNames := make([]string, 0, len(suite.Components)) - for componentName := range suite.Components { - componentNames = append(componentNames, componentName) - } - for _, componentName := range componentNames { - component := suite.Components[componentName] - - assertionNames := make([]string, 0, len(component.Assertions)) - for assertionName := range component.Assertions { - assertionNames = append(assertionNames, assertionName) - } - sort.Strings(assertionNames) - - for _, assertionName := range assertionNames { - assertion := component.Assertions[assertionName] - - fullName := fmt.Sprintf("%s.%s.%s", suiteName, componentName, assertionName) - if strings.HasPrefix(componentName, "(") { - // parenthesis-prefixed components are placeholders that - // the test harness generates to represent problems that - // prevented checking any assertions at all, so we'll - // just hide them and show the suite name. - fullName = suiteName - } - headingExtra := fmt.Sprintf("%s (%s)", fullName, assertion.Description) - - switch assertion.Outcome { - case moduletest.Failed: - // Failed means that the assertion was successfully - // excecuted but that the assertion condition didn't hold. 
- v.eprintRuleHeading("yellow", "Failed", headingExtra) - - case moduletest.Error: - // Error means that the system encountered an unexpected - // error when trying to evaluate the assertion. - v.eprintRuleHeading("red", "Error", headingExtra) - - default: - // We don't do anything for moduletest.Passed or - // moduletest.Skipped. Perhaps in future we'll offer a - // -verbose option to include information about those. - continue - } - failCount++ - - if len(assertion.Message) > 0 { - dispMsg := format.WordWrap(assertion.Message, width) - v.streams.Eprintln(dispMsg) - } - if len(assertion.Diagnostics) > 0 { - // We'll do our own writing of the diagnostics in this - // case, rather than using v.Diagnostics, because we - // specifically want all of these diagnostics to go to - // Stderr along with all of the other output we've - // generated. - for _, diag := range assertion.Diagnostics { - diagStr := format.Diagnostic(diag, nil, v.colorize, width) - v.streams.Eprint(diagStr) - } - } - } - } - } - - if failCount > 0 { - // If we've printed at least one failure then we'll have printed at - // least one horizontal rule across the terminal, and so we'll balance - // that with another horizontal rule. - if width > 1 { - rule := strings.Repeat("─", width-1) - v.streams.Eprintln(v.colorize.Color("[dark_gray]" + rule)) - } - } - - if failCount == 0 { - if len(results) > 0 { - // This is not actually an error, but it's convenient if all of our - // result output goes to the same stream for when this is running in - // automation that might be gathering this output via a pipe. - v.streams.Eprint(v.colorize.Color("[bold][green]Success![reset] All of the test assertions passed.\n\n")) - } else { - v.streams.Eprint(v.colorize.Color("[bold][yellow]No tests defined.[reset] This module doesn't have any test suites to run.\n\n")) - } - } - - // Try to flush any buffering that might be happening. (This isn't always - // successful, depending on what sort of fd Stderr is connected to.) 
- v.streams.Stderr.File.Sync() -} - -func (v *testHuman) junitXMLResults(results map[string]*moduletest.Suite, filename string) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // "JUnit XML" is a file format that has become a de-facto standard for - // test reporting tools but that is not formally specified anywhere, and - // so each producer and consumer implementation unfortunately tends to - // differ in certain ways from others. - // With that in mind, this is a best effort sort of thing aimed at being - // broadly compatible with various consumers, but it's likely that - // some consumers will present these results better than others. - // This implementation is based mainly on the pseudo-specification of the - // format curated here, based on the Jenkins parser implementation: - // https://llg.cubic.org/docs/junit/ - - // An "Outcome" represents one of the various XML elements allowed inside - // a testcase element to indicate the test outcome. - type Outcome struct { - Message string `xml:"message,omitempty"` - } - - // TestCase represents an individual test case as part of a suite. Note - // that a JUnit XML incorporates both the "component" and "assertion" - // levels of our model: we pretend that component is a class name and - // assertion is a method name in order to match with the Java-flavored - // expectations of JUnit XML, which are hopefully close enough to get - // a test result rendering that's useful to humans. - type TestCase struct { - AssertionName string `xml:"name"` - ComponentName string `xml:"classname"` - - // These fields represent the different outcomes of a TestCase. Only one - // of these should be populated in each TestCase; this awkward - // structure is just to make this play nicely with encoding/xml's - // expecatations. 
- Skipped *Outcome `xml:"skipped,omitempty"` - Error *Outcome `xml:"error,omitempty"` - Failure *Outcome `xml:"failure,omitempty"` - - Stderr string `xml:"system-out,omitempty"` - } - - // TestSuite represents an individual test suite, of potentially many - // in a JUnit XML document. - type TestSuite struct { - Name string `xml:"name"` - TotalCount int `xml:"tests"` - SkippedCount int `xml:"skipped"` - ErrorCount int `xml:"errors"` - FailureCount int `xml:"failures"` - Cases []*TestCase `xml:"testcase"` - } - - // TestSuites represents the root element of the XML document. - type TestSuites struct { - XMLName struct{} `xml:"testsuites"` - ErrorCount int `xml:"errors"` - FailureCount int `xml:"failures"` - TotalCount int `xml:"tests"` - Suites []*TestSuite `xml:"testsuite"` - } - - xmlSuites := TestSuites{} - suiteNames := make([]string, 0, len(results)) - for suiteName := range results { - suiteNames = append(suiteNames, suiteName) - } - sort.Strings(suiteNames) - for _, suiteName := range suiteNames { - suite := results[suiteName] - - xmlSuite := &TestSuite{ - Name: suiteName, - } - xmlSuites.Suites = append(xmlSuites.Suites, xmlSuite) - - componentNames := make([]string, 0, len(suite.Components)) - for componentName := range suite.Components { - componentNames = append(componentNames, componentName) - } - for _, componentName := range componentNames { - component := suite.Components[componentName] - - assertionNames := make([]string, 0, len(component.Assertions)) - for assertionName := range component.Assertions { - assertionNames = append(assertionNames, assertionName) - } - sort.Strings(assertionNames) - - for _, assertionName := range assertionNames { - assertion := component.Assertions[assertionName] - xmlSuites.TotalCount++ - xmlSuite.TotalCount++ - - xmlCase := &TestCase{ - ComponentName: componentName, - AssertionName: assertionName, - } - xmlSuite.Cases = append(xmlSuite.Cases, xmlCase) - - switch assertion.Outcome { - case moduletest.Pending: - // We 
represent "pending" cases -- cases blocked by - // upstream errors -- as if they were "skipped" in JUnit - // terms, because we didn't actually check them and so - // can't say whether they succeeded or not. - xmlSuite.SkippedCount++ - xmlCase.Skipped = &Outcome{ - Message: assertion.Message, - } - case moduletest.Failed: - xmlSuites.FailureCount++ - xmlSuite.FailureCount++ - xmlCase.Failure = &Outcome{ - Message: assertion.Message, - } - case moduletest.Error: - xmlSuites.ErrorCount++ - xmlSuite.ErrorCount++ - xmlCase.Error = &Outcome{ - Message: assertion.Message, - } - - // We'll also include the diagnostics in the "stderr" - // portion of the output, so they'll hopefully be visible - // in a test log viewer in JUnit-XML-Consuming CI systems. - var buf strings.Builder - for _, diag := range assertion.Diagnostics { - diagStr := format.DiagnosticPlain(diag, nil, 68) - buf.WriteString(diagStr) - } - xmlCase.Stderr = buf.String() - } - - } - } - } - - xmlOut, err := xml.MarshalIndent(&xmlSuites, "", " ") - if err != nil { - // If marshalling fails then that's a bug in the code above, - // because we should always be producing a value that is - // accepted by encoding/xml. 
- panic(fmt.Sprintf("invalid values to marshal as JUnit XML: %s", err)) - } - - err = ioutil.WriteFile(filename, xmlOut, 0644) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write JUnit XML file", - fmt.Sprintf( - "Could not create %s to record the test results in JUnit XML format: %s.", - filename, - err, - ), - )) - } - - return diags -} - -func (v *testHuman) eprintRuleHeading(color, prefix, extra string) { - const lineCell string = "─" - textLen := len(prefix) + len(": ") + len(extra) - spacingLen := 2 - leftLineLen := 3 - - rightLineLen := 0 - width := v.streams.Stderr.Columns() - if (textLen + spacingLen + leftLineLen) < (width - 1) { - // (we allow an extra column at the end because some terminals can't - // print in the final column without wrapping to the next line) - rightLineLen = width - (textLen + spacingLen + leftLineLen) - 1 - } - - colorCode := "[" + color + "]" - - // We'll prepare what we're going to print in memory first, so that we can - // send it all to stderr in one write in case other programs are also - // concurrently trying to write to the terminal for some reason. 
- var buf strings.Builder - buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, leftLineLen))) - buf.WriteByte(' ') - buf.WriteString(v.colorize.Color("[bold]" + colorCode + prefix + ":")) - buf.WriteByte(' ') - buf.WriteString(extra) - if rightLineLen > 0 { - buf.WriteByte(' ') - buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, rightLineLen))) - } - v.streams.Eprintln(buf.String()) -} diff --git a/internal/command/views/test_test.go b/internal/command/views/test_test.go deleted file mode 100644 index 6acd889e8551..000000000000 --- a/internal/command/views/test_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package views - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/moduletest" - "github.com/hashicorp/terraform/internal/terminal" -) - -func TestTest(t *testing.T) { - streams, close := terminal.StreamsForTesting(t) - baseView := NewView(streams) - view := NewTest(baseView, arguments.TestOutput{ - JUnitXMLFile: "", - }) - - results := map[string]*moduletest.Suite{} - view.Results(results) - - output := close(t) - gotOutput := strings.TrimSpace(output.All()) - wantOutput := `No tests defined. This module doesn't have any test suites to run.` - if gotOutput != wantOutput { - t.Errorf("wrong output\ngot:\n%s\nwant:\n%s", gotOutput, wantOutput) - } - - // TODO: Test more at this layer. For now, the main UI output tests for - // the "terraform test" command are in the command package as part of - // the overall command tests. 
-} diff --git a/internal/command/views/validate.go b/internal/command/views/validate.go deleted file mode 100644 index 08ce913f82ce..000000000000 --- a/internal/command/views/validate.go +++ /dev/null @@ -1,138 +0,0 @@ -package views - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/command/format" - viewsjson "github.com/hashicorp/terraform/internal/command/views/json" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// The Validate is used for the validate command. -type Validate interface { - // Results renders the diagnostics returned from a validation walk, and - // returns a CLI exit code: 0 if there are no errors, 1 otherwise - Results(diags tfdiags.Diagnostics) int - - // Diagnostics renders early diagnostics, resulting from argument parsing. - Diagnostics(diags tfdiags.Diagnostics) -} - -// NewValidate returns an initialized Validate implementation for the given ViewType. -func NewValidate(vt arguments.ViewType, view *View) Validate { - switch vt { - case arguments.ViewJSON: - return &ValidateJSON{view: view} - case arguments.ViewHuman: - return &ValidateHuman{view: view} - default: - panic(fmt.Sprintf("unknown view type %v", vt)) - } -} - -// The ValidateHuman implementation renders diagnostics in a human-readable form, -// along with a success/failure message if Terraform is able to execute the -// validation walk. 
-type ValidateHuman struct { - view *View -} - -var _ Validate = (*ValidateHuman)(nil) - -func (v *ValidateHuman) Results(diags tfdiags.Diagnostics) int { - columns := v.view.outputColumns() - - if len(diags) == 0 { - v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateSuccess), columns)) - } else { - v.Diagnostics(diags) - - if !diags.HasErrors() { - v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateWarnings), columns)) - } - } - - if diags.HasErrors() { - return 1 - } - return 0 -} - -const validateSuccess = "[green][bold]Success![reset] The configuration is valid.\n" - -const validateWarnings = "[green][bold]Success![reset] The configuration is valid, but there were some validation warnings as shown above.\n" - -func (v *ValidateHuman) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} - -// The ValidateJSON implementation renders validation results as a JSON object. -// This object includes top-level fields summarizing the result, and an array -// of JSON diagnostic objects. -type ValidateJSON struct { - view *View -} - -var _ Validate = (*ValidateJSON)(nil) - -func (v *ValidateJSON) Results(diags tfdiags.Diagnostics) int { - // FormatVersion represents the version of the json format and will be - // incremented for any change to this format that requires changes to a - // consuming parser. - const FormatVersion = "1.0" - - type Output struct { - FormatVersion string `json:"format_version"` - - // We include some summary information that is actually redundant - // with the detailed diagnostics, but avoids the need for callers - // to re-implement our logic for deciding these. 
- Valid bool `json:"valid"` - ErrorCount int `json:"error_count"` - WarningCount int `json:"warning_count"` - Diagnostics []*viewsjson.Diagnostic `json:"diagnostics"` - } - - output := Output{ - FormatVersion: FormatVersion, - Valid: true, // until proven otherwise - } - configSources := v.view.configSources() - for _, diag := range diags { - output.Diagnostics = append(output.Diagnostics, viewsjson.NewDiagnostic(diag, configSources)) - - switch diag.Severity() { - case tfdiags.Error: - output.ErrorCount++ - output.Valid = false - case tfdiags.Warning: - output.WarningCount++ - } - } - if output.Diagnostics == nil { - // Make sure this always appears as an array in our output, since - // this is easier to consume for dynamically-typed languages. - output.Diagnostics = []*viewsjson.Diagnostic{} - } - - j, err := json.MarshalIndent(&output, "", " ") - if err != nil { - // Should never happen because we fully-control the input here - panic(err) - } - v.view.streams.Println(string(j)) - - if diags.HasErrors() { - return 1 - } - return 0 -} - -// Diagnostics should only be called if the validation walk cannot be executed. -// In this case, we choose to render human-readable diagnostic output, -// primarily for backwards compatibility. 
-func (v *ValidateJSON) Diagnostics(diags tfdiags.Diagnostics) { - v.view.Diagnostics(diags) -} diff --git a/internal/command/views/validate_test.go b/internal/command/views/validate_test.go deleted file mode 100644 index 6545c3b3140f..000000000000 --- a/internal/command/views/validate_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package views - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/command/arguments" - "github.com/hashicorp/terraform/internal/terminal" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestValidateHuman(t *testing.T) { - testCases := map[string]struct { - diag tfdiags.Diagnostic - wantSuccess bool - wantSubstring string - }{ - "success": { - nil, - true, - "The configuration is valid.", - }, - "warning": { - tfdiags.Sourceless( - tfdiags.Warning, - "Your shoelaces are untied", - "Watch out, or you'll trip!", - ), - true, - "The configuration is valid, but there were some validation warnings", - }, - "error": { - tfdiags.Sourceless( - tfdiags.Error, - "Configuration is missing random_pet", - "Every configuration should have a random_pet.", - ), - false, - "Error: Configuration is missing random_pet", - }, - } - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - view := NewView(streams) - view.Configure(&arguments.View{NoColor: true}) - v := NewValidate(arguments.ViewHuman, view) - - var diags tfdiags.Diagnostics - - if tc.diag != nil { - diags = diags.Append(tc.diag) - } - - ret := v.Results(diags) - - if tc.wantSuccess && ret != 0 { - t.Errorf("expected 0 return code, got %d", ret) - } else if !tc.wantSuccess && ret != 1 { - t.Errorf("expected 1 return code, got %d", ret) - } - - got := done(t).All() - if strings.Contains(got, "Success!") != tc.wantSuccess { - t.Errorf("unexpected output:\n%s", got) - } - if !strings.Contains(got, tc.wantSubstring) { - t.Errorf("expected output to include %q, but was:\n%s", 
tc.wantSubstring, got) - } - }) - } -} - -func TestValidateJSON(t *testing.T) { - testCases := map[string]struct { - diag tfdiags.Diagnostic - wantSuccess bool - }{ - "success": { - nil, - true, - }, - "warning": { - tfdiags.Sourceless( - tfdiags.Warning, - "Your shoelaces are untied", - "Watch out, or you'll trip!", - ), - true, - }, - "error": { - tfdiags.Sourceless( - tfdiags.Error, - "Configuration is missing random_pet", - "Every configuration should have a random_pet.", - ), - false, - }, - } - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - streams, done := terminal.StreamsForTesting(t) - view := NewView(streams) - view.Configure(&arguments.View{NoColor: true}) - v := NewValidate(arguments.ViewJSON, view) - - var diags tfdiags.Diagnostics - - if tc.diag != nil { - diags = diags.Append(tc.diag) - } - - ret := v.Results(diags) - - if tc.wantSuccess && ret != 0 { - t.Errorf("expected 0 return code, got %d", ret) - } else if !tc.wantSuccess && ret != 1 { - t.Errorf("expected 1 return code, got %d", ret) - } - - got := done(t).All() - - // Make sure the result looks like JSON; we comprehensively test - // the structure of this output in the command package tests. - var result map[string]interface{} - - if err := json.Unmarshal([]byte(got), &result); err != nil { - t.Fatal(err) - } - }) - } -} diff --git a/internal/command/webbrowser/mock.go b/internal/command/webbrowser/mock.go deleted file mode 100644 index 1245cbedeb05..000000000000 --- a/internal/command/webbrowser/mock.go +++ /dev/null @@ -1,155 +0,0 @@ -package webbrowser - -import ( - "context" - "fmt" - "log" - "net/http" - "net/url" - "sync" - - "github.com/hashicorp/terraform/internal/httpclient" -) - -// NewMockLauncher creates and returns a mock implementation of Launcher, -// with some special behavior designed for use in unit tests. -// -// See the documentation of MockLauncher itself for more information. 
-func NewMockLauncher(ctx context.Context) *MockLauncher { - client := httpclient.New() - return &MockLauncher{ - Client: client, - Context: ctx, - } -} - -// MockLauncher is a mock implementation of Launcher that has some special -// behavior designed for use in unit tests. -// -// When OpenURL is called, MockLauncher will make an HTTP request to the given -// URL rather than interacting with a "real" browser. -// -// In normal situations it will then return with no further action, but if -// the response to the given URL is either a standard HTTP redirect response -// or includes the custom HTTP header X-Redirect-To then MockLauncher will -// send a follow-up request to that target URL, and continue in this manner -// until it reaches a URL that is not a redirect. (The X-Redirect-To header -// is there so that a server can potentially offer a normal HTML page to -// an actual browser while also giving a next-hop hint for MockLauncher.) -// -// Since MockLauncher is not a full programmable user-agent implementation -// it can't be used for testing of real-world web applications, but it can -// be used for testing against specialized test servers that are written -// with MockLauncher in mind and know how to drive the request flow through -// whatever steps are required to complete the desired test. -// -// All of the actions taken by MockLauncher happen asynchronously in the -// background, to simulate the concurrency of a separate web browser. -// Test code using MockLauncher should provide a context which is cancelled -// when the test completes, to help avoid leaking MockLaunchers. -type MockLauncher struct { - // Client is the HTTP client that MockLauncher will use to make requests. - // By default (if you use NewMockLauncher) this is a new client created - // via httpclient.New, but callers may override it if they need customized - // behavior for a particular test. 
- // - // Do not use a client that is shared with any other subsystem, because - // MockLauncher will customize the settings of the given client. - Client *http.Client - - // Context can be cancelled in order to abort an OpenURL call before it - // would naturally complete. - Context context.Context - - // Responses is a log of all of the responses recieved from the launcher's - // requests, in the order requested. - Responses []*http.Response - - // done is a waitgroup used internally to signal when the async work is - // complete, in order to make this mock more convenient to use in tests. - done sync.WaitGroup -} - -var _ Launcher = (*MockLauncher)(nil) - -// OpenURL is the mock implementation of Launcher, which has the special -// behavior described for type MockLauncher. -func (l *MockLauncher) OpenURL(u string) error { - // We run our operation in the background because it's supposed to be - // behaving like a web browser running in a separate process. - log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) starting in the background", u) - l.done.Add(1) - go func() { - err := l.openURL(u) - if err != nil { - // Can't really do anything with this asynchronously, so we'll - // just log it so that someone debugging will be able to see it. - log.Printf("[ERROR] webbrowser.MockLauncher: OpenURL(%q): %s", u, err) - } else { - log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) has concluded", u) - } - l.done.Done() - }() - return nil -} - -func (l *MockLauncher) openURL(u string) error { - // We need to disable automatic redirect following so that we can implement - // it ourselves below, and thus be able to see the redirects in our - // responses log. - l.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - } - - // We'll keep looping as long as the server keeps giving us new URLs to - // request. 
- for u != "" { - log.Printf("[DEBUG] webbrowser.MockLauncher: requesting %s", u) - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return fmt.Errorf("failed to construct HTTP request for %s: %s", u, err) - } - resp, err := l.Client.Do(req) - if err != nil { - log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", err) - return fmt.Errorf("error requesting %s: %s", u, err) - } - l.Responses = append(l.Responses, resp) - if resp.StatusCode >= 400 { - log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", resp.Status) - return fmt.Errorf("error requesting %s: %s", u, resp.Status) - } - log.Printf("[DEBUG] webbrowser.MockLauncher: request succeeded: %s", resp.Status) - - u = "" // unless it's a redirect, we'll stop after this - if location := resp.Header.Get("Location"); location != "" { - u = location - } else if redirectTo := resp.Header.Get("X-Redirect-To"); redirectTo != "" { - u = redirectTo - } - - if u != "" { - // HTTP technically doesn't permit relative URLs in Location, but - // browsers tolerate it and so real-world servers do it, and thus - // we'll allow it here too. - oldURL := resp.Request.URL - givenURL, err := url.Parse(u) - if err != nil { - return fmt.Errorf("invalid redirect URL %s: %s", u, err) - } - u = oldURL.ResolveReference(givenURL).String() - log.Printf("[DEBUG] webbrowser.MockLauncher: redirected to %s", u) - } - } - - log.Printf("[DEBUG] webbrowser.MockLauncher: all done") - return nil -} - -// Wait blocks until the MockLauncher has finished its asynchronous work of -// making HTTP requests and following redirects, at which point it will have -// reached a request that didn't redirect anywhere and stopped iterating. 
-func (l *MockLauncher) Wait() { - log.Printf("[TRACE] webbrowser.MockLauncher: Wait() for current work to complete") - l.done.Wait() -} diff --git a/internal/communicator/communicator.go b/internal/communicator/communicator.go deleted file mode 100644 index 5b754b34f323..000000000000 --- a/internal/communicator/communicator.go +++ /dev/null @@ -1,170 +0,0 @@ -package communicator - -import ( - "context" - "fmt" - "io" - "log" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/communicator/shared" - "github.com/hashicorp/terraform/internal/communicator/ssh" - "github.com/hashicorp/terraform/internal/communicator/winrm" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/zclconf/go-cty/cty" -) - -// Communicator is an interface that must be implemented by all communicators -// used for any of the provisioners -type Communicator interface { - // Connect is used to set up the connection - Connect(provisioners.UIOutput) error - - // Disconnect is used to terminate the connection - Disconnect() error - - // Timeout returns the configured connection timeout - Timeout() time.Duration - - // ScriptPath returns the configured script path - ScriptPath() string - - // Start executes a remote command in a new session - Start(*remote.Cmd) error - - // Upload is used to upload a single file - Upload(string, io.Reader) error - - // UploadScript is used to upload a file as an executable script - UploadScript(string, io.Reader) error - - // UploadDir is used to upload a directory - UploadDir(string, string) error -} - -// New returns a configured Communicator or an error if the connection type is not supported -func New(v cty.Value) (Communicator, error) { - v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) - if err != nil { - return nil, err - } - - typeVal := v.GetAttr("type") - connType := "" - if !typeVal.IsNull() { - connType = typeVal.AsString() - } - - 
switch connType { - case "ssh", "": // The default connection type is ssh, so if connType is empty use ssh - return ssh.New(v) - case "winrm": - return winrm.New(v) - default: - return nil, fmt.Errorf("connection type '%s' not supported", connType) - } -} - -// maxBackoffDelay is the maximum delay between retry attempts -var maxBackoffDelay = 20 * time.Second -var initialBackoffDelay = time.Second - -// in practice we want to abort the retry asap, but for tests we need to -// synchronize the return. -var retryTestWg *sync.WaitGroup - -// Fatal is an interface that error values can return to halt Retry -type Fatal interface { - FatalError() error -} - -// Retry retries the function f until it returns a nil error, a Fatal error, or -// the context expires. -func Retry(ctx context.Context, f func() error) error { - // container for atomic error value - type errWrap struct { - E error - } - - // Try the function in a goroutine - var errVal atomic.Value - doneCh := make(chan struct{}) - go func() { - if retryTestWg != nil { - defer retryTestWg.Done() - } - - defer close(doneCh) - - delay := time.Duration(0) - for { - // If our context ended, we want to exit right away. 
- select { - case <-ctx.Done(): - return - case <-time.After(delay): - } - - // Try the function call - err := f() - - // return if we have no error, or a FatalError - done := false - switch e := err.(type) { - case nil: - done = true - case Fatal: - err = e.FatalError() - done = true - } - - errVal.Store(errWrap{err}) - - if done { - return - } - - log.Printf("[WARN] retryable error: %v", err) - - delay *= 2 - - if delay == 0 { - delay = initialBackoffDelay - } - - if delay > maxBackoffDelay { - delay = maxBackoffDelay - } - - log.Printf("[INFO] sleeping for %s", delay) - } - }() - - // Wait for completion - select { - case <-ctx.Done(): - case <-doneCh: - } - - var lastErr error - // Check if we got an error executing - if ev, ok := errVal.Load().(errWrap); ok { - lastErr = ev.E - } - - // Check if we have a context error to check if we're interrupted or timeout - switch ctx.Err() { - case context.Canceled: - return fmt.Errorf("interrupted - last error: %v", lastErr) - case context.DeadlineExceeded: - return fmt.Errorf("timeout - last error: %v", lastErr) - } - - if lastErr != nil { - return lastErr - } - return nil -} diff --git a/internal/communicator/ssh/communicator.go b/internal/communicator/ssh/communicator.go deleted file mode 100644 index 609dc1fbaf0e..000000000000 --- a/internal/communicator/ssh/communicator.go +++ /dev/null @@ -1,896 +0,0 @@ -package ssh - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math/rand" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/apparentlymart/go-shquot/shquot" - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/zclconf/go-cty/cty" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -const ( - // DefaultShebang is added at the top of a SSH script file - DefaultShebang = "#!/bin/sh\n" 
-) - -var ( - // randShared is a global random generator object that is shared. This must be - // shared since it is seeded by the current time and creating multiple can - // result in the same values. By using a shared RNG we assure different numbers - // per call. - randLock sync.Mutex - randShared *rand.Rand - - // enable ssh keeplive probes by default - keepAliveInterval = 2 * time.Second - - // max time to wait for for a KeepAlive response before considering the - // connection to be dead. - maxKeepAliveDelay = 120 * time.Second -) - -// Communicator represents the SSH communicator -type Communicator struct { - connInfo *connectionInfo - client *ssh.Client - config *sshConfig - conn net.Conn - cancelKeepAlive context.CancelFunc - - lock sync.Mutex -} - -type sshConfig struct { - // The configuration of the Go SSH connection - config *ssh.ClientConfig - - // connection returns a new connection. The current connection - // in use will be closed as part of the Close method, or in the - // case an error occurs. - connection func() (net.Conn, error) - - // noPty, if true, will not request a pty from the remote end. - noPty bool - - // sshAgent is a struct surrounding the agent.Agent client and the net.Conn - // to the SSH Agent. It is nil if no SSH agent is configured - sshAgent *sshAgent -} - -type fatalError struct { - error -} - -func (e fatalError) FatalError() error { - return e.error -} - -// New creates a new communicator implementation over SSH. -func New(v cty.Value) (*Communicator, error) { - connInfo, err := parseConnectionInfo(v) - if err != nil { - return nil, err - } - - config, err := prepareSSHConfig(connInfo) - if err != nil { - return nil, err - } - - // Set up the random number generator once. The seed value is the - // time multiplied by the PID. This can overflow the int64 but that - // is okay. We multiply by the PID in case we have multiple processes - // grabbing this at the same time. 
This is possible with Terraform and - // if we communicate to the same host at the same instance, we could - // overwrite the same files. Multiplying by the PID prevents this. - randLock.Lock() - defer randLock.Unlock() - if randShared == nil { - randShared = rand.New(rand.NewSource( - time.Now().UnixNano() * int64(os.Getpid()))) - } - - comm := &Communicator{ - connInfo: connInfo, - config: config, - } - - return comm, nil -} - -// Connect implementation of communicator.Communicator interface -func (c *Communicator) Connect(o provisioners.UIOutput) (err error) { - // Grab a lock so we can modify our internal attributes - c.lock.Lock() - defer c.lock.Unlock() - - if c.conn != nil { - c.conn.Close() - } - - // Set the conn and client to nil since we'll recreate it - c.conn = nil - c.client = nil - - if o != nil { - o.Output(fmt.Sprintf( - "Connecting to remote host via SSH...\n"+ - " Host: %s\n"+ - " User: %s\n"+ - " Password: %t\n"+ - " Private key: %t\n"+ - " Certificate: %t\n"+ - " SSH Agent: %t\n"+ - " Checking Host Key: %t\n"+ - " Target Platform: %s\n", - c.connInfo.Host, c.connInfo.User, - c.connInfo.Password != "", - c.connInfo.PrivateKey != "", - c.connInfo.Certificate != "", - c.connInfo.Agent, - c.connInfo.HostKey != "", - c.connInfo.TargetPlatform, - )) - - if c.connInfo.BastionHost != "" { - o.Output(fmt.Sprintf( - "Using configured bastion host...\n"+ - " Host: %s\n"+ - " User: %s\n"+ - " Password: %t\n"+ - " Private key: %t\n"+ - " Certificate: %t\n"+ - " SSH Agent: %t\n"+ - " Checking Host Key: %t", - c.connInfo.BastionHost, c.connInfo.BastionUser, - c.connInfo.BastionPassword != "", - c.connInfo.BastionPrivateKey != "", - c.connInfo.BastionCertificate != "", - c.connInfo.Agent, - c.connInfo.BastionHostKey != "", - )) - } - - if c.connInfo.ProxyHost != "" { - o.Output(fmt.Sprintf( - "Using configured proxy host...\n"+ - " ProxyHost: %s\n"+ - " ProxyPort: %d\n"+ - " ProxyUserName: %s\n"+ - " ProxyUserPassword: %t", - c.connInfo.ProxyHost, - 
c.connInfo.ProxyPort, - c.connInfo.ProxyUserName, - c.connInfo.ProxyUserPassword != "", - )) - } - } - - hostAndPort := fmt.Sprintf("%s:%d", c.connInfo.Host, c.connInfo.Port) - log.Printf("[DEBUG] Connecting to %s for SSH", hostAndPort) - c.conn, err = c.config.connection() - if err != nil { - // Explicitly set this to the REAL nil. Connection() can return - // a nil implementation of net.Conn which will make the - // "if c.conn == nil" check fail above. Read here for more information - // on this psychotic language feature: - // - // http://golang.org/doc/faq#nil_error - c.conn = nil - - log.Printf("[ERROR] connection error: %s", err) - return err - } - - log.Printf("[DEBUG] Connection established. Handshaking for user %v", c.connInfo.User) - sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, hostAndPort, c.config.config) - if err != nil { - err = fmt.Errorf("SSH authentication failed (%s@%s): %w", c.connInfo.User, hostAndPort, err) - - // While in theory this should be a fatal error, some hosts may start - // the ssh service before it is properly configured, or before user - // authentication data is available. - // Log the error, and allow the provisioner to retry. 
- log.Printf("[WARN] %s", err) - return err - } - - c.client = ssh.NewClient(sshConn, sshChan, req) - - if c.config.sshAgent != nil { - log.Printf("[DEBUG] Telling SSH config to forward to agent") - if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil { - return fatalError{err} - } - - log.Printf("[DEBUG] Setting up a session to request agent forwarding") - session, err := c.client.NewSession() - if err != nil { - return err - } - defer session.Close() - - err = agent.RequestAgentForwarding(session) - - if err == nil { - log.Printf("[INFO] agent forwarding enabled") - } else { - log.Printf("[WARN] error forwarding agent: %s", err) - } - } - - if err != nil { - return err - } - - if o != nil { - o.Output("Connected!") - } - - ctx, cancelKeepAlive := context.WithCancel(context.TODO()) - c.cancelKeepAlive = cancelKeepAlive - - // Start a keepalive goroutine to help maintain the connection for - // long-running commands. - log.Printf("[DEBUG] starting ssh KeepAlives") - - // We want a local copy of the ssh client pointer, so that a reconnect - // doesn't race with the running keep-alive loop. - sshClient := c.client - go func() { - defer cancelKeepAlive() - // Along with the KeepAlives generating packets to keep the tcp - // connection open, we will use the replies to verify liveness of the - // connection. This will prevent dead connections from blocking the - // provisioner indefinitely. 
- respCh := make(chan error, 1) - - go func() { - t := time.NewTicker(keepAliveInterval) - defer t.Stop() - for { - select { - case <-t.C: - _, _, err := sshClient.SendRequest("keepalive@terraform.io", true, nil) - respCh <- err - case <-ctx.Done(): - return - } - } - }() - - after := time.NewTimer(maxKeepAliveDelay) - defer after.Stop() - - for { - select { - case err := <-respCh: - if err != nil { - log.Printf("[ERROR] ssh keepalive: %s", err) - sshConn.Close() - return - } - case <-after.C: - // abort after too many missed keepalives - log.Println("[ERROR] no reply from ssh server") - sshConn.Close() - return - case <-ctx.Done(): - return - } - if !after.Stop() { - <-after.C - } - after.Reset(maxKeepAliveDelay) - } - }() - - return nil -} - -// Disconnect implementation of communicator.Communicator interface -func (c *Communicator) Disconnect() error { - c.lock.Lock() - defer c.lock.Unlock() - - if c.cancelKeepAlive != nil { - c.cancelKeepAlive() - } - - if c.config.sshAgent != nil { - if err := c.config.sshAgent.Close(); err != nil { - return err - } - } - - if c.conn != nil { - conn := c.conn - c.conn = nil - return conn.Close() - } - - return nil -} - -// Timeout implementation of communicator.Communicator interface -func (c *Communicator) Timeout() time.Duration { - return c.connInfo.TimeoutVal -} - -// ScriptPath implementation of communicator.Communicator interface -func (c *Communicator) ScriptPath() string { - randLock.Lock() - defer randLock.Unlock() - - return strings.Replace( - c.connInfo.ScriptPath, "%RAND%", - strconv.FormatInt(int64(randShared.Int31()), 10), -1) -} - -// Start implementation of communicator.Communicator interface -func (c *Communicator) Start(cmd *remote.Cmd) error { - cmd.Init() - - session, err := c.newSession() - if err != nil { - return err - } - - // Set up our session - session.Stdin = cmd.Stdin - session.Stdout = cmd.Stdout - session.Stderr = cmd.Stderr - - if !c.config.noPty && c.connInfo.TargetPlatform != 
TargetPlatformWindows { - // Request a PTY - termModes := ssh.TerminalModes{ - ssh.ECHO: 0, // do not echo - ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud - ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud - } - - if err := session.RequestPty("xterm", 80, 40, termModes); err != nil { - return err - } - } - - log.Printf("[DEBUG] starting remote command: %s", cmd.Command) - err = session.Start(strings.TrimSpace(cmd.Command) + "\n") - if err != nil { - return err - } - - // Start a goroutine to wait for the session to end and set the - // exit boolean and status. - go func() { - defer session.Close() - - err := session.Wait() - exitStatus := 0 - if err != nil { - exitErr, ok := err.(*ssh.ExitError) - if ok { - exitStatus = exitErr.ExitStatus() - } - } - - cmd.SetExitStatus(exitStatus, err) - log.Printf("[DEBUG] remote command exited with '%d': %s", exitStatus, cmd.Command) - }() - - return nil -} - -// Upload implementation of communicator.Communicator interface -func (c *Communicator) Upload(path string, input io.Reader) error { - // The target directory and file for talking the SCP protocol - targetDir := filepath.Dir(path) - targetFile := filepath.Base(path) - - // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). - // This does not work when the target host is unix. 
Switch to forward slash - // which works for unix and windows - targetDir = filepath.ToSlash(targetDir) - - // Skip copying if we can get the file size directly from common io.Readers - size := int64(0) - - switch src := input.(type) { - case *os.File: - fi, err := src.Stat() - if err == nil { - size = fi.Size() - } - case *bytes.Buffer: - size = int64(src.Len()) - case *bytes.Reader: - size = int64(src.Len()) - case *strings.Reader: - size = int64(src.Len()) - } - - scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { - return scpUploadFile(targetFile, input, w, stdoutR, size) - } - - cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) - if err != nil { - return err - } - return c.scpSession(cmd, scpFunc) -} - -// UploadScript implementation of communicator.Communicator interface -func (c *Communicator) UploadScript(path string, input io.Reader) error { - reader := bufio.NewReader(input) - prefix, err := reader.Peek(2) - if err != nil { - return fmt.Errorf("Error reading script: %s", err) - } - var script bytes.Buffer - - if string(prefix) != "#!" 
&& c.connInfo.TargetPlatform != TargetPlatformWindows { - script.WriteString(DefaultShebang) - } - script.ReadFrom(reader) - - if err := c.Upload(path, &script); err != nil { - return err - } - if c.connInfo.TargetPlatform != TargetPlatformWindows { - var stdout, stderr bytes.Buffer - cmd := &remote.Cmd{ - Command: fmt.Sprintf("chmod 0777 %s", path), - Stdout: &stdout, - Stderr: &stderr, - } - if err := c.Start(cmd); err != nil { - return fmt.Errorf( - "Error chmodding script file to 0777 in remote "+ - "machine: %s", err) - } - - if err := cmd.Wait(); err != nil { - return fmt.Errorf( - "Error chmodding script file to 0777 in remote "+ - "machine %v: %s %s", err, stdout.String(), stderr.String()) - } - } - return nil -} - -// UploadDir implementation of communicator.Communicator interface -func (c *Communicator) UploadDir(dst string, src string) error { - log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) - scpFunc := func(w io.Writer, r *bufio.Reader) error { - uploadEntries := func() error { - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - entries, err := f.Readdir(-1) - if err != nil { - return err - } - - return scpUploadDir(src, entries, w, r) - } - - if src[len(src)-1] != '/' { - log.Printf("[DEBUG] No trailing slash, creating the source directory name") - return scpUploadDirProtocol(filepath.Base(src), w, r, uploadEntries) - } - // Trailing slash, so only upload the contents - return uploadEntries() - } - - cmd, err := quoteShell([]string{"scp", "-rvt", dst}, c.connInfo.TargetPlatform) - if err != nil { - return err - } - return c.scpSession(cmd, scpFunc) -} - -func (c *Communicator) newSession() (session *ssh.Session, err error) { - log.Println("[DEBUG] opening new ssh session") - if c.client == nil { - err = errors.New("ssh client is not connected") - } else { - session, err = c.client.NewSession() - } - - if err != nil { - log.Printf("[WARN] ssh session open error: '%s', attempting reconnect", err) - if err := 
c.Connect(nil); err != nil { - return nil, err - } - - return c.client.NewSession() - } - - return session, nil -} - -func (c *Communicator) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { - session, err := c.newSession() - if err != nil { - return err - } - defer session.Close() - - // Get a pipe to stdin so that we can send data down - stdinW, err := session.StdinPipe() - if err != nil { - return err - } - - // We only want to close once, so we nil w after we close it, - // and only close in the defer if it hasn't been closed already. - defer func() { - if stdinW != nil { - stdinW.Close() - } - }() - - // Get a pipe to stdout so that we can get responses back - stdoutPipe, err := session.StdoutPipe() - if err != nil { - return err - } - stdoutR := bufio.NewReader(stdoutPipe) - - // Set stderr to a bytes buffer - stderr := new(bytes.Buffer) - session.Stderr = stderr - - // Start the sink mode on the other side - // TODO(mitchellh): There are probably issues with shell escaping the path - log.Println("[DEBUG] Starting remote scp process: ", scpCommand) - if err := session.Start(scpCommand); err != nil { - return err - } - - // Call our callback that executes in the context of SCP. We ignore - // EOF errors if they occur because it usually means that SCP prematurely - // ended on the other side. - log.Println("[DEBUG] Started SCP session, beginning transfers...") - if err := f(stdinW, stdoutR); err != nil && err != io.EOF { - return err - } - - // Close the stdin, which sends an EOF, and then set w to nil so that - // our defer func doesn't close it again since that is unsafe with - // the Go SSH package. - log.Println("[DEBUG] SCP session complete, closing stdin pipe.") - stdinW.Close() - stdinW = nil - - // Wait for the SCP connection to close, meaning it has consumed all - // our data and has completed. Or has errored. 
- log.Println("[DEBUG] Waiting for SSH session to complete.") - err = session.Wait() - - // log any stderr before exiting on an error - scpErr := stderr.String() - if len(scpErr) > 0 { - log.Printf("[ERROR] scp stderr: %q", stderr) - } - - if err != nil { - if exitErr, ok := err.(*ssh.ExitError); ok { - // Otherwise, we have an ExitErorr, meaning we can just read - // the exit status - log.Printf("[ERROR] %s", exitErr) - - // If we exited with status 127, it means SCP isn't available. - // Return a more descriptive error for that. - if exitErr.ExitStatus() == 127 { - return errors.New( - "SCP failed to start. This usually means that SCP is not\n" + - "properly installed on the remote system.") - } - } - - return err - } - - return nil -} - -// checkSCPStatus checks that a prior command sent to SCP completed -// successfully. If it did not complete successfully, an error will -// be returned. -func checkSCPStatus(r *bufio.Reader) error { - code, err := r.ReadByte() - if err != nil { - return err - } - - if code != 0 { - // Treat any non-zero (really 1 and 2) as fatal errors - message, _, err := r.ReadLine() - if err != nil { - return fmt.Errorf("Error reading error message: %s", err) - } - - return errors.New(string(message)) - } - - return nil -} - -var testUploadSizeHook func(size int64) - -func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, size int64) error { - if testUploadSizeHook != nil { - testUploadSizeHook(size) - } - - if size == 0 { - // Create a temporary file where we can copy the contents of the src - // so that we can determine the length, since SCP is length-prefixed. 
- tf, err := ioutil.TempFile("", "terraform-upload") - if err != nil { - return fmt.Errorf("Error creating temporary file for upload: %s", err) - } - defer os.Remove(tf.Name()) - defer tf.Close() - - log.Println("[DEBUG] Copying input data into temporary file so we can read the length") - if _, err := io.Copy(tf, src); err != nil { - return err - } - - // Sync the file so that the contents are definitely on disk, then - // read the length of it. - if err := tf.Sync(); err != nil { - return fmt.Errorf("Error creating temporary file for upload: %s", err) - } - - // Seek the file to the beginning so we can re-read all of it - if _, err := tf.Seek(0, 0); err != nil { - return fmt.Errorf("Error creating temporary file for upload: %s", err) - } - - fi, err := tf.Stat() - if err != nil { - return fmt.Errorf("Error creating temporary file for upload: %s", err) - } - - src = tf - size = fi.Size() - } - - // Start the protocol - log.Println("[DEBUG] Beginning file upload...") - fmt.Fprintln(w, "C0644", size, dst) - if err := checkSCPStatus(r); err != nil { - return err - } - - if _, err := io.Copy(w, src); err != nil { - return err - } - - fmt.Fprint(w, "\x00") - if err := checkSCPStatus(r); err != nil { - return err - } - - return nil -} - -func scpUploadDirProtocol(name string, w io.Writer, r *bufio.Reader, f func() error) error { - log.Printf("[DEBUG] SCP: starting directory upload: %s", name) - fmt.Fprintln(w, "D0755 0", name) - err := checkSCPStatus(r) - if err != nil { - return err - } - - if err := f(); err != nil { - return err - } - - fmt.Fprintln(w, "E") - if err != nil { - return err - } - - return nil -} - -func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) error { - for _, fi := range fs { - realPath := filepath.Join(root, fi.Name()) - - // Track if this is actually a symlink to a directory. If it is - // a symlink to a file we don't do any special behavior because uploading - // a file just works. 
If it is a directory, we need to know so we - // treat it as such. - isSymlinkToDir := false - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - symPath, err := filepath.EvalSymlinks(realPath) - if err != nil { - return err - } - - symFi, err := os.Lstat(symPath) - if err != nil { - return err - } - - isSymlinkToDir = symFi.IsDir() - } - - if !fi.IsDir() && !isSymlinkToDir { - // It is a regular file (or symlink to a file), just upload it - f, err := os.Open(realPath) - if err != nil { - return err - } - - err = func() error { - defer f.Close() - return scpUploadFile(fi.Name(), f, w, r, fi.Size()) - }() - - if err != nil { - return err - } - - continue - } - - // It is a directory, recursively upload - err := scpUploadDirProtocol(fi.Name(), w, r, func() error { - f, err := os.Open(realPath) - if err != nil { - return err - } - defer f.Close() - - entries, err := f.Readdir(-1) - if err != nil { - return err - } - - return scpUploadDir(realPath, entries, w, r) - }) - if err != nil { - return err - } - } - - return nil -} - -// ConnectFunc is a convenience method for returning a function -// that just uses net.Dial to communicate with the remote end that -// is suitable for use with the SSH communicator configuration. -func ConnectFunc(network, addr string, p *proxyInfo) func() (net.Conn, error) { - return func() (net.Conn, error) { - var c net.Conn - var err error - - // Wrap connection to host if proxy server is configured - if p != nil { - RegisterDialerType() - c, err = newHttpProxyConn(p, addr) - } else { - c, err = net.DialTimeout(network, addr, 15*time.Second) - } - - if err != nil { - return nil, err - } - - if tcpConn, ok := c.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - } - - return c, nil - } -} - -// BastionConnectFunc is a convenience method for returning a function -// that connects to a host over a bastion connection. 
-func BastionConnectFunc( - bProto string, - bAddr string, - bConf *ssh.ClientConfig, - proto string, - addr string, - p *proxyInfo) func() (net.Conn, error) { - return func() (net.Conn, error) { - log.Printf("[DEBUG] Connecting to bastion: %s", bAddr) - var bastion *ssh.Client - var err error - - // Wrap connection to bastion server if proxy server is configured - if p != nil { - var pConn net.Conn - var bConn ssh.Conn - var bChans <-chan ssh.NewChannel - var bReq <-chan *ssh.Request - - RegisterDialerType() - pConn, err = newHttpProxyConn(p, bAddr) - - if err != nil { - return nil, fmt.Errorf("Error connecting to proxy: %s", err) - } - - bConn, bChans, bReq, err = ssh.NewClientConn(pConn, bAddr, bConf) - - if err != nil { - return nil, fmt.Errorf("Error creating new client connection via proxy: %s", err) - } - - bastion = ssh.NewClient(bConn, bChans, bReq) - } else { - bastion, err = ssh.Dial(bProto, bAddr, bConf) - } - - if err != nil { - return nil, fmt.Errorf("Error connecting to bastion: %s", err) - } - - log.Printf("[DEBUG] Connecting via bastion (%s) to host: %s", bAddr, addr) - conn, err := bastion.Dial(proto, addr) - if err != nil { - bastion.Close() - return nil, err - } - - // Wrap it up so we close both things properly - return &bastionConn{ - Conn: conn, - Bastion: bastion, - }, nil - } -} - -type bastionConn struct { - net.Conn - Bastion *ssh.Client -} - -func (c *bastionConn) Close() error { - c.Conn.Close() - return c.Bastion.Close() -} - -func quoteShell(args []string, targetPlatform string) (string, error) { - if targetPlatform == TargetPlatformUnix { - return shquot.POSIXShell(args), nil - } - if targetPlatform == TargetPlatformWindows { - return shquot.WindowsArgv(args), nil - } - - return "", fmt.Errorf("Cannot quote shell command, target platform unknown: %s", targetPlatform) - -} diff --git a/internal/communicator/ssh/communicator_test.go b/internal/communicator/ssh/communicator_test.go deleted file mode 100644 index 
b829e5b9afb3..000000000000 --- a/internal/communicator/ssh/communicator_test.go +++ /dev/null @@ -1,759 +0,0 @@ -//go:build !race -// +build !race - -package ssh - -import ( - "bufio" - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/zclconf/go-cty/cty" - "golang.org/x/crypto/ssh" -) - -// private key for mock server -const testServerPrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA19lGVsTqIT5iiNYRgnoY1CwkbETW5cq+Rzk5v/kTlf31XpSU -70HVWkbTERECjaYdXM2gGcbb+sxpq6GtXf1M3kVomycqhxwhPv4Cr6Xp4WT/jkFx -9z+FFzpeodGJWjOH6L2H5uX1Cvr9EDdQp9t9/J32/qBFntY8GwoUI/y/1MSTmMiF -tupdMODN064vd3gyMKTwrlQ8tZM6aYuyOPsutLlUY7M5x5FwMDYvnPDSeyT/Iw0z -s3B+NCyqeeMd2T7YzQFnRATj0M7rM5LoSs7DVqVriOEABssFyLj31PboaoLhOKgc -qoM9khkNzr7FHVvi+DhYM2jD0DwvqZLN6NmnLwIDAQABAoIBAQCGVj+kuSFOV1lT -+IclQYA6bM6uY5mroqcSBNegVxCNhWU03BxlW//BE9tA/+kq53vWylMeN9mpGZea -riEMIh25KFGWXqXlOOioH8bkMsqA8S7sBmc7jljyv+0toQ9vCCtJ+sueNPhxQQxH -D2YvUjfzBQ04I9+wn30BByDJ1QA/FoPsunxIOUCcRBE/7jxuLYcpR+JvEF68yYIh -atXRld4W4in7T65YDR8jK1Uj9XAcNeDYNpT/M6oFLx1aPIlkG86aCWRO19S1jLPT -b1ZAKHHxPMCVkSYW0RqvIgLXQOR62D0Zne6/2wtzJkk5UCjkSQ2z7ZzJpMkWgDgN -ifCULFPBAoGBAPoMZ5q1w+zB+knXUD33n1J+niN6TZHJulpf2w5zsW+m2K6Zn62M -MXndXlVAHtk6p02q9kxHdgov34Uo8VpuNjbS1+abGFTI8NZgFo+bsDxJdItemwC4 -KJ7L1iz39hRN/ZylMRLz5uTYRGddCkeIHhiG2h7zohH/MaYzUacXEEy3AoGBANz8 -e/msleB+iXC0cXKwds26N4hyMdAFE5qAqJXvV3S2W8JZnmU+sS7vPAWMYPlERPk1 -D8Q2eXqdPIkAWBhrx4RxD7rNc5qFNcQWEhCIxC9fccluH1y5g2M+4jpMX2CT8Uv+ -3z+NoJ5uDTXZTnLCfoZzgZ4nCZVZ+6iU5U1+YXFJAoGBANLPpIV920n/nJmmquMj -orI1R/QXR9Cy56cMC65agezlGOfTYxk5Cfl5Ve+/2IJCfgzwJyjWUsFx7RviEeGw -64o7JoUom1HX+5xxdHPsyZ96OoTJ5RqtKKoApnhRMamau0fWydH1yeOEJd+TRHhc -XStGfhz8QNa1dVFvENczja1vAoGABGWhsd4VPVpHMc7lUvrf4kgKQtTC2PjA4xoc -QJ96hf/642sVE76jl+N6tkGMzGjnVm4P2j+bOy1VvwQavKGoXqJBRd5Apppv727g 
-/SM7hBXKFc/zH80xKBBgP/i1DR7kdjakCoeu4ngeGywvu2jTS6mQsqzkK+yWbUxJ -I7mYBsECgYB/KNXlTEpXtz/kwWCHFSYA8U74l7zZbVD8ul0e56JDK+lLcJ0tJffk -gqnBycHj6AhEycjda75cs+0zybZvN4x65KZHOGW/O/7OAWEcZP5TPb3zf9ned3Hl -NsZoFj52ponUM6+99A2CmezFCN16c4mbA//luWF+k3VVqR6BpkrhKw== ------END RSA PRIVATE KEY-----` - -// this cert was signed by the key from testCAPublicKey -const testServerHostCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgvQ3Bs1ex7277b9q6I0fNaWsVEC16f+LcT8RLPSVMEVMAAAADAQABAAABAQDX2UZWxOohPmKI1hGCehjULCRsRNblyr5HOTm/+ROV/fVelJTvQdVaRtMREQKNph1czaAZxtv6zGmroa1d/UzeRWibJyqHHCE+/gKvpenhZP+OQXH3P4UXOl6h0YlaM4fovYfm5fUK+v0QN1Cn2338nfb+oEWe1jwbChQj/L/UxJOYyIW26l0w4M3Tri93eDIwpPCuVDy1kzppi7I4+y60uVRjsznHkXAwNi+c8NJ7JP8jDTOzcH40LKp54x3ZPtjNAWdEBOPQzuszkuhKzsNWpWuI4QAGywXIuPfU9uhqguE4qByqgz2SGQ3OvsUdW+L4OFgzaMPQPC+pks3o2acvAAAAAAAAAAAAAAACAAAAB2NhLXRlc3QAAAANAAAACTEyNy4wLjAuMQAAAABag0jkAAAAAHDcHtAAAAAAAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81AAABDwAAAAdzc2gtcnNhAAABAEyoiVkZ5z79nh3WSU5mU2U7e2BItnnEqsJIm9EN+35uG0yORSXmQoaa9mtli7G3r79tyqEJd/C95EdNvU/9TjaoDcbH8OHP+Ue9XSfUzBuQ6bGSXe6mlZlO7QJ1cIyWphFP3MkrweDSiJ+SpeXzLzZkiJ7zKv5czhBEyG/MujFgvikotL+eUNG42y2cgsesXSjENSBS3l11q55a+RM2QKt3W32im8CsSxrH6Mz6p4JXQNgsVvZRknLxNlWXULFB2HLTunPKzJNMTf6xZf66oivSBAXVIdNKhlVpAQ3dT/dW5K6J4aQF/hjWByyLprFwZ16cPDqvtalnTCpbRYelNbw=` - -const testCAPublicKey = `ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81` - -func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string { - serverConfig := &ssh.ServerConfig{ - PasswordCallback: acceptUserPass("user", "pass"), - PublicKeyCallback: acceptPublicKey(pubKey), - } - - var err error - if signer == nil { - signer, err = ssh.ParsePrivateKey([]byte(testServerPrivateKey)) - if err != nil { - t.Fatalf("unable to parse private key: %s", err) - } - } - serverConfig.AddHostKey(signer) - - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to listen for connection: %s", err) - } - - go func() { - defer l.Close() - c, err := l.Accept() - if err != nil { - t.Errorf("Unable to accept incoming connection: %s", err) - } - defer c.Close() - conn, chans, _, err := ssh.NewServerConn(c, serverConfig) - if err != nil { - t.Logf("Handshaking error: %v", err) - } - t.Log("Accepted SSH connection") - - for newChannel := range chans { - channel, requests, err := newChannel.Accept() - if err != nil { - t.Errorf("Unable to accept channel.") - } - t.Log("Accepted channel") - - go func(in <-chan *ssh.Request) { - defer channel.Close() - for req := range in { - // since this channel's requests are serviced serially, - // this will block keepalive probes, and can simulate a - // hung connection. 
- if bytes.Contains(req.Payload, []byte("sleep")) { - time.Sleep(time.Second) - } - - if req.WantReply { - req.Reply(true, nil) - } - } - }(requests) - } - conn.Close() - }() - - return l.Addr().String() -} - -func TestNew_Invalid(t *testing.T) { - address := newMockLineServer(t, nil, testClientPublicKey) - parts := strings.Split(address, ":") - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("i-am-invalid"), - "host": cty.StringVal(parts[0]), - "port": cty.StringVal(parts[1]), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - err = c.Connect(nil) - if err == nil { - t.Fatal("should have had an error connecting") - } -} - -func TestNew_InvalidHost(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("i-am-invalid"), - "port": cty.StringVal("22"), - "timeout": cty.StringVal("30s"), - }) - - _, err := New(v) - if err == nil { - t.Fatal("should have had an error creating communicator") - } -} - -func TestStart(t *testing.T) { - address := newMockLineServer(t, nil, testClientPublicKey) - parts := strings.Split(address, ":") - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(parts[0]), - "port": cty.StringVal(parts[1]), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - err = c.Start(&cmd) - if err != nil { - t.Fatalf("error executing remote command: %s", err) - } -} - -// TestKeepAlives verifies that the keepalive messages don't interfere with -// normal operation of the client. 
-func TestKeepAlives(t *testing.T) { - ivl := keepAliveInterval - keepAliveInterval = 250 * time.Millisecond - defer func() { keepAliveInterval = ivl }() - - address := newMockLineServer(t, nil, testClientPublicKey) - parts := strings.Split(address, ":") - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(parts[0]), - "port": cty.StringVal(parts[1]), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - if err := c.Connect(nil); err != nil { - t.Fatal(err) - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "sleep" - cmd.Stdout = stdout - - // wait a bit before executing the command, so that at least 1 keepalive is sent - time.Sleep(500 * time.Millisecond) - - err = c.Start(&cmd) - if err != nil { - t.Fatalf("error executing remote command: %s", err) - } -} - -// TestDeadConnection verifies that failed keepalive messages will eventually -// kill the connection. 
-func TestFailedKeepAlives(t *testing.T) { - ivl := keepAliveInterval - del := maxKeepAliveDelay - maxKeepAliveDelay = 500 * time.Millisecond - keepAliveInterval = 250 * time.Millisecond - defer func() { - keepAliveInterval = ivl - maxKeepAliveDelay = del - }() - - address := newMockLineServer(t, nil, testClientPublicKey) - parts := strings.Split(address, ":") - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(parts[0]), - "port": cty.StringVal(parts[1]), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - if err := c.Connect(nil); err != nil { - t.Fatal(err) - } - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "sleep" - cmd.Stdout = stdout - - err = c.Start(&cmd) - if err == nil { - t.Fatal("expected connection error") - } -} - -func TestLostConnection(t *testing.T) { - address := newMockLineServer(t, nil, testClientPublicKey) - parts := strings.Split(address, ":") - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(parts[0]), - "port": cty.StringVal(parts[1]), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - err = c.Start(&cmd) - if err != nil { - t.Fatalf("error executing remote command: %s", err) - } - - // The test server can't execute anything, so Wait will block, unless - // there's an error. Disconnect the communicator transport, to cause the - // command to fail. 
- go func() { - time.Sleep(100 * time.Millisecond) - c.Disconnect() - }() - - err = cmd.Wait() - if err == nil { - t.Fatal("expected communicator error") - } -} - -func TestHostKey(t *testing.T) { - // get the server's public key - signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) - if err != nil { - t.Fatalf("unable to parse private key: %v", err) - } - pubKey := fmt.Sprintf("ssh-rsa %s", base64.StdEncoding.EncodeToString(signer.PublicKey().Marshal())) - - address := newMockLineServer(t, nil, testClientPublicKey) - host, p, _ := net.SplitHostPort(address) - port, _ := strconv.Atoi(p) - - connInfo := &connectionInfo{ - User: "user", - Password: "pass", - Host: host, - HostKey: pubKey, - Port: uint16(port), - Timeout: "30s", - } - - cfg, err := prepareSSHConfig(connInfo) - if err != nil { - t.Fatal(err) - } - - c := &Communicator{ - connInfo: connInfo, - config: cfg, - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - if err := c.Start(&cmd); err != nil { - t.Fatal(err) - } - if err := c.Disconnect(); err != nil { - t.Fatal(err) - } - - // now check with the wrong HostKey - address = newMockLineServer(t, nil, testClientPublicKey) - _, p, _ = net.SplitHostPort(address) - port, _ = strconv.Atoi(p) - - connInfo.HostKey = testClientPublicKey - connInfo.Port = uint16(port) - - cfg, err = prepareSSHConfig(connInfo) - if err != nil { - t.Fatal(err) - } - - c = &Communicator{ - connInfo: connInfo, - config: cfg, - } - - err = c.Start(&cmd) - if err == nil || !strings.Contains(err.Error(), "mismatch") { - t.Fatalf("expected host key mismatch, got error:%v", err) - } -} - -func TestHostCert(t *testing.T) { - pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testServerHostCert)) - if err != nil { - t.Fatal(err) - } - - signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) - if err != nil { - t.Fatal(err) - } - - signer, err = ssh.NewCertSigner(pk.(*ssh.Certificate), signer) - if err != nil { - 
t.Fatal(err) - } - - address := newMockLineServer(t, signer, testClientPublicKey) - host, p, _ := net.SplitHostPort(address) - port, _ := strconv.Atoi(p) - - connInfo := &connectionInfo{ - User: "user", - Password: "pass", - Host: host, - HostKey: testCAPublicKey, - Port: uint16(port), - Timeout: "30s", - } - - cfg, err := prepareSSHConfig(connInfo) - if err != nil { - t.Fatal(err) - } - - c := &Communicator{ - connInfo: connInfo, - config: cfg, - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - if err := c.Start(&cmd); err != nil { - t.Fatal(err) - } - if err := c.Disconnect(); err != nil { - t.Fatal(err) - } - - // now check with the wrong HostKey - address = newMockLineServer(t, signer, testClientPublicKey) - _, p, _ = net.SplitHostPort(address) - port, _ = strconv.Atoi(p) - - connInfo.HostKey = testClientPublicKey - connInfo.Port = uint16(port) - - cfg, err = prepareSSHConfig(connInfo) - if err != nil { - t.Fatal(err) - } - - c = &Communicator{ - connInfo: connInfo, - config: cfg, - } - - err = c.Start(&cmd) - if err == nil || !strings.Contains(err.Error(), "authorities") { - t.Fatalf("expected host key mismatch, got error:%v", err) - } -} - -const SERVER_PEM = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKR -Uji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iX -wvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6 -jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOh -sssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj -13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQIDAQABAoIBAQCmvQMXNmvCDqk7 -30zsVDvw4fHGH+azK3Od1aqTqcEMHISOUbCtckFPxLzIsoSltRQqB1kuRVG07skm -Stsu+xny4lLcSwBVuLRuykEK2EyYIc/5Owo6y9pkhkaSf5ZfFes4bnD6+B/BhRpp -PRMMq0E+xCkX/G6iIi9mhgdlqm0x/vKtjzQeeshw9+gRcRLUpX+UeKFKXMXcDayx -qekr1bAaQKNBhTK+CbZjcqzG4f+BXVGRTZ9nsPAV+yTnWUCU0TghwPmtthHbebqa 
-9hlkum7qik/bQj/tjJ8/b0vTfHQSVxhtPG/ZV2Tn9ZuL/vrkYqeyMU8XkJ/uaEvH -WPyOcB4BAoGBAP5o5JSEtPog+U3JFrLNSRjz5ofZNVkJzice+0XyqlzJDHhX5tF8 -mriYQZLLXYhckBm4IdkhTn/dVbXNQTzyy2WVuO5nU8bkCMvGL9CGpW4YGqwGf7NX -e4H3emtRjLv8VZpUHe/RUUDhmYvMSt1qmXuskfpROuGfLhQBUd6A4J+BAoGBAPGp -UcMKjrxZ5qjYU6DLgS+xeca4Eu70HgdbSQbRo45WubXjyXvTRFij36DrpxJWf1D7 -lIsyBifoTra/lAuC1NQXGYWjTCdk2ey8Ll5qOgiXvE6lINHABr+U/Z90/g6LuML2 -VzaZbq/QLcT3yVsdyTogKckzCaKsCpusyHE1CXAVAoGAd6kMglKc8N0bhZukgnsN -+5+UeacPcY6sGTh4RWErAjNKGzx1A2lROKvcg9gFaULoQECcIw2IZ5nKW5VsLueg -BWrTrcaJ4A2XmYjhKnp6SvspaGoyHD90hx/Iw7t6r1yzQsB3yDmytwqldtyjBdvC -zynPC2azhDWjraMlR7tka4ECgYAxwvLiHa9sm3qCtCDsUFtmrb3srITBjaUNUL/F -1q8+JR+Sk7gudj9xnTT0VvINNaB71YIt83wPBagHu4VJpYQbtDH+MbUBu6OgOtO1 -f1w53rzY2OncJxV8p7pd9mJGLoE6LC2jQY7oRw7Vq0xcJdME1BCmrIrEY3a/vaF8 -pjYuTQKBgQCIOH23Xita8KmhH0NdlWxZfcQt1j3AnOcKe6UyN4BsF8hqS7eTA52s -WjG5X2IBl7gs1eMM1qkqR8npS9nwfO/pBmZPwjiZoilypXxWj+c+P3vwre2yija4 -bXgFVj4KFBwhr1+8KcobxC0SAPEouMvSkxzjjw+gnebozUtPlud9jA== ------END RSA PRIVATE KEY----- -` -const CLIENT_CERT_SIGNED_BY_SERVER = `ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgbMDNUn4M2TtzrSH7MOT2QsvLzZWjehJ5TYrBOp9p+lwAAAADAQABAAABAQCyu57E7zIWRyEWuaiOiikOSZKFjbwLkpE9fboFfLLsNUJj4zw+5bZUJtzWK8roPjgL8s1oPncro5wuTtI2Nu4fkpeFK0Hb33o6Eyksuj4Om4+6Uemn1QEcb0bZqK8Zyg9Dg9deP7LeE0v78b5/jZafFgwxv+/sMhM0PRD34NCDYcYmkkHlvQtQWFAdbPXCgghObedZyYdoqZVuhTsiPMWtQS/cc9M4tv6mPOuQlhZt3R/Oh/kwUyu45oGRb5bhO4JicozFS3oeClpU+UMbgslkzApJqxZBWN7+PDFSZhKk2GslyeyP4sH3E30Z00yVi/lQYgmQsB+Hg6ClemNQMNu/AAAAAAAAAAAAAAACAAAABHVzZXIAAAAIAAAABHVzZXIAAAAAWzBjXAAAAAB/POfPAAAAAAAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKRUji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iXwvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOhsssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQAAAQ8AAAAHc3NoLXJzYQAAAQC6sKEQHyl954BQn2BXuTgOB3NkENBxN7SD8ZaS8PNkDESytLjSIqrzoE6m7xuzprA+G23XRrCY/um3UvM7+7+zbwig2NIBbGbp3QFliQHegQKW6hTZP09jAQZk5jRrrEr/QT/s+gtHPmjxJK7XOQYxhInDKj+aJg62ExcwpQlP/0ATKNOIkdzTzzq916p0UOnnVaaPMKibh5Lv69GafIhKJRZSuuLN9fvs1G1RuUbxn/BNSeoRCr54L++Ztg09fJxunoyELs8mwgzCgB3pdZoUR2Z6ak05W4mvH3lkSz2BKUrlwxI6mterxhJy1GuN1K/zBG0gEMl2UTLajGK3qKM8 itbitloaner@MacBook-Pro-4.fios-router.home` -const CLIENT_PEM = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAsruexO8yFkchFrmojoopDkmShY28C5KRPX26BXyy7DVCY+M8 -PuW2VCbc1ivK6D44C/LNaD53K6OcLk7SNjbuH5KXhStB2996OhMpLLo+DpuPulHp -p9UBHG9G2aivGcoPQ4PXXj+y3hNL+/G+f42WnxYMMb/v7DITND0Q9+DQg2HGJpJB -5b0LUFhQHWz1woIITm3nWcmHaKmVboU7IjzFrUEv3HPTOLb+pjzrkJYWbd0fzof5 -MFMruOaBkW+W4TuCYnKMxUt6HgpaVPlDG4LJZMwKSasWQVje/jwxUmYSpNhrJcns -j+LB9xN9GdNMlYv5UGIJkLAfh4OgpXpjUDDbvwIDAQABAoIBAEu2ctFVyk/pnbi0 -uRR4rl+hBvKQUeJNGj2ELvL4Ggs5nIAX2IOEZ7JKLC6FqpSrFq7pEd5g57aSvixX -s3DH4CN7w7fj1ShBCNPlHgIWewdRGpeA74vrDWdwNAEsFdDE6aZeCTOhpDGy1vNJ -OrtpzS5i9pN0jTvvEneEjtWSZIHiiVlN+0hsFaiwZ6KXON+sDccZPmnP6Fzwj5Rc 
-WS0dKSwnxnx0otWgwWFs8nr306nSeMsNmQkHsS9lz4DEVpp9owdzrX1JmbQvNYAV -ohmB3ET4JYFgerqPXJfed9poueGuWCP6MYhsjNeHN35QhofxdO5/0i3JlZfqwZei -tNq/0oECgYEA6SqjRqDiIp3ajwyB7Wf0cIQG/P6JZDyN1jl//htgniliIH5UP1Tm -uAMG5MincV6X9lOyXyh6Yofu5+NR0yt9SqbDZVJ3ZCxKTun7pxJvQFd7wl5bMkiJ -qVfS08k6gQHHDoO+eel+DtpIfWc+e3tvX0aihSU0GZEMqDXYkkphLGECgYEAxDxb -+JwJ3N5UEjjkuvFBpuJnmjIaN9HvQkTv3inlx1gLE4iWBZXXsu4aWF8MCUeAAZyP -42hQDSkCYX/A22tYCEn/jfrU6A+6rkWBTjdUlYLvlSkhosSnO+117WEItb5cUE95 -hF4UY7LNs1AsDkV4WE87f/EjpxSwUAjB2Lfd/B8CgYAJ/JiHsuZcozQ0Qk3iVDyF -ATKnbWOHFozgqw/PW27U92LLj32eRM2o/gAylmGNmoaZt1YBe2NaiwXxiqv7hnZU -VzYxRcn1UWxRWvY7Xq/DKrwTRCVVzwOObEOMbKcD1YaoGX50DEso6bKHJH/pnAzW -INlfKIvFuI+5OK0w/tyQoQKBgQCf/jpaOxaLfrV62eobRQJrByLDBGB97GsvU7di -IjTWz8DQH0d5rE7d8uWF8ZCFrEcAiV6DYZQK9smbJqbd/uoacAKtBro5rkFdPwwK -8m/DKqsdqRhkdgOHh7bjYH7Sdy8ax4Fi27WyB6FQtmgFBrz0+zyetsODwQlzZ4Bs -qpSRrwKBgQC0vWHrY5aGIdF+b8EpP0/SSLLALpMySHyWhDyxYcPqdhszYbjDcavv -xrrLXNUD2duBHKPVYE+7uVoDkpZXLUQ4x8argo/IwQM6Kh2ma1y83TYMT6XhL1+B -5UPcl6RXZBCkiU7nFIG6/0XKFqVWc3fU8e09X+iJwXIJ5Jatywtg+g== ------END RSA PRIVATE KEY----- -` - -func TestCertificateBasedAuth(t *testing.T) { - signer, err := ssh.ParsePrivateKey([]byte(SERVER_PEM)) - if err != nil { - t.Fatalf("unable to parse private key: %v", err) - } - address := newMockLineServer(t, signer, CLIENT_CERT_SIGNED_BY_SERVER) - host, p, _ := net.SplitHostPort(address) - port, _ := strconv.Atoi(p) - - connInfo := &connectionInfo{ - User: "user", - Host: host, - PrivateKey: CLIENT_PEM, - Certificate: CLIENT_CERT_SIGNED_BY_SERVER, - Port: uint16(port), - Timeout: "30s", - } - - cfg, err := prepareSSHConfig(connInfo) - if err != nil { - t.Fatal(err) - } - - c := &Communicator{ - connInfo: connInfo, - config: cfg, - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - if err := c.Start(&cmd); err != nil { - t.Fatal(err) - } - if err := c.Disconnect(); err != nil { - t.Fatal(err) - } -} - -func TestAccUploadFile(t *testing.T) { - 
// use the local ssh server and scp binary to check uploads - if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { - t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") - t.Skip() - } - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "user": cty.StringVal(os.Getenv("USER")), - "host": cty.StringVal("127.0.0.1"), - "port": cty.StringVal("22"), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - tmpDir := t.TempDir() - source, err := os.CreateTemp(tmpDir, "tempfile.in") - if err != nil { - t.Fatal(err) - } - - content := "this is the file content" - if _, err := source.WriteString(content); err != nil { - t.Fatal(err) - } - source.Seek(0, io.SeekStart) - - tmpFile := filepath.Join(tmpDir, "tempFile.out") - - testUploadSizeHook = func(size int64) { - if size != int64(len(content)) { - t.Errorf("expected %d bytes, got %d\n", len(content), size) - } - } - defer func() { - testUploadSizeHook = nil - }() - - err = c.Upload(tmpFile, source) - if err != nil { - t.Fatalf("error uploading file: %s", err) - } - - data, err := ioutil.ReadFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if string(data) != content { - t.Fatalf("bad: %s", data) - } -} - -func TestAccHugeUploadFile(t *testing.T) { - // use the local ssh server and scp binary to check uploads - if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { - t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") - t.Skip() - } - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "host": cty.StringVal("127.0.0.1"), - "user": cty.StringVal(os.Getenv("USER")), - "port": cty.StringVal("22"), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - // copy 4GB of data, random to prevent compression. 
- size := int64(1 << 32) - source := io.LimitReader(rand.New(rand.NewSource(0)), size) - - dest, err := ioutil.TempFile("", "communicator") - if err != nil { - t.Fatal(err) - } - destName := dest.Name() - dest.Close() - defer os.Remove(destName) - - t.Log("Uploading to", destName) - - // bypass the Upload method so we can directly supply the file size - // preventing the extra copy of the huge file. - targetDir := filepath.Dir(destName) - targetFile := filepath.Base(destName) - - scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { - return scpUploadFile(targetFile, source, w, stdoutR, size) - } - - cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) - if err != nil { - t.Fatal(err) - } - err = c.scpSession(cmd, scpFunc) - if err != nil { - t.Fatal(err) - } - - // check the final file size - fs, err := os.Stat(destName) - if err != nil { - t.Fatal(err) - } - - if fs.Size() != size { - t.Fatalf("expected file size of %d, got %d", size, fs.Size()) - } -} - -func TestScriptPath(t *testing.T) { - cases := []struct { - Input string - Pattern string - }{ - { - "/tmp/script.sh", - `^/tmp/script\.sh$`, - }, - { - "/tmp/script_%RAND%.sh", - `^/tmp/script_(\d+)\.sh$`, - }, - } - - for _, tc := range cases { - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "host": cty.StringVal("127.0.0.1"), - "script_path": cty.StringVal(tc.Input), - }) - - comm, err := New(v) - if err != nil { - t.Fatalf("err: %s", err) - } - output := comm.ScriptPath() - - match, err := regexp.Match(tc.Pattern, []byte(output)) - if err != nil { - t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) - } - if !match { - t.Fatalf("bad: %s\n\n%s", tc.Input, output) - } - } -} - -func TestScriptPath_randSeed(t *testing.T) { - // Pre GH-4186 fix, this value was the deterministic start the pseudorandom - // chain of unseeded math/rand values for Int31(). 
- staticSeedPath := "/tmp/terraform_1298498081.sh" - c, err := New(cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "host": cty.StringVal("127.0.0.1"), - })) - if err != nil { - t.Fatalf("err: %s", err) - } - path := c.ScriptPath() - if path == staticSeedPath { - t.Fatalf("rand not seeded! got: %s", path) - } -} - -var testClientPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE6A1c4n+OtEPEFlNKTZf2i03L3NylSYmvmJ8OLmzLuPZmJBJt4G3VZ/60s1aKzwLKrTq20S+ONG4zvnK5zIPoauoNNdUJKbg944hB4OE+HDbrBhk7SH+YWCsCILBoSXwAVdUEic6FWf/SeqBSmTBySHvpuNOw16J+SK6Ardx8k64F2tRkZuC6AmOZijgKa/sQKjWAIVPk34ECM6OLfPc3kKUEfkdpYLvuMfuRMfSTlxn5lFC0b0SovK9aWfNMBH9iXLQkieQ5rXoyzUC7mwgnASgl8cqw1UrToiUuhvneduXBhbQfmC/Upv+tL6dSSk+0DlgVKEHuJmc8s8+/qpdL` - -func acceptUserPass(goodUser, goodPass string) func(ssh.ConnMetadata, []byte) (*ssh.Permissions, error) { - return func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { - if c.User() == goodUser && string(pass) == goodPass { - return nil, nil - } - return nil, fmt.Errorf("password rejected for %q", c.User()) - } -} - -func acceptPublicKey(keystr string) func(ssh.ConnMetadata, ssh.PublicKey) (*ssh.Permissions, error) { - return func(_ ssh.ConnMetadata, inkey ssh.PublicKey) (*ssh.Permissions, error) { - goodkey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(keystr)) - if err != nil { - return nil, fmt.Errorf("error parsing key: %v", err) - } - - if bytes.Equal(inkey.Marshal(), goodkey.Marshal()) { - return nil, nil - } - - return nil, fmt.Errorf("public key rejected") - } -} diff --git a/internal/communicator/ssh/provisioner.go b/internal/communicator/ssh/provisioner.go deleted file mode 100644 index b98ee9f5d2cb..000000000000 --- a/internal/communicator/ssh/provisioner.go +++ /dev/null @@ -1,593 +0,0 @@ -package ssh - -import ( - "bytes" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - "strings" - "time" - - "github.com/hashicorp/terraform/internal/communicator/shared" - 
sshagent "github.com/xanzy/ssh-agent" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - "golang.org/x/crypto/ssh/knownhosts" -) - -const ( - // DefaultUser is used if there is no user given - DefaultUser = "root" - - // DefaultPort is used if there is no port given - DefaultPort = 22 - - // DefaultUnixScriptPath is used as the path to copy the file to - // for remote execution on unix if not provided otherwise. - DefaultUnixScriptPath = "/tmp/terraform_%RAND%.sh" - // DefaultWindowsScriptPath is used as the path to copy the file to - // for remote execution on windows if not provided otherwise. - DefaultWindowsScriptPath = "C:/windows/temp/terraform_%RAND%.cmd" - - // DefaultTimeout is used if there is no timeout given - DefaultTimeout = 5 * time.Minute - - // TargetPlatformUnix used for cleaner code, and is used if no target platform has been specified - TargetPlatformUnix = "unix" - //TargetPlatformWindows used for cleaner code - TargetPlatformWindows = "windows" -) - -// connectionInfo is decoded from the ConnInfo of the resource. These are the -// only keys we look at. If a PrivateKey is given, that is used instead -// of a password. -type connectionInfo struct { - User string - Password string - PrivateKey string - Certificate string - Host string - HostKey string - Port uint16 - Agent bool - ScriptPath string - TargetPlatform string - Timeout string - TimeoutVal time.Duration - - ProxyScheme string - ProxyHost string - ProxyPort uint16 - ProxyUserName string - ProxyUserPassword string - - BastionUser string - BastionPassword string - BastionPrivateKey string - BastionCertificate string - BastionHost string - BastionHostKey string - BastionPort uint16 - - AgentIdentity string -} - -// decodeConnInfo decodes the given cty.Value using the same behavior as the -// lgeacy mapstructure decoder in order to preserve as much of the existing -// logic as possible for compatibility. 
-func decodeConnInfo(v cty.Value) (*connectionInfo, error) { - connInfo := &connectionInfo{} - if v.IsNull() { - return connInfo, nil - } - - for k, v := range v.AsValueMap() { - if v.IsNull() { - continue - } - - switch k { - case "user": - connInfo.User = v.AsString() - case "password": - connInfo.Password = v.AsString() - case "private_key": - connInfo.PrivateKey = v.AsString() - case "certificate": - connInfo.Certificate = v.AsString() - case "host": - connInfo.Host = v.AsString() - case "host_key": - connInfo.HostKey = v.AsString() - case "port": - if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil { - return nil, err - } - case "agent": - connInfo.Agent = v.True() - case "script_path": - connInfo.ScriptPath = v.AsString() - case "target_platform": - connInfo.TargetPlatform = v.AsString() - case "timeout": - connInfo.Timeout = v.AsString() - case "proxy_scheme": - connInfo.ProxyScheme = v.AsString() - case "proxy_host": - connInfo.ProxyHost = v.AsString() - case "proxy_port": - if err := gocty.FromCtyValue(v, &connInfo.ProxyPort); err != nil { - return nil, err - } - case "proxy_user_name": - connInfo.ProxyUserName = v.AsString() - case "proxy_user_password": - connInfo.ProxyUserPassword = v.AsString() - case "bastion_user": - connInfo.BastionUser = v.AsString() - case "bastion_password": - connInfo.BastionPassword = v.AsString() - case "bastion_private_key": - connInfo.BastionPrivateKey = v.AsString() - case "bastion_certificate": - connInfo.BastionCertificate = v.AsString() - case "bastion_host": - connInfo.BastionHost = v.AsString() - case "bastion_host_key": - connInfo.BastionHostKey = v.AsString() - case "bastion_port": - if err := gocty.FromCtyValue(v, &connInfo.BastionPort); err != nil { - return nil, err - } - case "agent_identity": - connInfo.AgentIdentity = v.AsString() - } - } - return connInfo, nil -} - -// parseConnectionInfo is used to convert the raw configuration into the -// *connectionInfo struct. 
-func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { - v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) - if err != nil { - return nil, err - } - - connInfo, err := decodeConnInfo(v) - if err != nil { - return nil, err - } - - // To default Agent to true, we need to check the raw string, since the - // decoded boolean can't represent "absence of config". - // - // And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we - // shouldn't try. - agent := v.GetAttr("agent") - if agent.IsNull() && os.Getenv("SSH_AUTH_SOCK") != "" { - connInfo.Agent = true - } - - if connInfo.User == "" { - connInfo.User = DefaultUser - } - - // Check if host is empty. - // Otherwise return error. - if connInfo.Host == "" { - return nil, fmt.Errorf("host for provisioner cannot be empty") - } - - // Format the host if needed. - // Needed for IPv6 support. - connInfo.Host = shared.IpFormat(connInfo.Host) - - if connInfo.Port == 0 { - connInfo.Port = DefaultPort - } - // Set default targetPlatform to unix if it's empty - if connInfo.TargetPlatform == "" { - connInfo.TargetPlatform = TargetPlatformUnix - } else if connInfo.TargetPlatform != TargetPlatformUnix && connInfo.TargetPlatform != TargetPlatformWindows { - return nil, fmt.Errorf("target_platform for provisioner has to be either %s or %s", TargetPlatformUnix, TargetPlatformWindows) - } - // Choose an appropriate default script path based on the target platform. There is no single - // suitable default script path which works on both UNIX and Windows targets. 
- if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformUnix { - connInfo.ScriptPath = DefaultUnixScriptPath - } - if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformWindows { - connInfo.ScriptPath = DefaultWindowsScriptPath - } - if connInfo.Timeout != "" { - connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) - } else { - connInfo.TimeoutVal = DefaultTimeout - } - - // Default all bastion config attrs to their non-bastion counterparts - if connInfo.BastionHost != "" { - // Format the bastion host if needed. - // Needed for IPv6 support. - connInfo.BastionHost = shared.IpFormat(connInfo.BastionHost) - - if connInfo.BastionUser == "" { - connInfo.BastionUser = connInfo.User - } - if connInfo.BastionPassword == "" { - connInfo.BastionPassword = connInfo.Password - } - if connInfo.BastionPrivateKey == "" { - connInfo.BastionPrivateKey = connInfo.PrivateKey - } - if connInfo.BastionCertificate == "" { - connInfo.BastionCertificate = connInfo.Certificate - } - if connInfo.BastionPort == 0 { - connInfo.BastionPort = connInfo.Port - } - } - - return connInfo, nil -} - -// safeDuration returns either the parsed duration or a default value -func safeDuration(dur string, defaultDur time.Duration) time.Duration { - d, err := time.ParseDuration(dur) - if err != nil { - log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) - return defaultDur - } - return d -} - -// prepareSSHConfig is used to turn the *ConnectionInfo provided into a -// usable *SSHConfig for client initialization. 
-func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) { - sshAgent, err := connectToAgent(connInfo) - if err != nil { - return nil, err - } - - host := fmt.Sprintf("%s:%d", connInfo.Host, connInfo.Port) - - sshConf, err := buildSSHClientConfig(sshClientConfigOpts{ - user: connInfo.User, - host: host, - privateKey: connInfo.PrivateKey, - password: connInfo.Password, - hostKey: connInfo.HostKey, - certificate: connInfo.Certificate, - sshAgent: sshAgent, - }) - if err != nil { - return nil, err - } - - var p *proxyInfo - - if connInfo.ProxyHost != "" { - p = newProxyInfo( - fmt.Sprintf("%s:%d", connInfo.ProxyHost, connInfo.ProxyPort), - connInfo.ProxyScheme, - connInfo.ProxyUserName, - connInfo.ProxyUserPassword, - ) - } - - connectFunc := ConnectFunc("tcp", host, p) - - var bastionConf *ssh.ClientConfig - if connInfo.BastionHost != "" { - bastionHost := fmt.Sprintf("%s:%d", connInfo.BastionHost, connInfo.BastionPort) - - bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{ - user: connInfo.BastionUser, - host: bastionHost, - privateKey: connInfo.BastionPrivateKey, - password: connInfo.BastionPassword, - hostKey: connInfo.HostKey, - certificate: connInfo.BastionCertificate, - sshAgent: sshAgent, - }) - if err != nil { - return nil, err - } - - connectFunc = BastionConnectFunc("tcp", bastionHost, bastionConf, "tcp", host, p) - } - - config := &sshConfig{ - config: sshConf, - connection: connectFunc, - sshAgent: sshAgent, - } - return config, nil -} - -type sshClientConfigOpts struct { - privateKey string - password string - sshAgent *sshAgent - certificate string - user string - host string - hostKey string -} - -func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) { - hkCallback := ssh.InsecureIgnoreHostKey() - - if opts.hostKey != "" { - // The knownhosts package only takes paths to files, but terraform - // generally wants to handle config data in-memory. 
Rather than making - // the known_hosts file an exception, write out the data to a temporary - // file to create the HostKeyCallback. - tf, err := ioutil.TempFile("", "tf-known_hosts") - if err != nil { - return nil, fmt.Errorf("failed to create temp known_hosts file: %s", err) - } - defer tf.Close() - defer os.RemoveAll(tf.Name()) - - // we mark this as a CA as well, but the host key fallback will still - // use it as a direct match if the remote host doesn't return a - // certificate. - if _, err := tf.WriteString(fmt.Sprintf("@cert-authority %s %s\n", opts.host, opts.hostKey)); err != nil { - return nil, fmt.Errorf("failed to write temp known_hosts file: %s", err) - } - tf.Sync() - - hkCallback, err = knownhosts.New(tf.Name()) - if err != nil { - return nil, err - } - } - - conf := &ssh.ClientConfig{ - HostKeyCallback: hkCallback, - User: opts.user, - } - - if opts.privateKey != "" { - if opts.certificate != "" { - log.Println("using client certificate for authentication") - - certSigner, err := signCertWithPrivateKey(opts.privateKey, opts.certificate) - if err != nil { - return nil, err - } - conf.Auth = append(conf.Auth, certSigner) - } else { - log.Println("using private key for authentication") - - pubKeyAuth, err := readPrivateKey(opts.privateKey) - if err != nil { - return nil, err - } - conf.Auth = append(conf.Auth, pubKeyAuth) - } - } - - if opts.password != "" { - conf.Auth = append(conf.Auth, ssh.Password(opts.password)) - conf.Auth = append(conf.Auth, ssh.KeyboardInteractive( - PasswordKeyboardInteractive(opts.password))) - } - - if opts.sshAgent != nil { - conf.Auth = append(conf.Auth, opts.sshAgent.Auth()) - } - - return conf, nil -} - -// Create a Cert Signer and return ssh.AuthMethod -func signCertWithPrivateKey(pk string, certificate string) (ssh.AuthMethod, error) { - rawPk, err := ssh.ParseRawPrivateKey([]byte(pk)) - if err != nil { - return nil, fmt.Errorf("failed to parse private key %q: %s", pk, err) - } - - pcert, _, _, _, err := 
ssh.ParseAuthorizedKey([]byte(certificate)) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate %q: %s", certificate, err) - } - - usigner, err := ssh.NewSignerFromKey(rawPk) - if err != nil { - return nil, fmt.Errorf("failed to create signer from raw private key %q: %s", rawPk, err) - } - - ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner) - if err != nil { - return nil, fmt.Errorf("failed to create cert signer %q: %s", usigner, err) - } - - return ssh.PublicKeys(ucertSigner), nil -} - -func readPrivateKey(pk string) (ssh.AuthMethod, error) { - // We parse the private key on our own first so that we can - // show a nicer error if the private key has a password. - block, _ := pem.Decode([]byte(pk)) - if block == nil { - return nil, errors.New("Failed to read ssh private key: no key found") - } - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return nil, errors.New( - "Failed to read ssh private key: password protected keys are\n" + - "not supported. Please decrypt the key prior to use.") - } - - signer, err := ssh.ParsePrivateKey([]byte(pk)) - if err != nil { - return nil, fmt.Errorf("Failed to parse ssh private key: %s", err) - } - - return ssh.PublicKeys(signer), nil -} - -func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) { - if !connInfo.Agent { - // No agent configured - return nil, nil - } - - agent, conn, err := sshagent.New() - if err != nil { - return nil, err - } - - // connection close is handled over in Communicator - return &sshAgent{ - agent: agent, - conn: conn, - id: connInfo.AgentIdentity, - }, nil - -} - -// A tiny wrapper around an agent.Agent to expose the ability to close its -// associated connection on request. 
-type sshAgent struct { - agent agent.Agent - conn net.Conn - id string -} - -func (a *sshAgent) Close() error { - if a.conn == nil { - return nil - } - - return a.conn.Close() -} - -// make an attempt to either read the identity file or find a corresponding -// public key file using the typical openssh naming convention. -// This returns the public key in wire format, or nil when a key is not found. -func findIDPublicKey(id string) []byte { - for _, d := range idKeyData(id) { - signer, err := ssh.ParsePrivateKey(d) - if err == nil { - log.Println("[DEBUG] parsed id private key") - pk := signer.PublicKey() - return pk.Marshal() - } - - // try it as a publicKey - pk, err := ssh.ParsePublicKey(d) - if err == nil { - log.Println("[DEBUG] parsed id public key") - return pk.Marshal() - } - - // finally try it as an authorized key - pk, _, _, _, err = ssh.ParseAuthorizedKey(d) - if err == nil { - log.Println("[DEBUG] parsed id authorized key") - return pk.Marshal() - } - } - - return nil -} - -// Try to read an id file using the id as the file path. Also read the .pub -// file if it exists, as the id file may be encrypted. Return only the file -// data read. We don't need to know what data came from which path, as we will -// try parsing each as a private key, a public key and an authorized key -// regardless. -func idKeyData(id string) [][]byte { - idPath, err := filepath.Abs(id) - if err != nil { - return nil - } - - var fileData [][]byte - - paths := []string{idPath} - - if !strings.HasSuffix(idPath, ".pub") { - paths = append(paths, idPath+".pub") - } - - for _, p := range paths { - d, err := ioutil.ReadFile(p) - if err != nil { - log.Printf("[DEBUG] error reading %q: %s", p, err) - continue - } - log.Printf("[DEBUG] found identity data at %q", p) - fileData = append(fileData, d) - } - - return fileData -} - -// sortSigners moves a signer with an agent comment field matching the -// agent_identity to the head of the list when attempting authentication. 
This -// helps when there are more keys loaded in an agent than the host will allow -// attempts. -func (s *sshAgent) sortSigners(signers []ssh.Signer) { - if s.id == "" || len(signers) < 2 { - return - } - - // if we can locate the public key, either by extracting it from the id or - // locating the .pub file, then we can more easily determine an exact match - idPk := findIDPublicKey(s.id) - - // if we have a signer with a connect field that matches the id, send that - // first, otherwise put close matches at the front of the list. - head := 0 - for i := range signers { - pk := signers[i].PublicKey() - k, ok := pk.(*agent.Key) - if !ok { - continue - } - - // check for an exact match first - if bytes.Equal(pk.Marshal(), idPk) || s.id == k.Comment { - signers[0], signers[i] = signers[i], signers[0] - break - } - - // no exact match yet, move it to the front if it's close. The agent - // may have loaded as a full filepath, while the config refers to it by - // filename only. - if strings.HasSuffix(k.Comment, s.id) { - signers[head], signers[i] = signers[i], signers[head] - head++ - continue - } - } -} - -func (s *sshAgent) Signers() ([]ssh.Signer, error) { - signers, err := s.agent.Signers() - if err != nil { - return nil, err - } - - s.sortSigners(signers) - return signers, nil -} - -func (a *sshAgent) Auth() ssh.AuthMethod { - return ssh.PublicKeysCallback(a.Signers) -} - -func (a *sshAgent) ForwardToAgent(client *ssh.Client) error { - return agent.ForwardToAgent(client, a.agent) -} diff --git a/internal/communicator/winrm/communicator.go b/internal/communicator/winrm/communicator.go deleted file mode 100644 index 302ccec8eba1..000000000000 --- a/internal/communicator/winrm/communicator.go +++ /dev/null @@ -1,202 +0,0 @@ -package winrm - -import ( - "fmt" - "io" - "log" - "math/rand" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/provisioners" - 
"github.com/masterzen/winrm" - "github.com/packer-community/winrmcp/winrmcp" - "github.com/zclconf/go-cty/cty" -) - -// Communicator represents the WinRM communicator -type Communicator struct { - connInfo *connectionInfo - client *winrm.Client - endpoint *winrm.Endpoint - rand *rand.Rand -} - -// New creates a new communicator implementation over WinRM. -func New(v cty.Value) (*Communicator, error) { - connInfo, err := parseConnectionInfo(v) - if err != nil { - return nil, err - } - - endpoint := &winrm.Endpoint{ - Host: connInfo.Host, - Port: int(connInfo.Port), - HTTPS: connInfo.HTTPS, - Insecure: connInfo.Insecure, - Timeout: connInfo.TimeoutVal, - } - if len(connInfo.CACert) > 0 { - endpoint.CACert = []byte(connInfo.CACert) - } - - comm := &Communicator{ - connInfo: connInfo, - endpoint: endpoint, - // Seed our own rand source so that script paths are not deterministic - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - } - - return comm, nil -} - -// Connect implementation of communicator.Communicator interface -func (c *Communicator) Connect(o provisioners.UIOutput) error { - // Set the client to nil since we'll (re)create it - c.client = nil - - params := winrm.DefaultParameters - params.Timeout = formatDuration(c.Timeout()) - if c.connInfo.NTLM { - params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } - } - - client, err := winrm.NewClientWithParameters( - c.endpoint, c.connInfo.User, c.connInfo.Password, params) - if err != nil { - return err - } - - if o != nil { - o.Output(fmt.Sprintf( - "Connecting to remote host via WinRM...\n"+ - " Host: %s\n"+ - " Port: %d\n"+ - " User: %s\n"+ - " Password: %t\n"+ - " HTTPS: %t\n"+ - " Insecure: %t\n"+ - " NTLM: %t\n"+ - " CACert: %t", - c.connInfo.Host, - c.connInfo.Port, - c.connInfo.User, - c.connInfo.Password != "", - c.connInfo.HTTPS, - c.connInfo.Insecure, - c.connInfo.NTLM, - c.connInfo.CACert != "", - )) - } - - log.Printf("[DEBUG] connecting to remote shell using WinRM") 
- shell, err := client.CreateShell() - if err != nil { - log.Printf("[ERROR] error creating shell: %s", err) - return err - } - - err = shell.Close() - if err != nil { - log.Printf("[ERROR] error closing shell: %s", err) - return err - } - - if o != nil { - o.Output("Connected!") - } - - c.client = client - - return nil -} - -// Disconnect implementation of communicator.Communicator interface -func (c *Communicator) Disconnect() error { - c.client = nil - return nil -} - -// Timeout implementation of communicator.Communicator interface -func (c *Communicator) Timeout() time.Duration { - return c.connInfo.TimeoutVal -} - -// ScriptPath implementation of communicator.Communicator interface -func (c *Communicator) ScriptPath() string { - return strings.Replace( - c.connInfo.ScriptPath, "%RAND%", - strconv.FormatInt(int64(c.rand.Int31()), 10), -1) -} - -// Start implementation of communicator.Communicator interface -func (c *Communicator) Start(rc *remote.Cmd) error { - rc.Init() - log.Printf("[DEBUG] starting remote command: %s", rc.Command) - - // TODO: make sure communicators always connect first, so we can get output - // from the connection. 
- if c.client == nil { - log.Println("[WARN] winrm client not connected, attempting to connect") - if err := c.Connect(nil); err != nil { - return err - } - } - - status, err := c.client.Run(rc.Command, rc.Stdout, rc.Stderr) - rc.SetExitStatus(status, err) - - return nil -} - -// Upload implementation of communicator.Communicator interface -func (c *Communicator) Upload(path string, input io.Reader) error { - wcp, err := c.newCopyClient() - if err != nil { - return err - } - log.Printf("[DEBUG] Uploading file to '%s'", path) - return wcp.Write(path, input) -} - -// UploadScript implementation of communicator.Communicator interface -func (c *Communicator) UploadScript(path string, input io.Reader) error { - return c.Upload(path, input) -} - -// UploadDir implementation of communicator.Communicator interface -func (c *Communicator) UploadDir(dst string, src string) error { - log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) - wcp, err := c.newCopyClient() - if err != nil { - return err - } - return wcp.Copy(src, dst) -} - -func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { - addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) - - config := winrmcp.Config{ - Auth: winrmcp.Auth{ - User: c.connInfo.User, - Password: c.connInfo.Password, - }, - Https: c.connInfo.HTTPS, - Insecure: c.connInfo.Insecure, - OperationTimeout: c.Timeout(), - MaxOperationsPerShell: 15, // lowest common denominator - } - - if c.connInfo.NTLM { - config.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } - } - - if c.connInfo.CACert != "" { - config.CACertBytes = []byte(c.connInfo.CACert) - } - - return winrmcp.New(addr, &config) -} diff --git a/internal/communicator/winrm/communicator_test.go b/internal/communicator/winrm/communicator_test.go deleted file mode 100644 index bc1de8e30913..000000000000 --- a/internal/communicator/winrm/communicator_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package winrm - -import ( - "bytes" - "io" - 
"regexp" - "strconv" - "testing" - - "github.com/dylanmei/winrmtest" - "github.com/hashicorp/terraform/internal/communicator/remote" - "github.com/hashicorp/terraform/internal/communicator/shared" - "github.com/zclconf/go-cty/cty" -) - -func newMockWinRMServer(t *testing.T) *winrmtest.Remote { - wrm := winrmtest.NewRemote() - - wrm.CommandFunc( - winrmtest.MatchText("echo foo"), - func(out, err io.Writer) int { - out.Write([]byte("foo")) - return 0 - }) - - wrm.CommandFunc( - winrmtest.MatchPattern(`^echo c29tZXRoaW5n >> ".*"$`), - func(out, err io.Writer) int { - return 0 - }) - - wrm.CommandFunc( - winrmtest.MatchPattern(`^powershell.exe -EncodedCommand .*$`), - func(out, err io.Writer) int { - return 0 - }) - - wrm.CommandFunc( - winrmtest.MatchText("powershell"), - func(out, err io.Writer) int { - return 0 - }) - - return wrm -} - -func TestStart(t *testing.T) { - wrm := newMockWinRMServer(t) - defer wrm.Close() - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("winrm"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(wrm.Host), - "port": cty.StringVal(strconv.Itoa(wrm.Port)), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - var cmd remote.Cmd - stdout := new(bytes.Buffer) - cmd.Command = "echo foo" - cmd.Stdout = stdout - - err = c.Start(&cmd) - if err != nil { - t.Fatalf("error executing remote command: %s", err) - } - cmd.Wait() - - if stdout.String() != "foo" { - t.Fatalf("bad command response: expected %q, got %q", "foo", stdout.String()) - } -} - -func TestUpload(t *testing.T) { - wrm := newMockWinRMServer(t) - defer wrm.Close() - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("winrm"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(wrm.Host), - "port": cty.StringVal(strconv.Itoa(wrm.Port)), - "timeout": cty.StringVal("30s"), - }) - - c, 
err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - err = c.Connect(nil) - if err != nil { - t.Fatalf("error connecting communicator: %s", err) - } - defer c.Disconnect() - - err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something"))) - if err != nil { - t.Fatalf("error uploading file: %s", err) - } -} - -func TestScriptPath(t *testing.T) { - cases := []struct { - Input string - Pattern string - }{ - { - "/tmp/script.sh", - `^/tmp/script\.sh$`, - }, - { - "/tmp/script_%RAND%.sh", - `^/tmp/script_(\d+)\.sh$`, - }, - } - - for _, tc := range cases { - v := cty.ObjectVal(map[string]cty.Value{ - "host": cty.StringVal(""), - "type": cty.StringVal("winrm"), - "script_path": cty.StringVal(tc.Input), - }) - - comm, err := New(v) - if err != nil { - t.Fatalf("err: %s", err) - } - output := comm.ScriptPath() - - match, err := regexp.Match(tc.Pattern, []byte(output)) - if err != nil { - t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) - } - if !match { - t.Fatalf("bad: %s\n\n%s", tc.Input, output) - } - } -} - -func TestNoTransportDecorator(t *testing.T) { - wrm := newMockWinRMServer(t) - defer wrm.Close() - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("winrm"), - "user": cty.StringVal("user"), - "password": cty.StringVal("pass"), - "host": cty.StringVal(wrm.Host), - "port": cty.StringVal(strconv.Itoa(wrm.Port)), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - err = c.Connect(nil) - if err != nil { - t.Fatalf("error connecting communicator: %s", err) - } - defer c.Disconnect() - - if c.client.TransportDecorator != nil { - t.Fatal("bad TransportDecorator: expected nil, got non-nil") - } -} - -func TestTransportDecorator(t *testing.T) { - wrm := newMockWinRMServer(t) - defer wrm.Close() - - v := cty.ObjectVal(map[string]cty.Value{ - "type": cty.StringVal("winrm"), - "user": cty.StringVal("user"), - "password": 
cty.StringVal("pass"), - "host": cty.StringVal(wrm.Host), - "port": cty.StringVal(strconv.Itoa(wrm.Port)), - "use_ntlm": cty.StringVal("true"), - "timeout": cty.StringVal("30s"), - }) - - c, err := New(v) - if err != nil { - t.Fatalf("error creating communicator: %s", err) - } - - err = c.Connect(nil) - if err != nil { - t.Fatalf("error connecting communicator: %s", err) - } - defer c.Disconnect() - - if c.client.TransportDecorator == nil { - t.Fatal("bad TransportDecorator: expected non-nil, got nil") - } -} - -func TestScriptPath_randSeed(t *testing.T) { - // Pre GH-4186 fix, this value was the deterministic start the pseudorandom - // chain of unseeded math/rand values for Int31(). - staticSeedPath := "C:/Temp/terraform_1298498081.cmd" - c, err := New(cty.NullVal(shared.ConnectionBlockSupersetSchema.ImpliedType())) - if err != nil { - t.Fatalf("err: %s", err) - } - path := c.ScriptPath() - if path == staticSeedPath { - t.Fatalf("rand not seeded! got: %s", path) - } -} diff --git a/internal/communicator/winrm/provisioner.go b/internal/communicator/winrm/provisioner.go deleted file mode 100644 index 3843c9d00e1b..000000000000 --- a/internal/communicator/winrm/provisioner.go +++ /dev/null @@ -1,169 +0,0 @@ -package winrm - -import ( - "fmt" - "log" - "path/filepath" - "strings" - "time" - - "github.com/hashicorp/terraform/internal/communicator/shared" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -const ( - // DefaultUser is used if there is no user given - DefaultUser = "Administrator" - - // DefaultPort is used if there is no port given - DefaultPort = 5985 - - // DefaultHTTPSPort is used if there is no port given and HTTPS is true - DefaultHTTPSPort = 5986 - - // DefaultScriptPath is used as the path to copy the file to - // for remote execution if not provided otherwise. 
- DefaultScriptPath = "C:/Temp/terraform_%RAND%.cmd" - - // DefaultTimeout is used if there is no timeout given - DefaultTimeout = 5 * time.Minute -) - -// connectionInfo is decoded from the ConnInfo of the resource. These are the -// only keys we look at. If a KeyFile is given, that is used instead -// of a password. -type connectionInfo struct { - User string - Password string - Host string - Port uint16 - HTTPS bool - Insecure bool - NTLM bool `mapstructure:"use_ntlm"` - CACert string `mapstructure:"cacert"` - Timeout string - ScriptPath string `mapstructure:"script_path"` - TimeoutVal time.Duration `mapstructure:"-"` -} - -// decodeConnInfo decodes the given cty.Value using the same behavior as the -// lgeacy mapstructure decoder in order to preserve as much of the existing -// logic as possible for compatibility. -func decodeConnInfo(v cty.Value) (*connectionInfo, error) { - connInfo := &connectionInfo{} - if v.IsNull() { - return connInfo, nil - } - - for k, v := range v.AsValueMap() { - if v.IsNull() { - continue - } - - switch k { - case "user": - connInfo.User = v.AsString() - case "password": - connInfo.Password = v.AsString() - case "host": - connInfo.Host = v.AsString() - case "port": - if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil { - return nil, err - } - case "https": - connInfo.HTTPS = v.True() - case "insecure": - connInfo.Insecure = v.True() - case "use_ntlm": - connInfo.NTLM = v.True() - case "cacert": - connInfo.CACert = v.AsString() - case "script_path": - connInfo.ScriptPath = v.AsString() - case "timeout": - connInfo.Timeout = v.AsString() - } - } - return connInfo, nil -} - -// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into -// a ConnectionInfo struct -func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { - v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) - if err != nil { - return nil, err - } - - connInfo, err := decodeConnInfo(v) - if err != nil { - return nil, err - } - 
// Check on script paths which point to the default Windows TEMP folder because files - // which are put in there very early in the boot process could get cleaned/deleted - // before you had the change to execute them. - // - // TODO (SvH) Needs some more debugging to fully understand the exact sequence of events - // causing this... - if strings.HasPrefix(filepath.ToSlash(connInfo.ScriptPath), "C:/Windows/Temp") { - return nil, fmt.Errorf( - `Using the C:\Windows\Temp folder is not supported. Please use a different 'script_path'.`) - } - - if connInfo.User == "" { - connInfo.User = DefaultUser - } - - // Format the host if needed. - // Needed for IPv6 support. - connInfo.Host = shared.IpFormat(connInfo.Host) - - if connInfo.Port == 0 { - if connInfo.HTTPS { - connInfo.Port = DefaultHTTPSPort - } else { - connInfo.Port = DefaultPort - } - } - if connInfo.ScriptPath == "" { - connInfo.ScriptPath = DefaultScriptPath - } - if connInfo.Timeout != "" { - connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) - } else { - connInfo.TimeoutVal = DefaultTimeout - } - - return connInfo, nil -} - -// safeDuration returns either the parsed duration or a default value -func safeDuration(dur string, defaultDur time.Duration) time.Duration { - d, err := time.ParseDuration(dur) - if err != nil { - log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) - return defaultDur - } - return d -} - -func formatDuration(duration time.Duration) string { - h := int(duration.Hours()) - m := int(duration.Minutes()) - h*60 - s := int(duration.Seconds()) - (h*3600 + m*60) - - res := "PT" - if h > 0 { - res = fmt.Sprintf("%s%dH", res, h) - } - if m > 0 { - res = fmt.Sprintf("%s%dM", res, m) - } - if s > 0 { - res = fmt.Sprintf("%s%dS", res, s) - } - - return res -} diff --git a/internal/configs/backend.go b/internal/configs/backend.go deleted file mode 100644 index 4bf968e6ad7d..000000000000 --- a/internal/configs/backend.go +++ /dev/null @@ -1,55 +0,0 @@ -package 
configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Backend represents a "backend" block inside a "terraform" block in a module -// or file. -type Backend struct { - Type string - Config hcl.Body - - TypeRange hcl.Range - DeclRange hcl.Range -} - -func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { - return &Backend{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - Config: block.Body, - DeclRange: block.DefRange, - }, nil -} - -// Hash produces a hash value for the reciever that covers the type and the -// portions of the config that conform to the given schema. -// -// If the config does not conform to the schema then the result is not -// meaningful for comparison since it will be based on an incomplete result. -// -// As an exception, required attributes in the schema are treated as optional -// for the purpose of hashing, so that an incomplete configuration can still -// be hashed. Other errors, such as extraneous attributes, have no such special -// case. -func (b *Backend) Hash(schema *configschema.Block) int { - // Don't fail if required attributes are not set. Instead, we'll just - // hash them as nulls. 
- schema = schema.NoneRequired() - spec := schema.DecoderSpec() - val, _ := hcldec.Decode(b.Config, spec, nil) - if val == cty.NilVal { - val = cty.UnknownVal(schema.ImpliedType()) - } - - toHash := cty.TupleVal([]cty.Value{ - cty.StringVal(b.Type), - val, - }) - - return toHash.Hash() -} diff --git a/internal/configs/checks.go b/internal/configs/checks.go deleted file mode 100644 index 417dff45eece..000000000000 --- a/internal/configs/checks.go +++ /dev/null @@ -1,141 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang" -) - -// CheckRule represents a configuration-defined validation rule, precondition, -// or postcondition. Blocks of this sort can appear in a few different places -// in configuration, including "validation" blocks for variables, -// and "precondition" and "postcondition" blocks for resources. -type CheckRule struct { - // Condition is an expression that must evaluate to true if the condition - // holds or false if it does not. If the expression produces an error then - // that's considered to be a bug in the module defining the check. - // - // The available variables in a condition expression vary depending on what - // a check is attached to. For example, validation rules attached to - // input variables can only refer to the variable that is being validated. - Condition hcl.Expression - - // ErrorMessage should be one or more full sentences, which should be in - // English for consistency with the rest of the error message output but - // can in practice be in any language. The message should describe what is - // required for the condition to return true in a way that would make sense - // to a caller of the module. - // - // The error message expression has the same variables available for - // interpolation as the corresponding condition. 
- ErrorMessage hcl.Expression - - DeclRange hcl.Range -} - -// validateSelfReferences looks for references in the check rule matching the -// specified resource address, returning error diagnostics if such a reference -// is found. -func (cr *CheckRule) validateSelfReferences(checkType string, addr addrs.Resource) hcl.Diagnostics { - var diags hcl.Diagnostics - exprs := []hcl.Expression{ - cr.Condition, - cr.ErrorMessage, - } - for _, expr := range exprs { - if expr == nil { - continue - } - refs, _ := lang.References(expr.Variables()) - for _, ref := range refs { - var refAddr addrs.Resource - - switch rs := ref.Subject.(type) { - case addrs.Resource: - refAddr = rs - case addrs.ResourceInstance: - refAddr = rs.Resource - default: - continue - } - - if refAddr.Equal(addr) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Invalid reference in %s", checkType), - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addr.String()), - Subject: expr.Range().Ptr(), - }) - break - } - } - } - return diags -} - -// decodeCheckRuleBlock decodes the contents of the given block as a check rule. -// -// Unlike most of our "decode..." functions, this one can be applied to blocks -// of various types as long as their body structures are "check-shaped". The -// function takes the containing block only because some error messages will -// refer to its location, and the returned object's DeclRange will be the -// block's header. -func decodeCheckRuleBlock(block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { - var diags hcl.Diagnostics - cr := &CheckRule{ - DeclRange: block.DefRange, - } - - if override { - // For now we'll just forbid overriding check blocks, to simplify - // the initial design. If we can find a clear use-case for overriding - // checks in override files and there's a way to define it that - // isn't confusing then we could relax this. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Can't override %s blocks", block.Type), - Detail: fmt.Sprintf("Override files cannot override %q blocks.", block.Type), - Subject: cr.DeclRange.Ptr(), - }) - return cr, diags - } - - content, moreDiags := block.Body.Content(checkRuleBlockSchema) - diags = append(diags, moreDiags...) - - if attr, exists := content.Attributes["condition"]; exists { - cr.Condition = attr.Expr - - if len(cr.Condition.Variables()) == 0 { - // A condition expression that doesn't refer to any variable is - // pointless, because its result would always be a constant. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Invalid %s expression", block.Type), - Detail: "The condition expression must refer to at least one object from elsewhere in the configuration, or else its result would not be checking anything.", - Subject: cr.Condition.Range().Ptr(), - }) - } - } - - if attr, exists := content.Attributes["error_message"]; exists { - cr.ErrorMessage = attr.Expr - } - - return cr, diags -} - -var checkRuleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "condition", - Required: true, - }, - { - Name: "error_message", - Required: true, - }, - }, -} diff --git a/internal/configs/config.go b/internal/configs/config.go deleted file mode 100644 index f38d3cd85daa..000000000000 --- a/internal/configs/config.go +++ /dev/null @@ -1,557 +0,0 @@ -package configs - -import ( - "fmt" - "log" - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" -) - -// A Config is a node in the tree of modules within a configuration. 
-// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -// -// A module tree described in *this* package represents the static tree -// represented by configuration. During evaluation a static ModuleNode may -// expand into zero or more module instances depending on the use of count and -// for_each configuration attributes within each call. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *Module - - // CallRange is the source range for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. 
- CallRange hcl.Range - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. SourceAddrRaw is the same - // information, but as the raw string the user originally entered. - // - // These fields are meaningless for the root module, where their contents are undefined. - SourceAddr addrs.ModuleSource - SourceAddrRaw string - - // SourceAddrRange is the location in the configuration source where the - // SourceAddr value was set, for use in diagnostic messages. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddrRange hcl.Range - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. - Version *version.Version -} - -// ModuleRequirements represents the provider requirements for an individual -// module, along with references to any child modules. This is used to -// determine which modules require which providers. -type ModuleRequirements struct { - Name string - SourceAddr addrs.ModuleSource - SourceDir string - Requirements getproviders.Requirements - Children map[string]*ModuleRequirements -} - -// NewEmptyConfig constructs a single-node configuration tree with an empty -// root module. This is generally a pretty useless thing to do, so most callers -// should instead use BuildConfig. -func NewEmptyConfig() *Config { - ret := &Config{} - ret.Root = ret - ret.Children = make(map[string]*Config) - ret.Module = &Module{} - return ret -} - -// Depth returns the number of "hops" the receiver is from the root of its -// module tree, with the root module having a depth of zero. 
-func (c *Config) Depth() int { - ret := 0 - this := c - for this.Parent != nil { - ret++ - this = this.Parent - } - return ret -} - -// DeepEach calls the given function once for each module in the tree, starting -// with the receiver. -// -// A parent is always called before its children and children of a particular -// node are visited in lexicographic order by their names. -func (c *Config) DeepEach(cb func(c *Config)) { - cb(c) - - names := make([]string, 0, len(c.Children)) - for name := range c.Children { - names = append(names, name) - } - - for _, name := range names { - c.Children[name].DeepEach(cb) - } -} - -// AllModules returns a slice of all the receiver and all of its descendent -// nodes in the module tree, in the same order they would be visited by -// DeepEach. -func (c *Config) AllModules() []*Config { - var ret []*Config - c.DeepEach(func(c *Config) { - ret = append(ret, c) - }) - return ret -} - -// Descendent returns the descendent config that has the given path beneath -// the receiver, or nil if there is no such module. -// -// The path traverses the static module tree, prior to any expansion to handle -// count and for_each arguments. -// -// An empty path will just return the receiver, and is therefore pointless. -func (c *Config) Descendent(path addrs.Module) *Config { - current := c - for _, name := range path { - current = current.Children[name] - if current == nil { - return nil - } - } - return current -} - -// DescendentForInstance is like Descendent except that it accepts a path -// to a particular module instance in the dynamic module graph, returning -// the node from the static module graph that corresponds to it. -// -// All instances created by a particular module call share the same -// configuration, so the keys within the given path are disregarded. 
-func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { - current := c - for _, step := range path { - current = current.Children[step.Name] - if current == nil { - return nil - } - } - return current -} - -// EntersNewPackage returns true if this call is to an external module, either -// directly via a remote source address or indirectly via a registry source -// address. -// -// Other behaviors in Terraform may treat package crossings as a special -// situation, because that indicates that the caller and callee can change -// independently of one another and thus we should disallow using any features -// where the caller assumes anything about the callee other than its input -// variables, required provider configurations, and output values. -// -// It's not meaningful to ask if the Config representing the root module enters -// a new package because the root module is always outside of all module -// packages, and so this function will arbitrarily return false in that case. -func (c *Config) EntersNewPackage() bool { - return moduleSourceAddrEntersNewPackage(c.SourceAddr) -} - -// VerifyDependencySelections checks whether the given locked dependencies -// are acceptable for all of the version constraints reported in the -// configuration tree represented by the reciever. -// -// This function will errors only if any of the locked dependencies are out of -// range for corresponding constraints in the configuration. If there are -// multiple inconsistencies then it will attempt to describe as many of them -// as possible, rather than stopping at the first problem. -// -// It's typically the responsibility of "terraform init" to change the locked -// dependencies to conform with the configuration, and so -// VerifyDependencySelections is intended for other commands to check whether -// it did so correctly and to catch if anything has changed in configuration -// since the last "terraform init" which requires re-initialization. 
However, -// it's up to the caller to decide how to advise users recover from these -// errors, because the advise can vary depending on what operation the user -// is attempting. -func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error { - var errs []error - - reqs, diags := c.ProviderRequirements() - if diags.HasErrors() { - // It should be very unusual to get here, but unfortunately we can - // end up here in some edge cases where the config loader doesn't - // process version constraint strings in exactly the same way as - // the requirements resolver. (See the addProviderRequirements method - // for more information.) - errs = append(errs, fmt.Errorf("failed to determine the configuration's provider requirements: %s", diags.Error())) - } - - for providerAddr, constraints := range reqs { - if !depsfile.ProviderIsLockable(providerAddr) { - continue // disregard builtin providers, and such - } - if depLocks != nil && depLocks.ProviderIsOverridden(providerAddr) { - // The "overridden" case is for unusual special situations like - // dev overrides, so we'll explicitly note it in the logs just in - // case we see bug reports with these active and it helps us - // understand why we ended up using the "wrong" plugin. 
- log.Printf("[DEBUG] Config.VerifyDependencySelections: skipping %s because it's overridden by a special configuration setting", providerAddr) - continue - } - - var lock *depsfile.ProviderLock - if depLocks != nil { // Should always be true in main code, but unfortunately sometimes not true in old tests that don't fill out arguments completely - lock = depLocks.Provider(providerAddr) - } - if lock == nil { - log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has no lock file entry to satisfy %q", providerAddr, getproviders.VersionConstraintsString(constraints)) - errs = append(errs, fmt.Errorf("provider %s: required by this configuration but no version is selected", providerAddr)) - continue - } - - selectedVersion := lock.Version() - allowedVersions := getproviders.MeetingConstraints(constraints) - log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has %s to satisfy %q", providerAddr, selectedVersion.String(), getproviders.VersionConstraintsString(constraints)) - if !allowedVersions.Has(selectedVersion) { - // The most likely cause of this is that the author of a module - // has changed its constraints, but this could also happen in - // some other unusual situations, such as the user directly - // editing the lock file to record something invalid. We'll - // distinguish those cases here in order to avoid the more - // specific error message potentially being a red herring in - // the edge-cases. 
- currentConstraints := getproviders.VersionConstraintsString(constraints) - lockedConstraints := getproviders.VersionConstraintsString(lock.VersionConstraints()) - switch { - case currentConstraints != lockedConstraints: - errs = append(errs, fmt.Errorf("provider %s: locked version selection %s doesn't match the updated version constraints %q", providerAddr, selectedVersion.String(), currentConstraints)) - default: - errs = append(errs, fmt.Errorf("provider %s: version constraints %q don't match the locked version selection %s", providerAddr, currentConstraints, selectedVersion.String())) - } - } - } - - // Return multiple errors in an arbitrary-but-deterministic order. - sort.Slice(errs, func(i, j int) bool { - return errs[i].Error() < errs[j].Error() - }) - - return errs -} - -// ProviderRequirements searches the full tree of modules under the receiver -// for both explicit and implicit dependencies on providers. -// -// The result is a full manifest of all of the providers that must be available -// in order to work with the receiving configuration. -// -// If the returned diagnostics includes errors then the resulting Requirements -// may be incomplete. -func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnostics) { - reqs := make(getproviders.Requirements) - diags := c.addProviderRequirements(reqs, true) - - return reqs, diags -} - -// ProviderRequirementsShallow searches only the direct receiver for explicit -// and implicit dependencies on providers. Descendant modules are ignored. -// -// If the returned diagnostics includes errors then the resulting Requirements -// may be incomplete. 
-func (c *Config) ProviderRequirementsShallow() (getproviders.Requirements, hcl.Diagnostics) { - reqs := make(getproviders.Requirements) - diags := c.addProviderRequirements(reqs, false) - - return reqs, diags -} - -// ProviderRequirementsByModule searches the full tree of modules under the -// receiver for both explicit and implicit dependencies on providers, -// constructing a tree where the requirements are broken out by module. -// -// If the returned diagnostics includes errors then the resulting Requirements -// may be incomplete. -func (c *Config) ProviderRequirementsByModule() (*ModuleRequirements, hcl.Diagnostics) { - reqs := make(getproviders.Requirements) - diags := c.addProviderRequirements(reqs, false) - - children := make(map[string]*ModuleRequirements) - for name, child := range c.Children { - childReqs, childDiags := child.ProviderRequirementsByModule() - childReqs.Name = name - children[name] = childReqs - diags = append(diags, childDiags...) - } - - ret := &ModuleRequirements{ - SourceAddr: c.SourceAddr, - SourceDir: c.Module.SourceDir, - Requirements: reqs, - Children: children, - } - - return ret, diags -} - -// addProviderRequirements is the main part of the ProviderRequirements -// implementation, gradually mutating a shared requirements object to -// eventually return. If the recurse argument is true, the requirements will -// include all descendant modules; otherwise, only the specified module. -func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse bool) hcl.Diagnostics { - var diags hcl.Diagnostics - - // First we'll deal with the requirements directly in _our_ module... - if c.Module.ProviderRequirements != nil { - for _, providerReqs := range c.Module.ProviderRequirements.RequiredProviders { - fqn := providerReqs.Type - if _, ok := reqs[fqn]; !ok { - // We'll at least have an unconstrained dependency then, but might - // add to this in the loop below. 
- reqs[fqn] = nil - } - // The model of version constraints in this package is still the - // old one using a different upstream module to represent versions, - // so we'll need to shim that out here for now. The two parsers - // don't exactly agree in practice 🙄 so this might produce new errors. - // TODO: Use the new parser throughout this package so we can get the - // better error messages it produces in more situations. - constraints, err := getproviders.ParseVersionConstraints(providerReqs.Requirement.Required.String()) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - // The errors returned by ParseVersionConstraint already include - // the section of input that was incorrect, so we don't need to - // include that here. - Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), - Subject: providerReqs.Requirement.DeclRange.Ptr(), - }) - } - reqs[fqn] = append(reqs[fqn], constraints...) - } - } - - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. - for _, rc := range c.Module.ManagedResources { - fqn := rc.Provider - if _, exists := reqs[fqn]; exists { - // Explicit dependency already present - continue - } - reqs[fqn] = nil - } - for _, rc := range c.Module.DataResources { - fqn := rc.Provider - if _, exists := reqs[fqn]; exists { - // Explicit dependency already present - continue - } - reqs[fqn] = nil - } - - // "provider" block can also contain version constraints - for _, provider := range c.Module.ProviderConfigs { - fqn := c.Module.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: provider.Name}) - if _, ok := reqs[fqn]; !ok { - // We'll at least have an unconstrained dependency then, but might - // add to this in the loop below. 
- reqs[fqn] = nil - } - if provider.Version.Required != nil { - // The model of version constraints in this package is still the - // old one using a different upstream module to represent versions, - // so we'll need to shim that out here for now. The two parsers - // don't exactly agree in practice 🙄 so this might produce new errors. - // TODO: Use the new parser throughout this package so we can get the - // better error messages it produces in more situations. - constraints, err := getproviders.ParseVersionConstraints(provider.Version.Required.String()) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - // The errors returned by ParseVersionConstraint already include - // the section of input that was incorrect, so we don't need to - // include that here. - Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), - Subject: provider.Version.DeclRange.Ptr(), - }) - } - reqs[fqn] = append(reqs[fqn], constraints...) - } - } - - if recurse { - for _, childConfig := range c.Children { - moreDiags := childConfig.addProviderRequirements(reqs, true) - diags = append(diags, moreDiags...) - } - } - - return diags -} - -// resolveProviderTypes walks through the providers in the module and ensures -// the true types are assigned based on the provider requirements for the -// module. 
-func (c *Config) resolveProviderTypes() { - for _, child := range c.Children { - child.resolveProviderTypes() - } - - // collect the required_providers, and then add any missing default providers - providers := map[string]addrs.Provider{} - for name, p := range c.Module.ProviderRequirements.RequiredProviders { - providers[name] = p.Type - } - - // ensure all provider configs know their correct type - for _, p := range c.Module.ProviderConfigs { - addr, required := providers[p.Name] - if required { - p.providerType = addr - } else { - addr := addrs.NewDefaultProvider(p.Name) - p.providerType = addr - providers[p.Name] = addr - } - } - - // connect module call providers to the correct type - for _, mod := range c.Module.ModuleCalls { - for _, p := range mod.Providers { - if addr, known := providers[p.InParent.Name]; known { - p.InParent.providerType = addr - } - } - } - - // fill in parent module calls too - if c.Parent != nil { - for _, mod := range c.Parent.Module.ModuleCalls { - for _, p := range mod.Providers { - if addr, known := providers[p.InChild.Name]; known { - p.InChild.providerType = addr - } - } - } - } -} - -// ProviderTypes returns the FQNs of each distinct provider type referenced -// in the receiving configuration. -// -// This is a helper for easily determining which provider types are required -// to fully interpret the configuration, though it does not include version -// information and so callers are expected to have already dealt with -// provider version selection in an earlier step and have identified suitable -// versions for each provider. 
-func (c *Config) ProviderTypes() []addrs.Provider { - // Ignore diagnostics here because they relate to version constraints - reqs, _ := c.ProviderRequirements() - - ret := make([]addrs.Provider, 0, len(reqs)) - for k := range reqs { - ret = append(ret, k) - } - sort.Slice(ret, func(i, j int) bool { - return ret[i].String() < ret[j].String() - }) - return ret -} - -// ResolveAbsProviderAddr returns the AbsProviderConfig represented by the given -// ProviderConfig address, which must not be nil or this method will panic. -// -// If the given address is already an AbsProviderConfig then this method returns -// it verbatim, and will always succeed. If it's a LocalProviderConfig then -// it will consult the local-to-FQN mapping table for the given module -// to find the absolute address corresponding to the given local one. -// -// The module address to resolve local addresses in must be given in the second -// argument, and must refer to a module that exists under the receiver or -// else this method will panic. -func (c *Config) ResolveAbsProviderAddr(addr addrs.ProviderConfig, inModule addrs.Module) addrs.AbsProviderConfig { - switch addr := addr.(type) { - - case addrs.AbsProviderConfig: - return addr - - case addrs.LocalProviderConfig: - // Find the descendent Config that contains the module that this - // local config belongs to. 
- mc := c.Descendent(inModule) - if mc == nil { - panic(fmt.Sprintf("ResolveAbsProviderAddr with non-existent module %s", inModule.String())) - } - - var provider addrs.Provider - if providerReq, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { - provider = providerReq.Type - } else { - provider = addrs.ImpliedProviderForUnqualifiedType(addr.LocalName) - } - - return addrs.AbsProviderConfig{ - Module: inModule, - Provider: provider, - Alias: addr.Alias, - } - - default: - panic(fmt.Sprintf("cannot ResolveAbsProviderAddr(%v, ...)", addr)) - } - -} - -// ProviderForConfigAddr returns the FQN for a given addrs.ProviderConfig, first -// by checking for the provider in module.ProviderRequirements and falling -// back to addrs.NewDefaultProvider if it is not found. -func (c *Config) ProviderForConfigAddr(addr addrs.LocalProviderConfig) addrs.Provider { - if provider, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { - return provider.Type - } - return c.ResolveAbsProviderAddr(addr, addrs.RootModule).Provider -} diff --git a/internal/configs/config_build.go b/internal/configs/config_build.go deleted file mode 100644 index 4e2dddaa1b06..000000000000 --- a/internal/configs/config_build.go +++ /dev/null @@ -1,200 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -// -// The result is a module tree that has so far only had basic module- and -// file-level invariants validated. If the returned diagnostics contains errors, -// the returned module tree may be incomplete but can still be used carefully -// for static analysis. 
-func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - - // Skip provider resolution if there are any errors, since the provider - // configurations themselves may not be valid. - if !diags.HasErrors() { - // Now that the config is built, we can connect the provider names to all - // the known types for validation. - cfg.resolveProviderTypes() - } - - diags = append(diags, validateProviderConfigs(nil, cfg, nil)...) - - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := map[string]*Config{} - - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. - callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - VersionConstraint: call.Version, - Parent: parent, - CallRange: call.DeclRange, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. 
- continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallRange: call.DeclRange, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = append(diags, modDiags...) - - if mod.Backend != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Backend configuration ignored", - Detail: "Any selected backend applies to the entire configuration, so Terraform expects provider configurations only in the root module.\n\nThis is a warning rather than an error because it's sometimes convenient to temporarily call a root module as a child module for testing purposes, but this backend configuration block will have no effect.", - Subject: mod.Backend.DeclRange.Ptr(), - }) - } - - ret[call.Name] = child - } - - return ret, diags -} - -// A ModuleWalker knows how to find and load a child module given details about -// the module to be loaded and a reference to its partially-loaded parent -// Config. -type ModuleWalker interface { - // LoadModule finds and loads a requested child module. - // - // If errors are detected during loading, implementations should return them - // in the diagnostics object. If the diagnostics object contains any errors - // then the caller will tolerate the returned module being nil or incomplete. - // If no errors are returned, it should be non-nil and complete. - // - // Full validation need not have been performed but an implementation should - // ensure that the basic file- and module-validations performed by the - // LoadConfigDir function (valid syntax, no namespace collisions, etc) have - // been performed before returning a module. 
- LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) - -// LoadModule implements ModuleWalker. -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return f(req) -} - -// ModuleRequest is used with the ModuleWalker interface to describe a child -// module that must be loaded. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr addrs.ModuleSource - - // SourceAddrRange is the source range for the SourceAddr value as it - // was provided in configuration. This can and should be used to generate - // diagnostics about the source address having invalid syntax, referring - // to a non-existent object, etc. - SourceAddrRange hcl.Range - - // VersionConstraint is the version constraint applied to the module in - // configuration. 
This data structure includes the source range for - // the constraint, which can and should be used to generate diagnostics - // about constraint-related issues, such as constraints that eliminate all - // available versions of a module whose source is otherwise valid. - VersionConstraint VersionConstraint - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source range for the header of the "module" block - // in configuration that prompted this request. This can be used as the - // subject of an error diagnostic that relates to the module call itself, - // rather than to either its source address or its version number. - CallRange hcl.Range -} - -// DisabledModuleWalker is a ModuleWalker that doesn't support -// child modules at all, and so will return an error if asked to load one. -// -// This is provided primarily for testing. There is no good reason to use this -// in the main application. 
-var DisabledModuleWalker ModuleWalker - -func init() { - DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Child modules are not supported", - Detail: "Child module calls are not allowed in this context.", - Subject: &req.CallRange, - }, - } - }) -} diff --git a/internal/configs/config_test.go b/internal/configs/config_test.go deleted file mode 100644 index b5360278df7b..000000000000 --- a/internal/configs/config_test.go +++ /dev/null @@ -1,421 +0,0 @@ -package configs - -import ( - "testing" - - "github.com/go-test/deep" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/zclconf/go-cty/cty" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2/hclsyntax" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" -) - -func TestConfigProviderTypes(t *testing.T) { - // nil cfg should return an empty map - got := NewEmptyConfig().ProviderTypes() - if len(got) != 0 { - t.Fatal("expected empty result from empty config") - } - - cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - got = cfg.ProviderTypes() - want := []addrs.Provider{ - addrs.NewDefaultProvider("aws"), - addrs.NewDefaultProvider("null"), - addrs.NewDefaultProvider("template"), - addrs.NewDefaultProvider("test"), - } - for _, problem := range deep.Equal(got, want) { - t.Error(problem) - } -} - -func TestConfigProviderTypes_nested(t *testing.T) { - // basic test with a nil config - c := NewEmptyConfig() - got := c.ProviderTypes() - if len(got) != 0 { - t.Fatalf("wrong result!\ngot: %#v\nwant: nil\n", got) - } - - // config with two provider sources, and one 
implicit (default) provider - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - got = cfg.ProviderTypes() - want := []addrs.Provider{ - addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test"), - addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), - addrs.NewDefaultProvider("test"), - } - - for _, problem := range deep.Equal(got, want) { - t.Error(problem) - } -} - -func TestConfigResolveAbsProviderAddr(t *testing.T) { - cfg, diags := testModuleConfigFromDir("testdata/providers-explicit-fqn") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - t.Run("already absolute", func(t *testing.T) { - addr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - Alias: "boop", - } - got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) - if got, want := got.String(), addr.String(); got != want { - t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) - } - }) - t.Run("local, implied mapping", func(t *testing.T) { - addr := addrs.LocalProviderConfig{ - LocalName: "implied", - Alias: "boop", - } - got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) - want := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("implied"), - Alias: "boop", - } - if got, want := got.String(), want.String(); got != want { - t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) - } - }) - t.Run("local, explicit mapping", func(t *testing.T) { - addr := addrs.LocalProviderConfig{ - LocalName: "foo-test", // this is explicitly set in the config - Alias: "boop", - } - got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) - want := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), - Alias: "boop", - } - if got, want := got.String(), want.String(); got != want { - t.Errorf("wrong 
result\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestConfigProviderRequirements(t *testing.T) { - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") - // TODO: Version Constraint Deprecation. - // Once we've removed the version argument from provider configuration - // blocks, this can go back to expected 0 diagnostics. - // assertNoDiagnostics(t, diags) - assertDiagnosticCount(t, diags, 1) - assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") - - tlsProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "tls", - ) - happycloudProvider := addrs.NewProvider( - svchost.Hostname("tf.example.com"), - "awesomecorp", "happycloud", - ) - nullProvider := addrs.NewDefaultProvider("null") - randomProvider := addrs.NewDefaultProvider("random") - impliedProvider := addrs.NewDefaultProvider("implied") - terraformProvider := addrs.NewBuiltInProvider("terraform") - configuredProvider := addrs.NewDefaultProvider("configured") - grandchildProvider := addrs.NewDefaultProvider("grandchild") - - got, diags := cfg.ProviderRequirements() - assertNoDiagnostics(t, diags) - want := getproviders.Requirements{ - // the nullProvider constraints from the two modules are merged - nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), - randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), - tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), - configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), - impliedProvider: nil, - happycloudProvider: nil, - terraformProvider: nil, - grandchildProvider: nil, - } - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("wrong result\n%s", diff) - } -} - -func TestConfigProviderRequirementsDuplicate(t *testing.T) { - _, diags := testNestedModuleConfigFromDir(t, "testdata/duplicate-local-name") - assertDiagnosticCount(t, diags, 3) - assertDiagnosticSummary(t, diags, 
"Duplicate required provider") -} - -func TestConfigProviderRequirementsShallow(t *testing.T) { - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") - // TODO: Version Constraint Deprecation. - // Once we've removed the version argument from provider configuration - // blocks, this can go back to expected 0 diagnostics. - // assertNoDiagnostics(t, diags) - assertDiagnosticCount(t, diags, 1) - assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") - - tlsProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "tls", - ) - nullProvider := addrs.NewDefaultProvider("null") - randomProvider := addrs.NewDefaultProvider("random") - impliedProvider := addrs.NewDefaultProvider("implied") - terraformProvider := addrs.NewBuiltInProvider("terraform") - configuredProvider := addrs.NewDefaultProvider("configured") - - got, diags := cfg.ProviderRequirementsShallow() - assertNoDiagnostics(t, diags) - want := getproviders.Requirements{ - // the nullProvider constraint is only from the root module - nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), - randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), - tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), - configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), - impliedProvider: nil, - terraformProvider: nil, - } - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("wrong result\n%s", diff) - } -} - -func TestConfigProviderRequirementsByModule(t *testing.T) { - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") - // TODO: Version Constraint Deprecation. - // Once we've removed the version argument from provider configuration - // blocks, this can go back to expected 0 diagnostics. 
- // assertNoDiagnostics(t, diags) - assertDiagnosticCount(t, diags, 1) - assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") - - tlsProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "tls", - ) - happycloudProvider := addrs.NewProvider( - svchost.Hostname("tf.example.com"), - "awesomecorp", "happycloud", - ) - nullProvider := addrs.NewDefaultProvider("null") - randomProvider := addrs.NewDefaultProvider("random") - impliedProvider := addrs.NewDefaultProvider("implied") - terraformProvider := addrs.NewBuiltInProvider("terraform") - configuredProvider := addrs.NewDefaultProvider("configured") - grandchildProvider := addrs.NewDefaultProvider("grandchild") - - got, diags := cfg.ProviderRequirementsByModule() - assertNoDiagnostics(t, diags) - want := &ModuleRequirements{ - Name: "", - SourceAddr: nil, - SourceDir: "testdata/provider-reqs", - Requirements: getproviders.Requirements{ - // Only the root module's version is present here - nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), - randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), - tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), - configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), - impliedProvider: nil, - terraformProvider: nil, - }, - Children: map[string]*ModuleRequirements{ - "kinder": { - Name: "kinder", - SourceAddr: addrs.ModuleSourceLocal("./child"), - SourceDir: "testdata/provider-reqs/child", - Requirements: getproviders.Requirements{ - nullProvider: getproviders.MustParseVersionConstraints("= 2.0.1"), - happycloudProvider: nil, - }, - Children: map[string]*ModuleRequirements{ - "nested": { - Name: "nested", - SourceAddr: addrs.ModuleSourceLocal("./grandchild"), - SourceDir: "testdata/provider-reqs/child/grandchild", - Requirements: getproviders.Requirements{ - grandchildProvider: nil, - }, - Children: map[string]*ModuleRequirements{}, - }, - 
}, - }, - }, - } - - ignore := cmpopts.IgnoreUnexported(version.Constraint{}, cty.Value{}, hclsyntax.Body{}) - if diff := cmp.Diff(want, got, ignore); diff != "" { - t.Errorf("wrong result\n%s", diff) - } -} - -func TestVerifyDependencySelections(t *testing.T) { - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") - // TODO: Version Constraint Deprecation. - // Once we've removed the version argument from provider configuration - // blocks, this can go back to expected 0 diagnostics. - // assertNoDiagnostics(t, diags) - assertDiagnosticCount(t, diags, 1) - assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") - - tlsProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "tls", - ) - happycloudProvider := addrs.NewProvider( - svchost.Hostname("tf.example.com"), - "awesomecorp", "happycloud", - ) - nullProvider := addrs.NewDefaultProvider("null") - randomProvider := addrs.NewDefaultProvider("random") - impliedProvider := addrs.NewDefaultProvider("implied") - configuredProvider := addrs.NewDefaultProvider("configured") - grandchildProvider := addrs.NewDefaultProvider("grandchild") - - tests := map[string]struct { - PrepareLocks func(*depsfile.Locks) - WantErrs []string - }{ - "empty locks": { - func(*depsfile.Locks) { - // Intentionally blank - }, - []string{ - `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/tls: 
required by this configuration but no version is selected`, - `provider tf.example.com/awesomecorp/happycloud: required by this configuration but no version is selected`, - }, - }, - "suitable locks": { - func(locks *depsfile.Locks) { - locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) - locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) - locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) - locks.SetProvider(nullProvider, getproviders.MustParseVersion("2.0.1"), nil, nil) - locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) - locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) - locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) - }, - nil, - }, - "null provider constraints changed": { - func(locks *depsfile.Locks) { - locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) - locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) - locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) - locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), nil, nil) - locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) - locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) - locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) - }, - []string{ - `provider registry.terraform.io/hashicorp/null: locked version selection 3.0.0 doesn't match the updated version constraints "~> 2.0.0, 2.0.1"`, - }, - }, - "null provider lock changed": { - func(locks *depsfile.Locks) { - // In this case, we set the lock file version constraints to - // match the configuration, and so our error message changes - // to not assume the configuration changed anymore. 
- locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), nil) - - locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) - locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) - locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) - locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) - locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) - locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) - }, - []string{ - `provider registry.terraform.io/hashicorp/null: version constraints "~> 2.0.0, 2.0.1" don't match the locked version selection 3.0.0`, - }, - }, - "overridden provider": { - func(locks *depsfile.Locks) { - locks.SetProviderOverridden(happycloudProvider) - }, - []string{ - // We still catch all of the other ones, because only happycloud was overridden - `provider registry.terraform.io/hashicorp/configured: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/grandchild: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/implied: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/null: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/random: required by this configuration but no version is selected`, - `provider registry.terraform.io/hashicorp/tls: required by this configuration but no version is selected`, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - depLocks := depsfile.NewLocks() - test.PrepareLocks(depLocks) - gotErrs := cfg.VerifyDependencySelections(depLocks) - - var gotErrsStr []string - if gotErrs != nil { - gotErrsStr = make([]string, 
len(gotErrs)) - for i, err := range gotErrs { - gotErrsStr[i] = err.Error() - } - } - - if diff := cmp.Diff(test.WantErrs, gotErrsStr); diff != "" { - t.Errorf("wrong errors\n%s", diff) - } - }) - } -} - -func TestConfigProviderForConfigAddr(t *testing.T) { - cfg, diags := testModuleConfigFromDir("testdata/valid-modules/providers-fqns") - assertNoDiagnostics(t, diags) - - got := cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("foo-test")) - want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") - if !got.Equals(want) { - t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) - } - - // now check a provider that isn't in the configuration. It should return a DefaultProvider. - got = cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("bar-test")) - want = addrs.NewDefaultProvider("bar-test") - if !got.Equals(want) { - t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) - } -} - -func TestConfigAddProviderRequirements(t *testing.T) { - cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") - assertNoDiagnostics(t, diags) - - reqs := getproviders.Requirements{ - addrs.NewDefaultProvider("null"): nil, - } - diags = cfg.addProviderRequirements(reqs, true) - assertNoDiagnostics(t, diags) -} diff --git a/internal/configs/configschema/marks.go b/internal/configs/configschema/marks.go deleted file mode 100644 index f16bad711e1c..000000000000 --- a/internal/configs/configschema/marks.go +++ /dev/null @@ -1,153 +0,0 @@ -package configschema - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/zclconf/go-cty/cty" -) - -// copyAndExtendPath returns a copy of a cty.Path with some additional -// `cty.PathStep`s appended to its end, to simplify creating new child paths. 
-func copyAndExtendPath(path cty.Path, nextSteps ...cty.PathStep) cty.Path { - newPath := make(cty.Path, len(path), len(path)+len(nextSteps)) - copy(newPath, path) - newPath = append(newPath, nextSteps...) - return newPath -} - -// ValueMarks returns a set of path value marks for a given value and path, -// based on the sensitive flag for each attribute within the schema. Nested -// blocks are descended (if present in the given value). -func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { - var pvm []cty.PathValueMarks - - // We can mark attributes as sensitive even if the value is null - for name, attrS := range b.Attributes { - if attrS.Sensitive { - // Create a copy of the path, with this step added, to add to our PathValueMarks slice - attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) - pvm = append(pvm, cty.PathValueMarks{ - Path: attrPath, - Marks: cty.NewValueMarks(marks.Sensitive), - }) - } - } - - // If the value is null, no other marks are possible - if val.IsNull() { - return pvm - } - - // Extract marks for nested attribute type values - for name, attrS := range b.Attributes { - // If the attribute has no nested type, or the nested type doesn't - // contain any sensitive attributes, skip inspecting it - if attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive() { - continue - } - - // Create a copy of the path, with this step added, to add to our PathValueMarks slice - attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) - - pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) 
- } - - // Extract marks for nested blocks - for name, blockS := range b.BlockTypes { - // If our block doesn't contain any sensitive attributes, skip inspecting it - if !blockS.Block.ContainsSensitive() { - continue - } - - blockV := val.GetAttr(name) - if blockV.IsNull() || !blockV.IsKnown() { - continue - } - - // Create a copy of the path, with this step added, to add to our PathValueMarks slice - blockPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - pvm = append(pvm, blockS.Block.ValueMarks(blockV, blockPath)...) - case NestingList, NestingMap, NestingSet: - for it := blockV.ElementIterator(); it.Next(); { - idx, blockEV := it.Element() - // Create a copy of the path, with this block instance's index - // step added, to add to our PathValueMarks slice - blockInstancePath := copyAndExtendPath(blockPath, cty.IndexStep{Key: idx}) - morePaths := blockS.Block.ValueMarks(blockEV, blockInstancePath) - pvm = append(pvm, morePaths...) - } - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - return pvm -} - -// ValueMarks returns a set of path value marks for a given value and path, -// based on the sensitive flag for each attribute within the nested attribute. -// Attributes with nested types are descended (if present in the given value). 
-func (o *Object) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { - var pvm []cty.PathValueMarks - - if val.IsNull() || !val.IsKnown() { - return pvm - } - - for name, attrS := range o.Attributes { - // Skip attributes which can never produce sensitive path value marks - if !attrS.Sensitive && (attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive()) { - continue - } - - switch o.Nesting { - case NestingSingle, NestingGroup: - // Create a path to this attribute - attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) - - if attrS.Sensitive { - // If the entire attribute is sensitive, mark it so - pvm = append(pvm, cty.PathValueMarks{ - Path: attrPath, - Marks: cty.NewValueMarks(marks.Sensitive), - }) - } else { - // The attribute has a nested type which contains sensitive - // attributes, so recurse - pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) - } - case NestingList, NestingMap, NestingSet: - // For nested attribute types which have a non-single nesting mode, - // we add path value marks for each element of the collection - for it := val.ElementIterator(); it.Next(); { - idx, attrEV := it.Element() - attrV := attrEV.GetAttr(name) - - // Create a path to this element of the attribute's collection. Note - // that the path is extended in opposite order to the iteration order - // of the loops: index into the collection, then the contained - // attribute name. This is because we have one type - // representing multiple collection elements. - attrPath := copyAndExtendPath(path, cty.IndexStep{Key: idx}, cty.GetAttrStep{Name: name}) - - if attrS.Sensitive { - // If the entire attribute is sensitive, mark it so - pvm = append(pvm, cty.PathValueMarks{ - Path: attrPath, - Marks: cty.NewValueMarks(marks.Sensitive), - }) - } else { - // The attribute has a nested type which contains sensitive - // attributes, so recurse - pvm = append(pvm, attrS.NestedType.ValueMarks(attrV, attrPath)...) 
- } - } - default: - panic(fmt.Sprintf("unsupported nesting mode %s", attrS.NestedType.Nesting)) - } - } - return pvm -} diff --git a/internal/configs/configschema/marks_test.go b/internal/configs/configschema/marks_test.go deleted file mode 100644 index 2077e5e805b0..000000000000 --- a/internal/configs/configschema/marks_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package configschema - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/zclconf/go-cty/cty" -) - -func TestBlockValueMarks(t *testing.T) { - schema := &Block{ - Attributes: map[string]*Attribute{ - "unsensitive": { - Type: cty.String, - Optional: true, - }, - "sensitive": { - Type: cty.String, - Sensitive: true, - }, - "nested": { - NestedType: &Object{ - Attributes: map[string]*Attribute{ - "boop": { - Type: cty.String, - }, - "honk": { - Type: cty.String, - Sensitive: true, - }, - }, - Nesting: NestingList, - }, - }, - }, - - BlockTypes: map[string]*NestedBlock{ - "list": { - Nesting: NestingList, - Block: Block{ - Attributes: map[string]*Attribute{ - "unsensitive": { - Type: cty.String, - Optional: true, - }, - "sensitive": { - Type: cty.String, - Sensitive: true, - }, - }, - }, - }, - }, - } - - testCases := map[string]struct { - given cty.Value - expect cty.Value - }{ - "unknown object": { - cty.UnknownVal(schema.ImpliedType()), - cty.UnknownVal(schema.ImpliedType()), - }, - "null object": { - cty.NullVal(schema.ImpliedType()), - cty.NullVal(schema.ImpliedType()), - }, - "object with unknown attributes and blocks": { - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String), - "unsensitive": cty.UnknownVal(cty.String), - "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "boop": cty.String, - "honk": cty.String, - }))), - "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), - }), - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), - "unsensitive": 
cty.UnknownVal(cty.String), - "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "boop": cty.String, - "honk": cty.String, - }))), - "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), - }), - }, - "object with block value": { - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.NullVal(cty.String), - "unsensitive": cty.UnknownVal(cty.String), - "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "boop": cty.String, - "honk": cty.String, - }))), - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String), - "unsensitive": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.NullVal(cty.String), - "unsensitive": cty.NullVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), - "unsensitive": cty.UnknownVal(cty.String), - "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "boop": cty.String, - "honk": cty.String, - }))), - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), - "unsensitive": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), - "unsensitive": cty.NullVal(cty.String), - }), - }), - }), - }, - "object with known values and nested attribute": { - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.StringVal("foo"), - "unsensitive": cty.StringVal("bar"), - "nested": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.StringVal("foo"), - "honk": cty.StringVal("bar"), - }), - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.NullVal(cty.String), - "honk": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.UnknownVal(cty.String), - "honk": cty.UnknownVal(cty.String), - }), - }), - "list": 
cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "sensitive": cty.String, - "unsensitive": cty.String, - }))), - }), - cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.StringVal("foo").Mark(marks.Sensitive), - "unsensitive": cty.StringVal("bar"), - "nested": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.StringVal("foo"), - "honk": cty.StringVal("bar").Mark(marks.Sensitive), - }), - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.NullVal(cty.String), - "honk": cty.NullVal(cty.String).Mark(marks.Sensitive), - }), - cty.ObjectVal(map[string]cty.Value{ - "boop": cty.UnknownVal(cty.String), - "honk": cty.UnknownVal(cty.String).Mark(marks.Sensitive), - }), - }), - "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "sensitive": cty.String, - "unsensitive": cty.String, - }))), - }), - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - got := tc.given.MarkWithPaths(schema.ValueMarks(tc.given, nil)) - if !got.RawEquals(tc.expect) { - t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expect, got) - } - }) - } -} diff --git a/internal/configs/hcl2shim/values.go b/internal/configs/hcl2shim/values.go deleted file mode 100644 index 7b0e09607f83..000000000000 --- a/internal/configs/hcl2shim/values.go +++ /dev/null @@ -1,230 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "math/big" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -// UnknownVariableValue is a sentinel value that can be used -// to denote that the value of a variable is unknown at this time. -// RawConfig uses this information to build up data about -// unknown keys. 
-const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for -// known object values and uses the provided block schema to perform some -// additional normalization to better mimic the shape of value that the old -// HCL1/HIL-based codepaths would've produced. -// -// In particular, it discards the collections that we use to represent nested -// blocks (other than NestingSingle) if they are empty, which better mimics -// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't -// know that an unspecified block _could_ exist. -// -// The given object value must conform to the schema's implied type or this -// function will panic or produce incorrect results. -// -// This is primarily useful for the final transition from new-style values to -// terraform.ResourceConfig before calling to a legacy provider, since -// helper/schema (the old provider SDK) is particularly sensitive to these -// subtle differences within its validation code. 
-func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { - if v.IsNull() { - return nil - } - if !v.IsKnown() { - panic("ConfigValueFromHCL2Block used with unknown value") - } - if !v.Type().IsObjectType() { - panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) - } - - atys := v.Type().AttributeTypes() - ret := make(map[string]interface{}) - - for name := range schema.Attributes { - if _, exists := atys[name]; !exists { - continue - } - - av := v.GetAttr(name) - if av.IsNull() { - // Skip nulls altogether, to better mimic how HCL1 would behave - continue - } - ret[name] = ConfigValueFromHCL2(av) - } - - for name, blockS := range schema.BlockTypes { - if _, exists := atys[name]; !exists { - continue - } - bv := v.GetAttr(name) - if !bv.IsKnown() { - ret[name] = UnknownVariableValue - continue - } - if bv.IsNull() { - continue - } - - switch blockS.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) - - case configschema.NestingList, configschema.NestingSet: - l := bv.LengthInt() - if l == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make([]interface{}, 0, l) - for it := bv.ElementIterator(); it.Next(); { - _, ev := it.Element() - if !ev.IsKnown() { - elems = append(elems, UnknownVariableValue) - continue - } - elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) - } - ret[name] = elems - - case configschema.NestingMap: - if bv.LengthInt() == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make(map[string]interface{}) - for it := bv.ElementIterator(); it.Next(); { - ek, ev := it.Element() - if !ev.IsKnown() { - elems[ek.AsString()] = UnknownVariableValue - continue - } - elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) - } - ret[name] = elems - } - } - - return ret -} - -// 
ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic -// types library that HCL2 uses) to a value type that matches what would've -// been produced from the HCL-based interpolator for an equivalent structure. -// -// This function will transform a cty null value into a Go nil value, which -// isn't a possible outcome of the HCL/HIL-based decoder and so callers may -// need to detect and reject any null values. -func ConfigValueFromHCL2(v cty.Value) interface{} { - if !v.IsKnown() { - return UnknownVariableValue - } - if v.IsNull() { - return nil - } - - switch v.Type() { - case cty.Bool: - return v.True() // like HCL.BOOL - case cty.String: - return v.AsString() // like HCL token.STRING or token.HEREDOC - case cty.Number: - // We can't match HCL _exactly_ here because it distinguishes between - // int and float values, but we'll get as close as we can by using - // an int if the number is exactly representable, and a float if not. - // The conversion to float will force precision to that of a float64, - // which is potentially losing information from the specific number - // given, but no worse than what HCL would've done in its own conversion - // to float. - - f := v.AsBigFloat() - if i, acc := f.Int64(); acc == big.Exact { - // if we're on a 32-bit system and the number is too big for 32-bit - // int then we'll fall through here and use a float64. 
- const MaxInt = int(^uint(0) >> 1) - const MinInt = -MaxInt - 1 - if i <= int64(MaxInt) && i >= int64(MinInt) { - return int(i) // Like HCL token.NUMBER - } - } - - f64, _ := f.Float64() - return f64 // like HCL token.FLOAT - } - - if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { - l := make([]interface{}, 0, v.LengthInt()) - it := v.ElementIterator() - for it.Next() { - _, ev := it.Element() - l = append(l, ConfigValueFromHCL2(ev)) - } - return l - } - - if v.Type().IsMapType() || v.Type().IsObjectType() { - l := make(map[string]interface{}) - it := v.ElementIterator() - for it.Next() { - ek, ev := it.Element() - cv := ConfigValueFromHCL2(ev) - if cv != nil { - l[ek.AsString()] = cv - } - } - return l - } - - // If we fall out here then we have some weird type that we haven't - // accounted for. This should never happen unless the caller is using - // capsule types, and we don't currently have any such types defined. - panic(fmt.Errorf("can't convert %#v to config value", v)) -} - -// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes -// a value as would be returned from the old interpolator and turns it into -// a cty.Value so it can be used within, for example, an HCL2 EvalContext. 
-func HCL2ValueFromConfigValue(v interface{}) cty.Value { - if v == nil { - return cty.NullVal(cty.DynamicPseudoType) - } - if v == UnknownVariableValue { - return cty.DynamicVal - } - - switch tv := v.(type) { - case bool: - return cty.BoolVal(tv) - case string: - return cty.StringVal(tv) - case int: - return cty.NumberIntVal(int64(tv)) - case float64: - return cty.NumberFloatVal(tv) - case []interface{}: - vals := make([]cty.Value, len(tv)) - for i, ev := range tv { - vals[i] = HCL2ValueFromConfigValue(ev) - } - return cty.TupleVal(vals) - case map[string]interface{}: - vals := map[string]cty.Value{} - for k, ev := range tv { - vals[k] = HCL2ValueFromConfigValue(ev) - } - return cty.ObjectVal(vals) - default: - // HCL/HIL should never generate anything that isn't caught by - // the above, so if we get here something has gone very wrong. - panic(fmt.Errorf("can't convert %#v to cty.Value", v)) - } -} diff --git a/internal/configs/hcl2shim/values_test.go b/internal/configs/hcl2shim/values_test.go deleted file mode 100644 index 4bc816dd9be5..000000000000 --- a/internal/configs/hcl2shim/values_test.go +++ /dev/null @@ -1,415 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func TestConfigValueFromHCL2Block(t *testing.T) { - tests := []struct { - Input cty.Value - Schema *configschema.Block - Want map[string]interface{} - }{ - { - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("Ermintrude"), - "age": cty.NumberIntVal(19), - "address": cty.ObjectVal(map[string]cty.Value{ - "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), - "city": cty.StringVal("Fridgewater"), - "state": cty.StringVal("MA"), - "zip": cty.StringVal("91037"), - }), - }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "age": {Type: cty.Number, Optional: true}, - }, - 
BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "street": {Type: cty.List(cty.String), Optional: true}, - "city": {Type: cty.String, Optional: true}, - "state": {Type: cty.String, Optional: true}, - "zip": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - map[string]interface{}{ - "name": "Ermintrude", - "age": int(19), - "address": map[string]interface{}{ - "street": []interface{}{"421 Shoreham Loop"}, - "city": "Fridgewater", - "state": "MA", - "zip": "91037", - }, - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("Ermintrude"), - "age": cty.NumberIntVal(19), - "address": cty.NullVal(cty.Object(map[string]cty.Type{ - "street": cty.List(cty.String), - "city": cty.String, - "state": cty.String, - "zip": cty.String, - })), - }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "age": {Type: cty.Number, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "street": {Type: cty.List(cty.String), Optional: true}, - "city": {Type: cty.String, Optional: true}, - "state": {Type: cty.String, Optional: true}, - "zip": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - map[string]interface{}{ - "name": "Ermintrude", - "age": int(19), - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("Ermintrude"), - "age": cty.NumberIntVal(19), - "address": cty.ObjectVal(map[string]cty.Value{ - "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), - "city": cty.StringVal("Fridgewater"), - "state": cty.StringVal("MA"), - "zip": cty.NullVal(cty.String), // should be omitted altogether in result - }), - }), - &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "age": {Type: cty.Number, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "street": {Type: cty.List(cty.String), Optional: true}, - "city": {Type: cty.String, Optional: true}, - "state": {Type: cty.String, Optional: true}, - "zip": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }, - map[string]interface{}{ - "name": "Ermintrude", - "age": int(19), - "address": map[string]interface{}{ - "street": []interface{}{"421 Shoreham Loop"}, - "city": "Fridgewater", - "state": "MA", - }, - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), - }), - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingList, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{ - "address": []interface{}{ - map[string]interface{}{}, - }, - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.ListValEmpty(cty.EmptyObject), // should be omitted altogether in result - }), - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingList, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.SetVal([]cty.Value{cty.EmptyObjectVal}), - }), - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingSet, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{ - "address": []interface{}{ - map[string]interface{}{}, - }, - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.SetValEmpty(cty.EmptyObject), - }), - &configschema.Block{ - BlockTypes: 
map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingSet, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{}, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.MapVal(map[string]cty.Value{"foo": cty.EmptyObjectVal}), - }), - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingMap, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{ - "address": map[string]interface{}{ - "foo": map[string]interface{}{}, - }, - }, - }, - { - cty.ObjectVal(map[string]cty.Value{ - "address": cty.MapValEmpty(cty.EmptyObject), - }), - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "address": { - Nesting: configschema.NestingMap, - Block: configschema.Block{}, - }, - }, - }, - map[string]interface{}{}, - }, - { - cty.NullVal(cty.EmptyObject), - &configschema.Block{}, - nil, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { - got := ConfigValueFromHCL2Block(test.Input, test.Schema) - if !reflect.DeepEqual(got, test.Want) { - t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) - } - }) - } -} - -func TestConfigValueFromHCL2(t *testing.T) { - tests := []struct { - Input cty.Value - Want interface{} - }{ - { - cty.True, - true, - }, - { - cty.False, - false, - }, - { - cty.NumberIntVal(12), - int(12), - }, - { - cty.NumberFloatVal(12.5), - float64(12.5), - }, - { - cty.StringVal("hello world"), - "hello world", - }, - { - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("Ermintrude"), - "age": cty.NumberIntVal(19), - "address": cty.ObjectVal(map[string]cty.Value{ - "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), - "city": cty.StringVal("Fridgewater"), - "state": cty.StringVal("MA"), - "zip": cty.StringVal("91037"), - }), - }), - map[string]interface{}{ - "name": "Ermintrude", - "age": int(19), 
- "address": map[string]interface{}{ - "street": []interface{}{"421 Shoreham Loop"}, - "city": "Fridgewater", - "state": "MA", - "zip": "91037", - }, - }, - }, - { - cty.MapVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - "bar": cty.StringVal("baz"), - }), - map[string]interface{}{ - "foo": "bar", - "bar": "baz", - }, - }, - { - cty.TupleVal([]cty.Value{ - cty.StringVal("foo"), - cty.True, - }), - []interface{}{ - "foo", - true, - }, - }, - { - cty.NullVal(cty.String), - nil, - }, - { - cty.UnknownVal(cty.String), - UnknownVariableValue, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { - got := ConfigValueFromHCL2(test.Input) - if !reflect.DeepEqual(got, test.Want) { - t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) - } - }) - } -} - -func TestHCL2ValueFromConfigValue(t *testing.T) { - tests := []struct { - Input interface{} - Want cty.Value - }{ - { - nil, - cty.NullVal(cty.DynamicPseudoType), - }, - { - UnknownVariableValue, - cty.DynamicVal, - }, - { - true, - cty.True, - }, - { - false, - cty.False, - }, - { - int(12), - cty.NumberIntVal(12), - }, - { - int(0), - cty.Zero, - }, - { - float64(12.5), - cty.NumberFloatVal(12.5), - }, - { - "hello world", - cty.StringVal("hello world"), - }, - { - "O\u0308", // decomposed letter + diacritic - cty.StringVal("\u00D6"), // NFC-normalized on entry into cty - }, - { - []interface{}{}, - cty.EmptyTupleVal, - }, - { - []interface{}(nil), - cty.EmptyTupleVal, - }, - { - []interface{}{"hello", "world"}, - cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}), - }, - { - map[string]interface{}{}, - cty.EmptyObjectVal, - }, - { - map[string]interface{}(nil), - cty.EmptyObjectVal, - }, - { - map[string]interface{}{ - "foo": "bar", - "bar": "baz", - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - "bar": cty.StringVal("baz"), - }), - }, - } - - for _, test := range tests { - 
t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { - got := HCL2ValueFromConfigValue(test.Input) - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) - } - }) - } -} diff --git a/internal/configs/module.go b/internal/configs/module.go deleted file mode 100644 index c2088b9fde9c..000000000000 --- a/internal/configs/module.go +++ /dev/null @@ -1,591 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/experiments" -) - -// Module is a container for a set of configuration constructs that are -// evaluated within a common namespace. -type Module struct { - // SourceDir is the filesystem directory that the module was loaded from. - // - // This is populated automatically only for configurations loaded with - // LoadConfigDir. If the parser is using a virtual filesystem then the - // path here will be in terms of that virtual filesystem. - - // Any other caller that constructs a module directly with NewModule may - // assign a suitable value to this attribute before using it for other - // purposes. It should be treated as immutable by all consumers of Module - // values. - SourceDir string - - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backend *Backend - CloudConfig *CloudConfig - ProviderConfigs map[string]*Provider - ProviderRequirements *RequiredProviders - ProviderLocalNames map[addrs.Provider]string - ProviderMetas map[addrs.Provider]*ProviderMeta - - Variables map[string]*Variable - Locals map[string]*Local - Outputs map[string]*Output - - ModuleCalls map[string]*ModuleCall - - ManagedResources map[string]*Resource - DataResources map[string]*Resource - - Moved []*Moved -} - -// File describes the contents of a single configuration file. 
-// -// Individual files are not usually used alone, but rather combined together -// with other files (conventionally, those in the same directory) to produce -// a *Module, using NewModule. -// -// At the level of an individual file we represent directly the structural -// elements present in the file, without any attempt to detect conflicting -// declarations. A File object can therefore be used for some basic static -// analysis of individual elements, but must be built into a Module to detect -// duplicate declarations. -type File struct { - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backends []*Backend - CloudConfigs []*CloudConfig - ProviderConfigs []*Provider - ProviderMetas []*ProviderMeta - RequiredProviders []*RequiredProviders - - Variables []*Variable - Locals []*Local - Outputs []*Output - - ModuleCalls []*ModuleCall - - ManagedResources []*Resource - DataResources []*Resource - - Moved []*Moved -} - -// NewModule takes a list of primary files and a list of override files and -// produces a *Module by combining the files together. -// -// If there are any conflicting declarations in the given files -- for example, -// if the same variable name is defined twice -- then the resulting module -// will be incomplete and error diagnostics will be returned. Careful static -// analysis of the returned Module is still possible in this case, but the -// module will probably not be semantically valid. 
-func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { - var diags hcl.Diagnostics - mod := &Module{ - ProviderConfigs: map[string]*Provider{}, - ProviderLocalNames: map[addrs.Provider]string{}, - Variables: map[string]*Variable{}, - Locals: map[string]*Local{}, - Outputs: map[string]*Output{}, - ModuleCalls: map[string]*ModuleCall{}, - ManagedResources: map[string]*Resource{}, - DataResources: map[string]*Resource{}, - ProviderMetas: map[addrs.Provider]*ProviderMeta{}, - } - - // Process the required_providers blocks first, to ensure that all - // resources have access to the correct provider FQNs - for _, file := range primaryFiles { - for _, r := range file.RequiredProviders { - if mod.ProviderRequirements != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate required providers configuration", - Detail: fmt.Sprintf("A module may have only one required providers configuration. The required providers were previously configured at %s.", mod.ProviderRequirements.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - mod.ProviderRequirements = r - } - } - - // If no required_providers block is configured, create a useful empty - // state to reduce nil checks elsewhere - if mod.ProviderRequirements == nil { - mod.ProviderRequirements = &RequiredProviders{ - RequiredProviders: make(map[string]*RequiredProvider), - } - } - - // Any required_providers blocks in override files replace the entire - // block for each provider - for _, file := range overrideFiles { - for _, override := range file.RequiredProviders { - for name, rp := range override.RequiredProviders { - mod.ProviderRequirements.RequiredProviders[name] = rp - } - } - } - - for _, file := range primaryFiles { - fileDiags := mod.appendFile(file) - diags = append(diags, fileDiags...) - } - - for _, file := range overrideFiles { - fileDiags := mod.mergeFile(file) - diags = append(diags, fileDiags...) 
- } - - diags = append(diags, checkModuleExperiments(mod)...) - - // Generate the FQN -> LocalProviderName map - mod.gatherProviderLocalNames() - - return mod, diags -} - -// ResourceByAddr returns the configuration for the resource with the given -// address, or nil if there is no such resource. -func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { - key := addr.String() - switch addr.Mode { - case addrs.ManagedResourceMode: - return m.ManagedResources[key] - case addrs.DataResourceMode: - return m.DataResources[key] - default: - return nil - } -} - -func (m *Module) appendFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. - m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) - - m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) - - for _, b := range file.Backends { - if m.Backend != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), - Subject: &b.DeclRange, - }) - continue - } - m.Backend = b - } - - for _, c := range file.CloudConfigs { - if m.CloudConfig != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate Terraform Cloud configurations", - Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. 
Terraform Cloud was previously configured at %s.", m.CloudConfig.DeclRange), - Subject: &c.DeclRange, - }) - continue - } - - m.CloudConfig = c - } - - if m.Backend != nil && m.CloudConfig != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Both a backend and Terraform Cloud configuration are present", - Detail: fmt.Sprintf("A module may declare either one 'cloud' block configuring Terraform Cloud OR one 'backend' block configuring a state backend. Terraform Cloud is configured at %s; a backend is configured at %s. Remove the backend block to configure Terraform Cloud.", m.CloudConfig.DeclRange, m.Backend.DeclRange), - Subject: &m.Backend.DeclRange, - }) - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - if existing, exists := m.ProviderConfigs[key]; exists { - if existing.Alias == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } - continue - } - m.ProviderConfigs[key] = pc - } - - for _, pm := range file.ProviderMetas { - provider := m.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pm.Provider}) - if existing, exists := m.ProviderMetas[provider]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider_meta block", - Detail: fmt.Sprintf("A provider_meta block for provider %q was already declared at %s. Providers may only have one provider_meta block per module.", existing.Provider, existing.DeclRange), - Subject: &pm.DeclRange, - }) - } - m.ProviderMetas[provider] = pm - } - - for _, v := range file.Variables { - if existing, exists := m.Variables[v.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate variable declaration", - Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &v.DeclRange, - }) - } - m.Variables[v.Name] = v - } - - for _, l := range file.Locals { - if existing, exists := m.Locals[l.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate local value definition", - Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &l.DeclRange, - }) - } - m.Locals[l.Name] = l - } - - for _, o := range file.Outputs { - if existing, exists := m.Outputs[o.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate output definition", - Detail: fmt.Sprintf("An output named %q was already defined at %s. 
Output names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &o.DeclRange, - }) - } - m.Outputs[o.Name] = o - } - - for _, mc := range file.ModuleCalls { - if existing, exists := m.ModuleCalls[mc.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate module call", - Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), - Subject: &mc.DeclRange, - }) - } - m.ModuleCalls[mc.Name] = mc - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - if existing, exists := m.ManagedResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.ManagedResources[key] = r - - // set the provider FQN for the resource - if r.ProviderConfigRef != nil { - r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) - } else { - // an invalid resource name (for e.g. "null resource" instead of - // "null_resource") can cause a panic down the line in addrs: - // https://github.com/hashicorp/terraform/issues/25560 - implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) - if err == nil { - r.Provider = m.ImpliedProviderForUnqualifiedType(implied) - } - // We don't return a diagnostic because the invalid resource name - // will already have been caught. 
- } - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - if existing, exists := m.DataResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.DataResources[key] = r - - // set the provider FQN for the resource - if r.ProviderConfigRef != nil { - r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) - } else { - // an invalid data source name (for e.g. "null resource" instead of - // "null_resource") can cause a panic down the line in addrs: - // https://github.com/hashicorp/terraform/issues/25560 - implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) - if err == nil { - r.Provider = m.ImpliedProviderForUnqualifiedType(implied) - } - // We don't return a diagnostic because the invalid resource name - // will already have been caught. - } - } - - // "Moved" blocks just append, because they are all independent - // of one another at this level. (We handle any references between - // them at runtime.) - m.Moved = append(m.Moved, file.Moved...) - - return diags -} - -func (m *Module) mergeFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - if len(file.CoreVersionConstraints) != 0 { - // This is a bit of a strange case for overriding since we normally - // would union together across multiple files anyway, but we'll - // allow it and have each override file clobber any existing list. - m.CoreVersionConstraints = nil - m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) 
- } - - if len(file.Backends) != 0 { - switch len(file.Backends) { - case 1: - m.CloudConfig = nil // A backend block is mutually exclusive with a cloud one, and overwrites any cloud config - m.Backend = file.Backends[0] - default: - // An override file with multiple backends is still invalid, even - // though it can override backends from _other_ files. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), - Subject: &file.Backends[1].DeclRange, - }) - } - } - - if len(file.CloudConfigs) != 0 { - switch len(file.CloudConfigs) { - case 1: - m.Backend = nil // A cloud block is mutually exclusive with a backend one, and overwrites any backend - m.CloudConfig = file.CloudConfigs[0] - default: - // An override file with multiple cloud blocks is still invalid, even - // though it can override cloud/backend blocks from _other_ files. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate Terraform Cloud configurations", - Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring Terraform Cloud. Terraform Cloud was previously configured at %s.", file.CloudConfigs[0].DeclRange), - Subject: &file.CloudConfigs[1].DeclRange, - }) - } - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - existing, exists := m.ProviderConfigs[key] - if pc.Alias == "" { - // We allow overriding a non-existing _default_ provider configuration - // because the user model is that an absent provider configuration - // implies an empty provider configuration, which is what the user - // is therefore overriding here. - if exists { - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) 
- } else { - m.ProviderConfigs[key] = pc - } - } else { - // For aliased providers, there must be a base configuration to - // override. This allows us to detect and report alias typos - // that might otherwise cause the override to not apply. - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base provider configuration for override", - Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), - Subject: &pc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } - } - - for _, v := range file.Variables { - existing, exists := m.Variables[v.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base variable declaration to override", - Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), - Subject: &v.DeclRange, - }) - continue - } - mergeDiags := existing.merge(v) - diags = append(diags, mergeDiags...) - } - - for _, l := range file.Locals { - existing, exists := m.Locals[l.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base local value definition to override", - Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), - Subject: &l.DeclRange, - }) - continue - } - mergeDiags := existing.merge(l) - diags = append(diags, mergeDiags...) 
- } - - for _, o := range file.Outputs { - existing, exists := m.Outputs[o.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base output definition to override", - Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), - Subject: &o.DeclRange, - }) - continue - } - mergeDiags := existing.merge(o) - diags = append(diags, mergeDiags...) - } - - for _, mc := range file.ModuleCalls { - existing, exists := m.ModuleCalls[mc.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing module call to override", - Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), - Subject: &mc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(mc) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - existing, exists := m.ManagedResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing resource to override", - Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - existing, exists := m.DataResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing data resource to override", - Detail: fmt.Sprintf("There is no %s data resource named %q. 
An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) - diags = append(diags, mergeDiags...) - } - - for _, m := range file.Moved { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Cannot override 'moved' blocks", - Detail: "Records of moved objects can appear only in normal files, not in override files.", - Subject: m.DeclRange.Ptr(), - }) - } - - return diags -} - -// gatherProviderLocalNames is a helper function that populatesA a map of -// provider FQNs -> provider local names. This information is useful for -// user-facing output, which should include both the FQN and LocalName. It must -// only be populated after the module has been parsed. -func (m *Module) gatherProviderLocalNames() { - providers := make(map[addrs.Provider]string) - for k, v := range m.ProviderRequirements.RequiredProviders { - providers[v.Type] = k - } - m.ProviderLocalNames = providers -} - -// LocalNameForProvider returns the module-specific user-supplied local name for -// a given provider FQN, or the default local name if none was supplied. -func (m *Module) LocalNameForProvider(p addrs.Provider) string { - if existing, exists := m.ProviderLocalNames[p]; exists { - return existing - } else { - // If there isn't a map entry, fall back to the default: - // Type = LocalName - return p.Type - } -} - -// ProviderForLocalConfig returns the provider FQN for a given -// LocalProviderConfig, based on its local name. -func (m *Module) ProviderForLocalConfig(pc addrs.LocalProviderConfig) addrs.Provider { - return m.ImpliedProviderForUnqualifiedType(pc.LocalName) -} - -// ImpliedProviderForUnqualifiedType returns the provider FQN for a given type, -// first by looking up the type in the provider requirements map, and falling -// back to an implied default provider. 
-// -// The intended behaviour is that configuring a provider with local name "foo" -// in a required_providers block will result in resources with type "foo" using -// that provider. -func (m *Module) ImpliedProviderForUnqualifiedType(pType string) addrs.Provider { - if provider, exists := m.ProviderRequirements.RequiredProviders[pType]; exists { - return provider.Type - } - return addrs.ImpliedProviderForUnqualifiedType(pType) -} diff --git a/internal/configs/module_test.go b/internal/configs/module_test.go deleted file mode 100644 index 5a74dda3e36d..000000000000 --- a/internal/configs/module_test.go +++ /dev/null @@ -1,415 +0,0 @@ -package configs - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// TestNewModule_provider_fqns exercises module.gatherProviderLocalNames() -func TestNewModule_provider_local_name(t *testing.T) { - mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - p := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") - if name, exists := mod.ProviderLocalNames[p]; !exists { - t.Fatal("provider FQN foo/test not found") - } else { - if name != "foo-test" { - t.Fatalf("provider localname mismatch: got %s, want foo-test", name) - } - } - - // ensure the reverse lookup (fqn to local name) works as well - localName := mod.LocalNameForProvider(p) - if localName != "foo-test" { - t.Fatal("provider local name not found") - } - - // if there is not a local name for a provider, it should return the type name - localName = mod.LocalNameForProvider(addrs.NewDefaultProvider("nonexist")) - if localName != "nonexist" { - t.Error("wrong local name returned for a non-local provider") - } - - // can also look up the "terraform" provider and see that it sources is - // allowed to be overridden, even though there is a builtin provider - // called "terraform". 
- p = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "not-builtin", "not-terraform") - if name, exists := mod.ProviderLocalNames[p]; !exists { - t.Fatal("provider FQN not-builtin/not-terraform not found") - } else { - if name != "terraform" { - t.Fatalf("provider localname mismatch: got %s, want terraform", name) - } - } -} - -// This test validates the provider FQNs set in each Resource -func TestNewModule_resource_providers(t *testing.T) { - cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - // both the root and child module have two resources, one which should use - // the default implied provider and one explicitly using a provider set in - // required_providers - wantImplicit := addrs.NewDefaultProvider("test") - wantFoo := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") - wantBar := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") - - // root module - if !cfg.Module.ManagedResources["test_instance.explicit"].Provider.Equals(wantFoo) { - t.Fatalf("wrong provider for \"test_instance.explicit\"\ngot: %s\nwant: %s", - cfg.Module.ManagedResources["test_instance.explicit"].Provider, - wantFoo, - ) - } - if !cfg.Module.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { - t.Fatalf("wrong provider for \"test_instance.implicit\"\ngot: %s\nwant: %s", - cfg.Module.ManagedResources["test_instance.implicit"].Provider, - wantImplicit, - ) - } - - // a data source - if !cfg.Module.DataResources["data.test_resource.explicit"].Provider.Equals(wantFoo) { - t.Fatalf("wrong provider for \"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", - cfg.Module.ManagedResources["test_instance.explicit"].Provider, - wantBar, - ) - } - - // child module - cm := cfg.Children["child"].Module - if !cm.ManagedResources["test_instance.explicit"].Provider.Equals(wantBar) { - t.Fatalf("wrong provider for 
\"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", - cfg.Module.ManagedResources["test_instance.explicit"].Provider, - wantBar, - ) - } - if !cm.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { - t.Fatalf("wrong provider for \"module.child.test_instance.implicit\"\ngot: %s\nwant: %s", - cfg.Module.ManagedResources["test_instance.implicit"].Provider, - wantImplicit, - ) - } -} - -func TestProviderForLocalConfig(t *testing.T) { - mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - lc := addrs.LocalProviderConfig{LocalName: "foo-test"} - got := mod.ProviderForLocalConfig(lc) - want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") - if !got.Equals(want) { - t.Fatalf("wrong result! got %#v, want %#v\n", got, want) - } -} - -// At most one required_providers block per module is permitted. -func TestModule_required_providers_multiple(t *testing.T) { - _, diags := testModuleFromDir("testdata/invalid-modules/multiple-required-providers") - if !diags.HasErrors() { - t.Fatal("module should have error diags, but does not") - } - - want := `Duplicate required providers configuration` - if got := diags.Error(); !strings.Contains(got, want) { - t.Fatalf("expected error to contain %q\nerror was:\n%s", want, got) - } -} - -// A module may have required_providers configured in files loaded later than -// resources. These provider settings should still be reflected in the -// resources' configuration. 
-func TestModule_required_providers_after_resource(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-after-resource") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") - - req, exists := mod.ProviderRequirements.RequiredProviders["test"] - if !exists { - t.Fatal("no provider requirements found for \"test\"") - } - if req.Type != want { - t.Errorf("wrong provider addr for \"test\"\ngot: %s\nwant: %s", - req.Type, want, - ) - } - - if got := mod.ManagedResources["test_instance.my-instance"].Provider; !got.Equals(want) { - t.Errorf("wrong provider addr for \"test_instance.my-instance\"\ngot: %s\nwant: %s", - got, want, - ) - } -} - -// We support overrides for required_providers blocks, which should replace the -// entire block for each provider localname, leaving other blocks unaffected. -// This should also be reflected in any resources in the module using this -// provider. 
-func TestModule_required_provider_overrides(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-overrides") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - // The foo provider and resource should be unaffected - want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "foo") - req, exists := mod.ProviderRequirements.RequiredProviders["foo"] - if !exists { - t.Fatal("no provider requirements found for \"foo\"") - } - if req.Type != want { - t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", - req.Type, want, - ) - } - if got := mod.ManagedResources["foo_thing.ft"].Provider; !got.Equals(want) { - t.Errorf("wrong provider addr for \"foo_thing.ft\"\ngot: %s\nwant: %s", - got, want, - ) - } - - // The bar provider and resource should be using the override config - want = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "blorp", "bar") - req, exists = mod.ProviderRequirements.RequiredProviders["bar"] - if !exists { - t.Fatal("no provider requirements found for \"bar\"") - } - if req.Type != want { - t.Errorf("wrong provider addr for \"bar\"\ngot: %s\nwant: %s", - req.Type, want, - ) - } - if gotVer, wantVer := req.Requirement.Required.String(), "~>2.0.0"; gotVer != wantVer { - t.Errorf("wrong provider version constraint for \"bar\"\ngot: %s\nwant: %s", - gotVer, wantVer, - ) - } - if got := mod.ManagedResources["bar_thing.bt"].Provider; !got.Equals(want) { - t.Errorf("wrong provider addr for \"bar_thing.bt\"\ngot: %s\nwant: %s", - got, want, - ) - } -} - -// Resources without explicit provider configuration are assigned a provider -// implied based on the resource type. For example, this resource: -// -// resource "foo_instance" "test" {} -// -// ...is assigned to whichever provider has local name "foo" in the current -// module. 
-// -// To find the correct provider, we first look in the module's provider -// requirements map for a local name matching the resource type, and fall back -// to a default provider if none is found. This applies to both managed and -// data resources. -func TestModule_implied_provider(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - // The three providers used in the config resources - foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") - whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") - bar := addrs.NewDefaultProvider("bar") - - // Verify that the registry.acme.corp/acme/foo provider is defined in the - // module provider requirements with local name "foo" - req, exists := mod.ProviderRequirements.RequiredProviders["foo"] - if !exists { - t.Fatal("no provider requirements found for \"foo\"") - } - if req.Type != foo { - t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", - req.Type, foo, - ) - } - - // Verify that the acme/something provider is defined in the - // module provider requirements with local name "whatever" - req, exists = mod.ProviderRequirements.RequiredProviders["whatever"] - if !exists { - t.Fatal("no provider requirements found for \"foo\"") - } - if req.Type != whatever { - t.Errorf("wrong provider addr for \"whatever\"\ngot: %s\nwant: %s", - req.Type, whatever, - ) - } - - // Check that resources are assigned the correct providers: foo_* resources - // should have the custom foo provider, bar_* resources the default bar - // provider. 
- tests := []struct { - Address string - Provider addrs.Provider - }{ - {"foo_resource.a", foo}, - {"data.foo_resource.b", foo}, - {"bar_resource.c", bar}, - {"data.bar_resource.d", bar}, - {"whatever_resource.e", whatever}, - {"data.whatever_resource.f", whatever}, - } - for _, test := range tests { - resources := mod.ManagedResources - if strings.HasPrefix(test.Address, "data.") { - resources = mod.DataResources - } - resource, exists := resources[test.Address] - if !exists { - t.Errorf("could not find resource %q in %#v", test.Address, resources) - continue - } - if got := resource.Provider; !got.Equals(test.Provider) { - t.Errorf("wrong provider addr for %q\ngot: %s\nwant: %s", - test.Address, got, test.Provider, - ) - } - } -} - -func TestImpliedProviderForUnqualifiedType(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") - whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") - bar := addrs.NewDefaultProvider("bar") - tf := addrs.NewBuiltInProvider("terraform") - - tests := []struct { - Type string - Provider addrs.Provider - }{ - {"foo", foo}, - {"whatever", whatever}, - {"bar", bar}, - {"terraform", tf}, - } - for _, test := range tests { - got := mod.ImpliedProviderForUnqualifiedType(test.Type) - if !got.Equals(test.Provider) { - t.Errorf("wrong result for %q: got %#v, want %#v\n", test.Type, got, test.Provider) - } - } -} - -func TestModule_backend_override(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/override-backend") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - gotType := mod.Backend.Type - wantType := "bar" - - if gotType != wantType { - t.Errorf("wrong result for backend type: got %#v, want %#v\n", gotType, wantType) - } - - attrs, _ := mod.Backend.Config.JustAttributes() - - gotAttr, diags := 
attrs["path"].Expr.Value(nil) - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - wantAttr := cty.StringVal("CHANGED/relative/path/to/terraform.tfstate") - - if !gotAttr.RawEquals(wantAttr) { - t.Errorf("wrong result for backend 'path': got %#v, want %#v\n", gotAttr, wantAttr) - } -} - -// Unlike most other overrides, backend blocks do not require a base configuration in a primary -// configuration file, as an omitted backend there implies the local backend. -func TestModule_backend_override_no_base(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-no-base") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - if mod.Backend == nil { - t.Errorf("expected module Backend not to be nil") - } -} - -func TestModule_cloud_override_backend(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-with-cloud") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - if mod.Backend != nil { - t.Errorf("expected module Backend to be nil") - } - - if mod.CloudConfig == nil { - t.Errorf("expected module CloudConfig not to be nil") - } -} - -// Unlike most other overrides, cloud blocks do not require a base configuration in a primary -// configuration file, as an omitted backend there implies the local backend and cloud blocks -// override backends. 
-func TestModule_cloud_override_no_base(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud-no-base") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - if mod.CloudConfig == nil { - t.Errorf("expected module CloudConfig not to be nil") - } -} - -func TestModule_cloud_override(t *testing.T) { - mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud") - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - attrs, _ := mod.CloudConfig.Config.JustAttributes() - - gotAttr, diags := attrs["organization"].Expr.Value(nil) - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - wantAttr := cty.StringVal("CHANGED") - - if !gotAttr.RawEquals(wantAttr) { - t.Errorf("wrong result for Cloud 'organization': got %#v, want %#v\n", gotAttr, wantAttr) - } - - // The override should have completely replaced the cloud block in the primary file, no merging - if attrs["should_not_be_present_with_override"] != nil { - t.Errorf("expected 'should_not_be_present_with_override' attribute to be nil") - } -} - -func TestModule_cloud_duplicate_overrides(t *testing.T) { - _, diags := testModuleFromDir("testdata/invalid-modules/override-cloud-duplicates") - want := `Duplicate Terraform Cloud configurations` - if got := diags.Error(); !strings.Contains(got, want) { - t.Fatalf("expected module error to contain %q\nerror was:\n%s", want, got) - } -} diff --git a/internal/configs/provider.go b/internal/configs/provider.go deleted file mode 100644 index 6ed24f63af39..000000000000 --- a/internal/configs/provider.go +++ /dev/null @@ -1,282 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Provider represents a "provider" block in a module or file. 
A provider -// block is a provider configuration, and there can be zero or more -// configurations for each actual provider. -type Provider struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if no alias set - - Version VersionConstraint - - Config hcl.Body - - DeclRange hcl.Range - - // TODO: this may not be set in some cases, so it is not yet suitable for - // use outside of this package. We currently only use it for internal - // validation, but once we verify that this can be set in all cases, we can - // export this so providers don't need to be re-resolved. - // This same field is also added to the ProviderConfigRef struct. - providerType addrs.Provider -} - -func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { - var diags hcl.Diagnostics - - content, config, moreDiags := block.Body.PartialContent(providerBlockSchema) - diags = append(diags, moreDiags...) - - // Provider names must be localized. Produce an error with a message - // indicating the action the user can take to fix this message if the local - // name is not localized. - name := block.Labels[0] - nameDiags := checkProviderNameNormalized(name, block.DefRange) - diags = append(diags, nameDiags...) - if nameDiags.HasErrors() { - // If the name is invalid then we mustn't produce a result because - // downstreams could try to use it as a provider type and then crash. - return nil, diags - } - - provider := &Provider{ - Name: name, - NameRange: block.LabelRanges[0], - Config: config, - DeclRange: block.DefRange, - } - - if attr, exists := content.Attributes["alias"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) - diags = append(diags, valDiags...) 
- provider.AliasRange = attr.Expr.Range().Ptr() - - if !hclsyntax.ValidIdentifier(provider.Alias) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration alias", - Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), - }) - } - } - - if attr, exists := content.Attributes["version"]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Version constraints inside provider configuration blocks are deprecated", - Detail: "Terraform 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of Terraform. To silence this warning, move the provider version constraint into the required_providers block.", - Subject: attr.Expr.Range().Ptr(), - }) - var versionDiags hcl.Diagnostics - provider.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - // Reserved attribute names - for _, name := range []string{"count", "depends_on", "for_each", "source"} { - if attr, exists := content.Attributes[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in provider block", - Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), - Subject: &attr.NameRange, - }) - } - } - - var seenEscapeBlock *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "_": - if seenEscapeBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate escaping block", - Detail: fmt.Sprintf( - "The special block type \"_\" can be used to force particular arguments to be interpreted as provider-specific rather than as meta-arguments, but each provider block can have only one such block. 
The first escaping block was at %s.", - seenEscapeBlock.DefRange, - ), - Subject: &block.DefRange, - }) - continue - } - seenEscapeBlock = block - - // When there's an escaping block its content merges with the - // existing config we extracted earlier, so later decoding - // will see a blend of both. - provider.Config = hcl.MergeBodies([]hcl.Body{provider.Config, block.Body}) - - default: - // All of the other block types in our schema are reserved for - // future expansion. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provider block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return provider, diags -} - -// Addr returns the address of the receiving provider configuration, relative -// to its containing module. -func (p *Provider) Addr() addrs.LocalProviderConfig { - return addrs.LocalProviderConfig{ - LocalName: p.Name, - Alias: p.Alias, - } -} - -func (p *Provider) moduleUniqueKey() string { - if p.Alias != "" { - return fmt.Sprintf("%s.%s", p.Name, p.Alias) - } - return p.Name -} - -// ParseProviderConfigCompact parses the given absolute traversal as a relative -// provider address in compact form. The following are examples of traversals -// that can be successfully parsed as compact relative provider configuration -// addresses: -// -// - aws -// - aws.foo -// -// This function will panic if given a relative traversal. -// -// If the returned diagnostics contains errors then the result value is invalid -// and must not be used. -func ParseProviderConfigCompact(traversal hcl.Traversal) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := addrs.LocalProviderConfig{ - LocalName: traversal.RootName(), - } - - if len(traversal) < 2 { - // Just a type name, then. 
- return ret, diags - } - - aliasStep := traversal[1] - switch ts := aliasStep.(type) { - case hcl.TraverseAttr: - ret.Alias = ts.Name - return ret, diags - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", - Subject: aliasStep.SourceRange().Ptr(), - }) - } - - if len(traversal) > 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous extra operators after provider configuration address.", - Subject: traversal[2:].SourceRange().Ptr(), - }) - } - - return ret, diags -} - -// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. 
-func ParseProviderConfigCompactStr(str string) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return addrs.LocalProviderConfig{}, diags - } - - addr, addrDiags := ParseProviderConfigCompact(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -var providerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "alias", - }, - { - Name: "version", - }, - - // Attribute names reserved for future expansion. - {Name: "count"}, - {Name: "depends_on"}, - {Name: "for_each"}, - {Name: "source"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "_"}, // meta-argument escaping block - - // The rest of these are reserved for future expansion. - {Type: "lifecycle"}, - {Type: "locals"}, - }, -} - -// checkProviderNameNormalized verifies that the given string is already -// normalized and returns an error if not. -func checkProviderNameNormalized(name string, declrange hcl.Range) hcl.Diagnostics { - var diags hcl.Diagnostics - // verify that the provider local name is normalized - normalized, err := addrs.IsProviderPartNormalized(name) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider local name", - Detail: fmt.Sprintf("%s is an invalid provider local name: %s", name, err), - Subject: &declrange, - }) - return diags - } - if !normalized { - // we would have returned this error already - normalizedProvider, _ := addrs.ParseProviderPart(name) - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider local name", - Detail: fmt.Sprintf("Provider names must be normalized. 
Replace %q with %q to fix this error.", name, normalizedProvider), - Subject: &declrange, - }) - } - return diags -} diff --git a/internal/configs/provider_test.go b/internal/configs/provider_test.go deleted file mode 100644 index 65924f085fee..000000000000 --- a/internal/configs/provider_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package configs - -import ( - "io/ioutil" - "testing" - - "github.com/go-test/deep" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestProviderReservedNames(t *testing.T) { - src, err := ioutil.ReadFile("testdata/invalid-files/provider-reserved.tf") - if err != nil { - t.Fatal(err) - } - parser := testParser(map[string]string{ - "config.tf": string(src), - }) - _, diags := parser.LoadConfigFile("config.tf") - - assertExactDiagnostics(t, diags, []string{ - //TODO: This deprecation warning will be removed in terraform v0.15. - `config.tf:4,13-20: Version constraints inside provider configuration blocks are deprecated; Terraform 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of Terraform. 
To silence this warning, move the provider version constraint into the required_providers block.`, - `config.tf:10,3-8: Reserved argument name in provider block; The provider argument name "count" is reserved for use by Terraform in a future version.`, - `config.tf:11,3-13: Reserved argument name in provider block; The provider argument name "depends_on" is reserved for use by Terraform in a future version.`, - `config.tf:12,3-11: Reserved argument name in provider block; The provider argument name "for_each" is reserved for use by Terraform in a future version.`, - `config.tf:14,3-12: Reserved block type name in provider block; The block type name "lifecycle" is reserved for use by Terraform in a future version.`, - `config.tf:15,3-9: Reserved block type name in provider block; The block type name "locals" is reserved for use by Terraform in a future version.`, - `config.tf:13,3-9: Reserved argument name in provider block; The provider argument name "source" is reserved for use by Terraform in a future version.`, - }) -} - -func TestParseProviderConfigCompact(t *testing.T) { - tests := []struct { - Input string - Want addrs.LocalProviderConfig - WantDiag string - }{ - { - `aws`, - addrs.LocalProviderConfig{ - LocalName: "aws", - }, - ``, - }, - { - `aws.foo`, - addrs.LocalProviderConfig{ - LocalName: "aws", - Alias: "foo", - }, - ``, - }, - { - `aws["foo"]`, - addrs.LocalProviderConfig{}, - `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, - }, - } - - for _, test := range tests { - t.Run(test.Input, func(t *testing.T) { - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{}) - if len(parseDiags) != 0 { - t.Errorf("unexpected diagnostics during parse") - for _, diag := range parseDiags { - t.Logf("- %s", diag) - } - return - } - - got, diags := ParseProviderConfigCompact(traversal) - - if test.WantDiag != "" { - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want 1", 
len(diags)) - } - gotDetail := diags[0].Description().Detail - if gotDetail != test.WantDiag { - t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) - } - return - } else { - if len(diags) != 0 { - t.Fatalf("got %d diagnostics; want 0", len(diags)) - } - } - - for _, problem := range deep.Equal(got, test.Want) { - t.Error(problem) - } - }) - } -} - -func TestParseProviderConfigCompactStr(t *testing.T) { - tests := []struct { - Input string - Want addrs.LocalProviderConfig - WantDiag string - }{ - { - `aws`, - addrs.LocalProviderConfig{ - LocalName: "aws", - }, - ``, - }, - { - `aws.foo`, - addrs.LocalProviderConfig{ - LocalName: "aws", - Alias: "foo", - }, - ``, - }, - { - `aws["foo"]`, - addrs.LocalProviderConfig{}, - `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, - }, - } - - for _, test := range tests { - t.Run(test.Input, func(t *testing.T) { - got, diags := ParseProviderConfigCompactStr(test.Input) - - if test.WantDiag != "" { - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want 1", len(diags)) - } - gotDetail := diags[0].Description().Detail - if gotDetail != test.WantDiag { - t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) - } - return - } else { - if len(diags) != 0 { - t.Fatalf("got %d diagnostics; want 0", len(diags)) - } - } - - for _, problem := range deep.Equal(got, test.Want) { - t.Error(problem) - } - }) - } -} diff --git a/internal/configs/resource.go b/internal/configs/resource.go deleted file mode 100644 index 1f67c6c40f68..000000000000 --- a/internal/configs/resource.go +++ /dev/null @@ -1,777 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - hcljson "github.com/hashicorp/hcl/v2/json" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang" - 
"github.com/hashicorp/terraform/internal/tfdiags" -) - -// Resource represents a "resource" or "data" block in a module or file. -type Resource struct { - Mode addrs.ResourceMode - Name string - Type string - Config hcl.Body - Count hcl.Expression - ForEach hcl.Expression - - ProviderConfigRef *ProviderConfigRef - Provider addrs.Provider - - Preconditions []*CheckRule - Postconditions []*CheckRule - - DependsOn []hcl.Traversal - - TriggersReplacement []hcl.Expression - - // Managed is populated only for Mode = addrs.ManagedResourceMode, - // containing the additional fields that apply to managed resources. - // For all other resource modes, this field is nil. - Managed *ManagedResource - - DeclRange hcl.Range - TypeRange hcl.Range -} - -// ManagedResource represents a "resource" block in a module or file. -type ManagedResource struct { - Connection *Connection - Provisioners []*Provisioner - - CreateBeforeDestroy bool - PreventDestroy bool - IgnoreChanges []hcl.Traversal - IgnoreAllChanges bool - - CreateBeforeDestroySet bool - PreventDestroySet bool -} - -func (r *Resource) moduleUniqueKey() string { - return r.Addr().String() -} - -// Addr returns a resource address for the receiver that is relative to the -// resource's containing module. -func (r *Resource) Addr() addrs.Resource { - return addrs.Resource{ - Mode: r.Mode, - Type: r.Type, - Name: r.Name, - } -} - -// ProviderConfigAddr returns the address for the provider configuration that -// should be used for this resource. This function returns a default provider -// config addr if an explicit "provider" argument was not provided. -func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { - if r.ProviderConfigRef == nil { - // If no specific "provider" argument is given, we want to look up the - // provider config where the local name matches the implied provider - // from the resource type. This may be different from the resource's - // provider type. 
- return addrs.LocalProviderConfig{ - LocalName: r.Addr().ImpliedProvider(), - } - } - - return addrs.LocalProviderConfig{ - LocalName: r.ProviderConfigRef.Name, - Alias: r.ProviderConfigRef.Alias, - } -} - -// HasCustomConditions returns true if and only if the resource has at least -// one author-specified custom condition. -func (r *Resource) HasCustomConditions() bool { - return len(r.Postconditions) != 0 || len(r.Preconditions) != 0 -} - -func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { - var diags hcl.Diagnostics - r := &Resource{ - Mode: addrs.ManagedResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - Managed: &ManagedResource{}, - } - - content, remain, moreDiags := block.Body.PartialContent(resourceBlockSchema) - diags = append(diags, moreDiags...) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same resource block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; 
exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenLifecycle *hcl.Block - var seenConnection *hcl.Block - var seenEscapeBlock *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) - - if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) - diags = append(diags, valDiags...) - r.Managed.CreateBeforeDestroySet = true - } - - if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) - diags = append(diags, valDiags...) - r.Managed.PreventDestroySet = true - } - - if attr, exists := lcContent.Attributes["replace_triggered_by"]; exists { - exprs, hclDiags := decodeReplaceTriggeredBy(attr.Expr) - diags = diags.Extend(hclDiags) - - r.TriggersReplacement = append(r.TriggersReplacement, exprs...) - } - - if attr, exists := lcContent.Attributes["ignore_changes"]; exists { - - // ignore_changes can either be a list of relative traversals - // or it can be just the keyword "all" to ignore changes to this - // resource entirely. 
- // ignore_changes = [ami, instance_type] - // ignore_changes = all - // We also allow two legacy forms for compatibility with earlier - // versions: - // ignore_changes = ["ami", "instance_type"] - // ignore_changes = ["*"] - - kw := hcl.ExprAsKeyword(attr.Expr) - - switch { - case kw == "all": - r.Managed.IgnoreAllChanges = true - default: - exprs, listDiags := hcl.ExprList(attr.Expr) - diags = append(diags, listDiags...) - - var ignoreAllRange hcl.Range - - for _, expr := range exprs { - - // our expr might be the literal string "*", which - // we accept as a deprecated way of saying "all". - if shimIsIgnoreChangesStar(expr) { - r.Managed.IgnoreAllChanges = true - ignoreAllRange = expr.Range() - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes wildcard", - Detail: "The [\"*\"] form of ignore_changes wildcard is was deprecated and is now invalid. Use \"ignore_changes = all\" to ignore changes to all attributes.", - Subject: attr.Expr.Range().Ptr(), - }) - continue - } - - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.RelTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) - } - } - - if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes ruleset", - Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", - Subject: &ignoreAllRange, - Context: attr.Expr.Range().Ptr(), - }) - } - - } - } - - for _, block := range lcContent.Blocks { - switch block.Type { - case "precondition", "postcondition": - cr, moreDiags := decodeCheckRuleBlock(block, override) - diags = append(diags, moreDiags...) - - moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) - diags = append(diags, moreDiags...) 
- - switch block.Type { - case "precondition": - r.Preconditions = append(r.Preconditions, cr) - case "postcondition": - r.Postconditions = append(r.Postconditions, cr) - } - default: - // The cases above should be exhaustive for all block types - // defined in the lifecycle schema, so this shouldn't happen. - panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) - } - } - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - r.Managed.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - case "provisioner": - pv, pvDiags := decodeProvisionerBlock(block) - diags = append(diags, pvDiags...) - if pv != nil { - r.Managed.Provisioners = append(r.Managed.Provisioners, pv) - } - - case "_": - if seenEscapeBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate escaping block", - Detail: fmt.Sprintf( - "The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each resource block can have only one such block. The first escaping block was at %s.", - seenEscapeBlock.DefRange, - ), - Subject: &block.DefRange, - }) - continue - } - seenEscapeBlock = block - - // When there's an escaping block its content merges with the - // existing config we extracted earlier, so later decoding - // will see a blend of both. - r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in resource block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - // Now we can validate the connection block references if there are any destroy provisioners. - // TODO: should we eliminate standalone connection blocks? - if r.Managed.Connection != nil { - for _, p := range r.Managed.Provisioners { - if p.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(r.Managed.Connection.Config)...) - break - } - } - } - - return r, diags -} - -func decodeDataBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { - var diags hcl.Diagnostics - r := &Resource{ - Mode: addrs.DataResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - } - - content, remain, moreDiags := block.Body.PartialContent(dataBlockSchema) - diags = append(diags, moreDiags...) 
- r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same data block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenEscapeBlock *hcl.Block - var seenLifecycle *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - - case "_": - if seenEscapeBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate escaping block", - Detail: fmt.Sprintf( - "The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each data block can have only one such block. 
The first escaping block was at %s.", - seenEscapeBlock.DefRange, - ), - Subject: &block.DefRange, - }) - continue - } - seenEscapeBlock = block - - // When there's an escaping block its content merges with the - // existing config we extracted earlier, so later decoding - // will see a blend of both. - r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) - - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: block.DefRange.Ptr(), - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) - - // All of the attributes defined for resource lifecycle are for - // managed resources only, so we can emit a common error message - // for any given attributes that HCL accepted. - for name, attr := range lcContent.Attributes { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource lifecycle argument", - Detail: fmt.Sprintf("The lifecycle argument %q is defined only for managed resources (\"resource\" blocks), and is not valid for data resources.", name), - Subject: attr.NameRange.Ptr(), - }) - } - - for _, block := range lcContent.Blocks { - switch block.Type { - case "precondition", "postcondition": - cr, moreDiags := decodeCheckRuleBlock(block, override) - diags = append(diags, moreDiags...) - - moreDiags = cr.validateSelfReferences(block.Type, r.Addr()) - diags = append(diags, moreDiags...) - - switch block.Type { - case "precondition": - r.Preconditions = append(r.Preconditions, cr) - case "postcondition": - r.Postconditions = append(r.Postconditions, cr) - } - default: - // The cases above should be exhaustive for all block types - // defined in the lifecycle schema, so this shouldn't happen. 
- panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) - } - } - - default: - // Any other block types are ones we're reserving for future use, - // but don't have any defined meaning today. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in data block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: block.TypeRange.Ptr(), - }) - } - } - - return r, diags -} - -// decodeReplaceTriggeredBy decodes and does basic validation of the -// replace_triggered_by expressions, ensuring they only contains references to -// a single resource, and the only extra variables are count.index or each.key. -func decodeReplaceTriggeredBy(expr hcl.Expression) ([]hcl.Expression, hcl.Diagnostics) { - // Since we are manually parsing the replace_triggered_by argument, we - // need to specially handle json configs, in which case the values will - // be json strings rather than hcl. To simplify parsing however we will - // decode the individual list elements, rather than the entire expression. - isJSON := hcljson.IsJSONExpression(expr) - - exprs, diags := hcl.ExprList(expr) - - for i, expr := range exprs { - if isJSON { - // We can abuse the hcl json api and rely on the fact that calling - // Value on a json expression with no EvalContext will return the - // raw string. We can then parse that as normal hcl syntax, and - // continue with the decoding. 
- v, ds := expr.Value(nil) - diags = diags.Extend(ds) - if diags.HasErrors() { - continue - } - - expr, ds = hclsyntax.ParseExpression([]byte(v.AsString()), "", expr.Range().Start) - diags = diags.Extend(ds) - if diags.HasErrors() { - continue - } - // make sure to swap out the expression we're returning too - exprs[i] = expr - } - - refs, refDiags := lang.ReferencesInExpr(expr) - for _, diag := range refDiags { - severity := hcl.DiagError - if diag.Severity() == tfdiags.Warning { - severity = hcl.DiagWarning - } - - desc := diag.Description() - - diags = append(diags, &hcl.Diagnostic{ - Severity: severity, - Summary: desc.Summary, - Detail: desc.Detail, - Subject: expr.Range().Ptr(), - }) - } - - if refDiags.HasErrors() { - continue - } - - resourceCount := 0 - for _, ref := range refs { - switch sub := ref.Subject.(type) { - case addrs.Resource, addrs.ResourceInstance: - resourceCount++ - - case addrs.ForEachAttr: - if sub.Name != "key" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid each reference in replace_triggered_by expression", - Detail: "Only each.key may be used in replace_triggered_by.", - Subject: expr.Range().Ptr(), - }) - } - case addrs.CountAttr: - if sub.Name != "index" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count reference in replace_triggered_by expression", - Detail: "Only count.index may be used in replace_triggered_by.", - Subject: expr.Range().Ptr(), - }) - } - default: - // everything else should be simple traversals - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference in replace_triggered_by expression", - Detail: "Only resources, count.index, and each.key may be used in replace_triggered_by.", - Subject: expr.Range().Ptr(), - }) - } - } - - switch { - case resourceCount == 0: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid replace_triggered_by expression", - 
Detail: "Missing resource reference in replace_triggered_by expression.", - Subject: expr.Range().Ptr(), - }) - case resourceCount > 1: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid replace_triggered_by expression", - Detail: "Multiple resource references in replace_triggered_by expression.", - Subject: expr.Range().Ptr(), - }) - } - } - return exprs, diags -} - -type ProviderConfigRef struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if alias not set - - // TODO: this may not be set in some cases, so it is not yet suitable for - // use outside of this package. We currently only use it for internal - // validation, but once we verify that this can be set in all cases, we can - // export this so providers don't need to be re-resolved. - // This same field is also added to the Provider struct. - providerType addrs.Provider -} - -func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var shimDiags hcl.Diagnostics - expr, shimDiags = shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - - // AbsTraversalForExpr produces only generic errors, so we'll discard - // the errors given and produce our own with extra context. If we didn't - // get any errors then we might still have warnings, though. - if !travDiags.HasErrors() { - diags = append(diags, travDiags...) - } - - if len(traversal) < 1 || len(traversal) > 2 { - // A provider reference was given as a string literal in the legacy - // configuration language and there are lots of examples out there - // showing that usage, so we'll sniff for that situation here and - // produce a specialized error message for it to help users find - // the new correct form. 
- if exprIsNativeQuotedString(expr) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "A provider configuration reference must not be given in quotes.", - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - // verify that the provider local name is normalized - name := traversal.RootName() - nameDiags := checkProviderNameNormalized(name, traversal[0].SourceRange()) - diags = append(diags, nameDiags...) - if diags.HasErrors() { - return nil, diags - } - - ret := &ProviderConfigRef{ - Name: name, - NameRange: traversal[0].SourceRange(), - } - - if len(traversal) > 1 { - aliasStep, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", - Subject: traversal[1].SourceRange().Ptr(), - }) - return ret, diags - } - - ret.Alias = aliasStep.Name - ret.AliasRange = aliasStep.SourceRange().Ptr() - } - - return ret, diags -} - -// Addr returns the provider config address corresponding to the receiving -// config reference. -// -// This is a trivial conversion, essentially just discarding the source -// location information and keeping just the addressing information. 
-func (r *ProviderConfigRef) Addr() addrs.LocalProviderConfig { - return addrs.LocalProviderConfig{ - LocalName: r.Name, - Alias: r.Alias, - } -} - -func (r *ProviderConfigRef) String() string { - if r == nil { - return "" - } - if r.Alias != "" { - return fmt.Sprintf("%s.%s", r.Name, r.Alias) - } - return r.Name -} - -var commonResourceAttributes = []hcl.AttributeSchema{ - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "provider", - }, - { - Name: "depends_on", - }, -} - -var resourceBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "locals"}, // reserved for future use - {Type: "lifecycle"}, - {Type: "connection"}, - {Type: "provisioner", LabelNames: []string{"type"}}, - {Type: "_"}, // meta-argument escaping block - }, -} - -var dataBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, - {Type: "locals"}, // reserved for future use - {Type: "_"}, // meta-argument escaping block - }, -} - -var resourceLifecycleBlockSchema = &hcl.BodySchema{ - // We tell HCL that these elements are all valid for both "resource" - // and "data" lifecycle blocks, but the rules are actually more restrictive - // than that. We deal with that after decoding so that we can return - // more specific error messages than HCL would typically return itself. 
- Attributes: []hcl.AttributeSchema{ - { - Name: "create_before_destroy", - }, - { - Name: "prevent_destroy", - }, - { - Name: "ignore_changes", - }, - { - Name: "replace_triggered_by", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "precondition"}, - {Type: "postcondition"}, - }, -} diff --git a/internal/earlyconfig/config.go b/internal/earlyconfig/config.go deleted file mode 100644 index 86d93c27ba6d..000000000000 --- a/internal/earlyconfig/config.go +++ /dev/null @@ -1,210 +0,0 @@ -package earlyconfig - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/moduledeps" - "github.com/hashicorp/terraform/internal/plugin/discovery" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. 
In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *tfconfig.Module - - // CallPos is the source position for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallPos tfconfig.SourcePos - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr addrs.ModuleSource - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. - Version *version.Version -} - -// ProviderRequirements searches the full tree of modules under the receiver -// for both explicit and implicit dependencies on providers. -// -// The result is a full manifest of all of the providers that must be available -// in order to work with the receiving configuration. -// -// If the returned diagnostics includes errors then the resulting Requirements -// may be incomplete. 
-func (c *Config) ProviderRequirements() (getproviders.Requirements, tfdiags.Diagnostics) { - reqs := make(getproviders.Requirements) - diags := c.addProviderRequirements(reqs) - return reqs, diags -} - -// addProviderRequirements is the main part of the ProviderRequirements -// implementation, gradually mutating a shared requirements object to -// eventually return. -func (c *Config) addProviderRequirements(reqs getproviders.Requirements) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // First we'll deal with the requirements directly in _our_ module... - for localName, providerReqs := range c.Module.RequiredProviders { - var fqn addrs.Provider - if source := providerReqs.Source; source != "" { - addr, moreDiags := addrs.ParseProviderSourceString(source) - if moreDiags.HasErrors() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider source address", - fmt.Sprintf("Invalid source %q for provider %q in %s", source, localName, c.Path), - )) - continue - } - fqn = addr - } - if fqn.IsZero() { - fqn = addrs.ImpliedProviderForUnqualifiedType(localName) - } - if _, ok := reqs[fqn]; !ok { - // We'll at least have an unconstrained dependency then, but might - // add to this in the loop below. - reqs[fqn] = nil - } - for _, constraintsStr := range providerReqs.VersionConstraints { - if constraintsStr != "" { - constraints, err := getproviders.ParseVersionConstraints(constraintsStr) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider version constraint", - fmt.Sprintf("Provider %q in %s has invalid version constraint %q: %s.", localName, c.Path, constraintsStr, err), - )) - continue - } - reqs[fqn] = append(reqs[fqn], constraints...) - } - } - } - - // ...and now we'll recursively visit all of the child modules to merge - // in their requirements too. 
- for _, childConfig := range c.Children { - moreDiags := childConfig.addProviderRequirements(reqs) - diags = diags.Append(moreDiags) - } - - return diags -} - -// ProviderDependencies is a deprecated variant of ProviderRequirements which -// uses the moduledeps models for representation. This is preserved to allow -// a gradual transition over to ProviderRequirements, but note that its -// support for fully-qualified provider addresses has some idiosyncracies. -func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var name string - if len(c.Path) > 0 { - name = c.Path[len(c.Path)-1] - } - - ret := &moduledeps.Module{ - Name: name, - } - - providers := make(moduledeps.Providers) - for name, reqs := range c.Module.RequiredProviders { - var fqn addrs.Provider - if source := reqs.Source; source != "" { - addr, parseDiags := addrs.ParseProviderSourceString(source) - if parseDiags.HasErrors() { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid provider source", - Detail: fmt.Sprintf("Invalid source %q for provider", name), - })) - continue - } - fqn = addr - } - if fqn.IsZero() { - fqn = addrs.NewDefaultProvider(name) - } - var constraints version.Constraints - for _, reqStr := range reqs.VersionConstraints { - if reqStr != "" { - constraint, err := version.NewConstraint(reqStr) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid provider version constraint", - Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, fqn.String()), - })) - continue - } - constraints = append(constraints, constraint...) 
- } - } - providers[fqn] = moduledeps.ProviderDependency{ - Constraints: discovery.NewConstraints(constraints), - Reason: moduledeps.ProviderDependencyExplicit, - } - } - ret.Providers = providers - - childNames := make([]string, 0, len(c.Children)) - for name := range c.Children { - childNames = append(childNames, name) - } - sort.Strings(childNames) - - for _, name := range childNames { - child, childDiags := c.Children[name].ProviderDependencies() - ret.Children = append(ret.Children, child) - diags = diags.Append(childDiags) - } - - return ret, diags -} diff --git a/internal/earlyconfig/config_build.go b/internal/earlyconfig/config_build.go deleted file mode 100644 index dd84cf9cccbf..000000000000 --- a/internal/earlyconfig/config_build.go +++ /dev/null @@ -1,173 +0,0 @@ -package earlyconfig - -import ( - "fmt" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := map[string]*Config{} - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. 
- callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - var vc version.Constraints - haveVersionArg := false - if strings.TrimSpace(call.Version) != "" { - haveVersionArg = true - - var err error - vc, err = version.NewConstraint(call.Version) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), - })) - continue - } - } - - var sourceAddr addrs.ModuleSource - var err error - if haveVersionArg { - sourceAddr, err = addrs.ParseModuleSourceRegistry(call.Source) - } else { - sourceAddr, err = addrs.ParseModuleSource(call.Source) - } - if err != nil { - if haveVersionArg { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid registry module source address", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid source address %q: %s.\n\nTerraform assumed that you intended a module registry source address because you also set the argument \"version\", which applies only to registry modules.", callName, call.Pos.Filename, call.Pos.Line, call.Source, err), - })) - } else { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid module source address", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid source address %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Source, err), - })) - } - // If we didn't have a valid source address then we can't continue - // down the module tree with this one. 
- continue - } - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: sourceAddr, - VersionConstraints: vc, - Parent: parent, - CallPos: call.Pos, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallPos: call.Pos, - SourceAddr: sourceAddr, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = diags.Append(modDiags) - - ret[call.Name] = child - } - - return ret, diags -} - -// ModuleRequest is used as part of the ModuleWalker interface used with -// function BuildConfig. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr addrs.ModuleSource - - // VersionConstraint is the version constraint applied to the module in - // configuration. - VersionConstraints version.Constraints - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. 
Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source position for the header of the "module" block - // in configuration that prompted this request. - CallPos tfconfig.SourcePos -} - -// ModuleWalker is an interface used with BuildConfig. -type ModuleWalker interface { - LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) - -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - return f(req) -} diff --git a/internal/earlyconfig/config_test.go b/internal/earlyconfig/config_test.go deleted file mode 100644 index 21aa71beeabf..000000000000 --- a/internal/earlyconfig/config_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package earlyconfig - -import ( - "log" - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestConfigProviderRequirements(t *testing.T) { - cfg := testConfig(t, "testdata/provider-reqs") - - impliedProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "implied", - ) - nullProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "null", - ) - 
randomProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "random", - ) - tlsProvider := addrs.NewProvider( - addrs.DefaultProviderRegistryHost, - "hashicorp", "tls", - ) - happycloudProvider := addrs.NewProvider( - svchost.Hostname("tf.example.com"), - "awesomecorp", "happycloud", - ) - - got, diags := cfg.ProviderRequirements() - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) - } - want := getproviders.Requirements{ - // the nullProvider constraints from the two modules are merged - nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), - randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), - tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), - impliedProvider: nil, - happycloudProvider: nil, - } - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("wrong result\n%s", diff) - } -} - -func testConfig(t *testing.T, baseDir string) *Config { - rootMod, diags := LoadModule(baseDir) - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) - } - - cfg, diags := BuildConfig(rootMod, ModuleWalkerFunc(testModuleWalkerFunc)) - if diags.HasErrors() { - t.Fatalf("unexpected diagnostics: %s", diags.Err().Error()) - } - - return cfg -} - -// testModuleWalkerFunc is a simple implementation of ModuleWalkerFunc that -// only understands how to resolve relative filesystem paths, using source -// location information from the call. 
-func testModuleWalkerFunc(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - callFilename := req.CallPos.Filename - sourcePath := req.SourceAddr.String() - finalPath := filepath.Join(filepath.Dir(callFilename), sourcePath) - log.Printf("[TRACE] %s in %s -> %s", sourcePath, callFilename, finalPath) - - newMod, diags := LoadModule(finalPath) - return newMod, version.Must(version.NewVersion("0.0.0")), diags -} diff --git a/internal/earlyconfig/diagnostics.go b/internal/earlyconfig/diagnostics.go deleted file mode 100644 index 15adad56385a..000000000000 --- a/internal/earlyconfig/diagnostics.go +++ /dev/null @@ -1,82 +0,0 @@ -package earlyconfig - -import ( - "fmt" - - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { - ret := make(tfdiags.Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = wrapDiagnostic(diag) - } - return ret -} - -func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { - return wrappedDiagnostic{ - d: diag, - } -} - -type wrappedDiagnostic struct { - d tfconfig.Diagnostic -} - -func (d wrappedDiagnostic) Severity() tfdiags.Severity { - switch d.d.Severity { - case tfconfig.DiagError: - return tfdiags.Error - case tfconfig.DiagWarning: - return tfdiags.Warning - default: - // Should never happen since there are no other severities - return 0 - } -} - -func (d wrappedDiagnostic) Description() tfdiags.Description { - // Since the inspect library doesn't produce precise source locations, - // we include the position information as part of the error message text. - // See the comment inside method "Source" for more information. 
- switch { - case d.d.Pos == nil: - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: d.d.Detail, - } - case d.d.Detail != "": - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), - } - default: - return tfdiags.Description{ - Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), - } - } -} - -func (d wrappedDiagnostic) Source() tfdiags.Source { - // Since the inspect library is constrained by the lowest common denominator - // between legacy HCL and modern HCL, it only returns ranges at whole-line - // granularity, and that isn't sufficient to populate a tfdiags.Source - // and so we'll just omit ranges altogether and include the line number in - // the Description text. - // - // Callers that want to return nicer errors should consider reacting to - // earlyconfig errors by attempting a follow-up parse with the normal - // config loader, which can produce more precise source location - // information. - return tfdiags.Source{} -} - -func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { - return nil -} - -func (d wrappedDiagnostic) ExtraInfo() interface{} { - return nil -} diff --git a/internal/earlyconfig/module.go b/internal/earlyconfig/module.go deleted file mode 100644 index e4edba0e0582..000000000000 --- a/internal/earlyconfig/module.go +++ /dev/null @@ -1,13 +0,0 @@ -package earlyconfig - -import ( - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// LoadModule loads some top-level metadata for the module in the given -// directory. 
-func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { - mod, diags := tfconfig.LoadModule(dir) - return mod, wrapDiagnostics(diags) -} diff --git a/internal/getproviders/errors.go b/internal/getproviders/errors.go deleted file mode 100644 index 7d2720c0f8cf..000000000000 --- a/internal/getproviders/errors.go +++ /dev/null @@ -1,246 +0,0 @@ -package getproviders - -import ( - "fmt" - "net/url" - - svchost "github.com/hashicorp/terraform-svchost" - - "github.com/hashicorp/terraform/internal/addrs" -) - -// ErrHostNoProviders is an error type used to indicate that a hostname given -// in a provider address does not support the provider registry protocol. -type ErrHostNoProviders struct { - Hostname svchost.Hostname - - // HasOtherVersionis set to true if the discovery process detected - // declarations of services named "providers" whose version numbers did not - // match any version supported by the current version of Terraform. - // - // If this is set, it's helpful to hint to the user in an error message - // that the provider host may be expecting an older or a newer version - // of Terraform, rather than that it isn't a provider registry host at all. - HasOtherVersion bool -} - -func (err ErrHostNoProviders) Error() string { - switch { - case err.HasOtherVersion: - return fmt.Sprintf("host %s does not support the provider registry protocol required by this Terraform version, but may be compatible with a different Terraform version", err.Hostname.ForDisplay()) - default: - return fmt.Sprintf("host %s does not offer a Terraform provider registry", err.Hostname.ForDisplay()) - } -} - -// ErrHostUnreachable is an error type used to indicate that a hostname -// given in a provider address did not resolve in DNS, did not respond to an -// HTTPS request for service discovery, or otherwise failed to correctly speak -// the service discovery protocol. 
-type ErrHostUnreachable struct { - Hostname svchost.Hostname - Wrapped error -} - -func (err ErrHostUnreachable) Error() string { - return fmt.Sprintf("could not connect to %s: %s", err.Hostname.ForDisplay(), err.Wrapped.Error()) -} - -// Unwrap returns the underlying error that occurred when trying to reach the -// indicated host. -func (err ErrHostUnreachable) Unwrap() error { - return err.Wrapped -} - -// ErrUnauthorized is an error type used to indicate that a hostname -// given in a provider address returned a "401 Unauthorized" or "403 Forbidden" -// error response when we tried to access it. -type ErrUnauthorized struct { - Hostname svchost.Hostname - - // HaveCredentials is true when the request that failed included some - // credentials, and thus it seems that those credentials were invalid. - // Conversely, HaveCredentials is false if the request did not include - // credentials at all, in which case it seems that credentials must be - // provided. - HaveCredentials bool -} - -func (err ErrUnauthorized) Error() string { - switch { - case err.HaveCredentials: - return fmt.Sprintf("host %s rejected the given authentication credentials", err.Hostname) - default: - return fmt.Sprintf("host %s requires authentication credentials", err.Hostname) - } -} - -// ErrProviderNotFound is an error type used to indicate that requested provider -// was not found in the source(s) included in the Description field. This can be -// used to produce user-friendly error messages. -type ErrProviderNotFound struct { - Provider addrs.Provider - Sources []string -} - -func (err ErrProviderNotFound) Error() string { - return fmt.Sprintf( - "provider %s was not found in any of the search locations", - err.Provider, - ) -} - -// ErrRegistryProviderNotKnown is an error type used to indicate that the hostname -// given in a provider address does appear to be a provider registry but that -// registry does not know about the given provider namespace or type. 
-// -// A caller serving requests from an end-user should recognize this error type -// and use it to produce user-friendly hints for common errors such as failing -// to specify an explicit source for a provider not in the default namespace -// (one not under registry.terraform.io/hashicorp/). The default error message -// for this type is a direct description of the problem with no such hints, -// because we expect that the caller will have better context to decide what -// hints are appropriate, e.g. by looking at the configuration given by the -// user. -type ErrRegistryProviderNotKnown struct { - Provider addrs.Provider -} - -func (err ErrRegistryProviderNotKnown) Error() string { - return fmt.Sprintf( - "provider registry %s does not have a provider named %s", - err.Provider.Hostname.ForDisplay(), - err.Provider, - ) -} - -// ErrPlatformNotSupported is an error type used to indicate that a particular -// version of a provider isn't available for a particular target platform. -// -// This is returned when DownloadLocation encounters a 404 Not Found response -// from the underlying registry, because it presumes that a caller will only -// ask for the DownloadLocation for a version it already found the existence -// of via AvailableVersions. -type ErrPlatformNotSupported struct { - Provider addrs.Provider - Version Version - Platform Platform - - // MirrorURL, if non-nil, is the base URL of the mirror that serviced - // the request in place of the provider's origin registry. MirrorURL - // is nil for a direct query. 
- MirrorURL *url.URL -} - -func (err ErrPlatformNotSupported) Error() string { - if err.MirrorURL != nil { - return fmt.Sprintf( - "provider mirror %s does not have a package of %s %s for %s", - err.MirrorURL.String(), - err.Provider, - err.Version, - err.Platform, - ) - } - return fmt.Sprintf( - "provider %s %s is not available for %s", - err.Provider, - err.Version, - err.Platform, - ) -} - -// ErrProtocolNotSupported is an error type used to indicate that a particular -// version of a provider is not supported by the current version of Terraform. -// -// Specfically, this is returned when the version's plugin protocol is not supported. -// -// When available, the error will include a suggested version that can be displayed to -// the user. Otherwise it will return UnspecifiedVersion -type ErrProtocolNotSupported struct { - Provider addrs.Provider - Version Version - Suggestion Version -} - -func (err ErrProtocolNotSupported) Error() string { - return fmt.Sprintf( - "provider %s %s is not supported by this version of terraform", - err.Provider, - err.Version, - ) -} - -// ErrQueryFailed is an error type used to indicate that the hostname given -// in a provider address does appear to be a provider registry but that when -// we queried it for metadata for the given provider the server returned an -// unexpected error. -// -// This is used for any error responses other than "Not Found", which would -// indicate the absense of a provider and is thus reported using -// ErrProviderNotKnown instead. -type ErrQueryFailed struct { - Provider addrs.Provider - Wrapped error - - // MirrorURL, if non-nil, is the base URL of the mirror that serviced - // the request in place of the provider's origin registry. MirrorURL - // is nil for a direct query. 
- MirrorURL *url.URL -} - -func (err ErrQueryFailed) Error() string { - if err.MirrorURL != nil { - return fmt.Sprintf( - "failed to query provider mirror %s for %s: %s", - err.MirrorURL.String(), - err.Provider.String(), - err.Wrapped.Error(), - ) - } - return fmt.Sprintf( - "could not query provider registry for %s: %s", - err.Provider.String(), - err.Wrapped.Error(), - ) -} - -// Unwrap returns the underlying error that occurred when trying to reach the -// indicated host. -func (err ErrQueryFailed) Unwrap() error { - return err.Wrapped -} - -// ErrRequestCanceled is an error type used to indicate that an operation -// failed due to being cancelled via the given context.Context object. -// -// This error type doesn't include information about what was cancelled, -// because the expected treatment of this error type is to quickly abort and -// exit with minimal ceremony. -type ErrRequestCanceled struct { -} - -func (err ErrRequestCanceled) Error() string { - return "request canceled" -} - -// ErrIsNotExist returns true if and only if the given error is one of the -// errors from this package that represents an affirmative response that a -// requested object does not exist. -// -// This is as opposed to errors indicating that the source is unavailable -// or misconfigured in some way, where we therefore cannot say for certain -// whether the requested object exists. -// -// If a caller needs to take a special action based on something not existing, -// such as falling back on some other source, use this function rather than -// direct type assertions so that the set of possible "not exist" errors can -// grow in future. 
-func ErrIsNotExist(err error) bool { - switch err.(type) { - case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported: - return true - default: - return false - } -} diff --git a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe b/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe deleted file mode 100644 index daa9e3509f65..000000000000 --- a/internal/getproviders/testdata/filesystem-mirror/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe +++ /dev/null @@ -1 +0,0 @@ -# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/internal/grpcwrap/provider.go b/internal/grpcwrap/provider.go deleted file mode 100644 index 170cea63881a..000000000000 --- a/internal/grpcwrap/provider.go +++ /dev/null @@ -1,419 +0,0 @@ -package grpcwrap - -import ( - "context" - - "github.com/hashicorp/terraform/internal/plugin/convert" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/zclconf/go-cty/cty/msgpack" -) - -// New wraps a providers.Interface to implement a grpc ProviderServer. -// This is useful for creating a test binary out of an internal provider -// implementation. 
-func Provider(p providers.Interface) tfplugin5.ProviderServer { - return &provider{ - provider: p, - schema: p.GetProviderSchema(), - } -} - -type provider struct { - provider providers.Interface - schema providers.GetProviderSchemaResponse -} - -func (p *provider) GetSchema(_ context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { - resp := &tfplugin5.GetProviderSchema_Response{ - ResourceSchemas: make(map[string]*tfplugin5.Schema), - DataSourceSchemas: make(map[string]*tfplugin5.Schema), - } - - resp.Provider = &tfplugin5.Schema{ - Block: &tfplugin5.Schema_Block{}, - } - if p.schema.Provider.Block != nil { - resp.Provider.Block = convert.ConfigSchemaToProto(p.schema.Provider.Block) - } - - resp.ProviderMeta = &tfplugin5.Schema{ - Block: &tfplugin5.Schema_Block{}, - } - if p.schema.ProviderMeta.Block != nil { - resp.ProviderMeta.Block = convert.ConfigSchemaToProto(p.schema.ProviderMeta.Block) - } - - for typ, res := range p.schema.ResourceTypes { - resp.ResourceSchemas[typ] = &tfplugin5.Schema{ - Version: res.Version, - Block: convert.ConfigSchemaToProto(res.Block), - } - } - for typ, dat := range p.schema.DataSources { - resp.DataSourceSchemas[typ] = &tfplugin5.Schema{ - Version: dat.Version, - Block: convert.ConfigSchemaToProto(dat.Block), - } - } - - resp.ServerCapabilities = &tfplugin5.GetProviderSchema_ServerCapabilities{ - PlanDestroy: p.schema.ServerCapabilities.PlanDestroy, - } - - // include any diagnostics from the original GetSchema call - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, p.schema.Diagnostics) - - return resp, nil -} - -func (p *provider) PrepareProviderConfig(_ context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { - resp := &tfplugin5.PrepareProviderConfig_Response{} - ty := p.schema.Provider.Block.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = 
convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - prepareResp := p.provider.ValidateProviderConfig(providers.ValidateProviderConfigRequest{ - Config: configVal, - }) - - // the PreparedConfig value is no longer used - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, prepareResp.Diagnostics) - return resp, nil -} - -func (p *provider) ValidateResourceTypeConfig(_ context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { - resp := &tfplugin5.ValidateResourceTypeConfig_Response{} - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - validateResp := p.provider.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ - TypeName: req.TypeName, - Config: configVal, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) - return resp, nil -} - -func (p *provider) ValidateDataSourceConfig(_ context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { - resp := &tfplugin5.ValidateDataSourceConfig_Response{} - ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - validateResp := p.provider.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ - TypeName: req.TypeName, - Config: configVal, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) - return resp, nil -} - -func (p *provider) UpgradeResourceState(_ context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { - resp := 
&tfplugin5.UpgradeResourceState_Response{} - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - - upgradeResp := p.provider.UpgradeResourceState(providers.UpgradeResourceStateRequest{ - TypeName: req.TypeName, - Version: req.Version, - RawStateJSON: req.RawState.Json, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, upgradeResp.Diagnostics) - if upgradeResp.Diagnostics.HasErrors() { - return resp, nil - } - - dv, err := encodeDynamicValue(upgradeResp.UpgradedState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.UpgradedState = dv - - return resp, nil -} - -func (p *provider) Configure(_ context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { - resp := &tfplugin5.Configure_Response{} - ty := p.schema.Provider.Block.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - configureResp := p.provider.ConfigureProvider(providers.ConfigureProviderRequest{ - TerraformVersion: req.TerraformVersion, - Config: configVal, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, configureResp.Diagnostics) - return resp, nil -} - -func (p *provider) ReadResource(_ context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { - resp := &tfplugin5.ReadResource_Response{} - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - - stateVal, err := decodeDynamicValue(req.CurrentState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - metaTy := p.schema.ProviderMeta.Block.ImpliedType() - metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - readResp := 
p.provider.ReadResource(providers.ReadResourceRequest{ - TypeName: req.TypeName, - PriorState: stateVal, - Private: req.Private, - ProviderMeta: metaVal, - }) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) - if readResp.Diagnostics.HasErrors() { - return resp, nil - } - resp.Private = readResp.Private - - dv, err := encodeDynamicValue(readResp.NewState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = dv - - return resp, nil -} - -func (p *provider) PlanResourceChange(_ context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { - resp := &tfplugin5.PlanResourceChange_Response{} - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - - priorStateVal, err := decodeDynamicValue(req.PriorState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - proposedStateVal, err := decodeDynamicValue(req.ProposedNewState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - metaTy := p.schema.ProviderMeta.Block.ImpliedType() - metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - planResp := p.provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: req.TypeName, - PriorState: priorStateVal, - ProposedNewState: proposedStateVal, - Config: configVal, - PriorPrivate: req.PriorPrivate, - ProviderMeta: metaVal, - }) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, planResp.Diagnostics) - if planResp.Diagnostics.HasErrors() { - return resp, nil - } - - 
resp.PlannedPrivate = planResp.PlannedPrivate - - resp.PlannedState, err = encodeDynamicValue(planResp.PlannedState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - for _, path := range planResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.PathToAttributePath(path)) - } - - return resp, nil -} - -func (p *provider) ApplyResourceChange(_ context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { - resp := &tfplugin5.ApplyResourceChange_Response{} - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - - priorStateVal, err := decodeDynamicValue(req.PriorState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err := decodeDynamicValue(req.PlannedState, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - metaTy := p.schema.ProviderMeta.Block.ImpliedType() - metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - applyResp := p.provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: req.TypeName, - PriorState: priorStateVal, - PlannedState: plannedStateVal, - Config: configVal, - PlannedPrivate: req.PlannedPrivate, - ProviderMeta: metaVal, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, applyResp.Diagnostics) - if applyResp.Diagnostics.HasErrors() { - return resp, nil - } - resp.Private = applyResp.Private - - resp.NewState, err = encodeDynamicValue(applyResp.NewState, ty) - if err != nil { - resp.Diagnostics = 
convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - return resp, nil -} - -func (p *provider) ImportResourceState(_ context.Context, req *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { - resp := &tfplugin5.ImportResourceState_Response{} - - importResp := p.provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: req.TypeName, - ID: req.Id, - }) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, importResp.Diagnostics) - - for _, res := range importResp.ImportedResources { - ty := p.schema.ResourceTypes[res.TypeName].Block.ImpliedType() - state, err := encodeDynamicValue(res.State, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - continue - } - - resp.ImportedResources = append(resp.ImportedResources, &tfplugin5.ImportResourceState_ImportedResource{ - TypeName: res.TypeName, - State: state, - Private: res.Private, - }) - } - - return resp, nil -} - -func (p *provider) ReadDataSource(_ context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { - resp := &tfplugin5.ReadDataSource_Response{} - ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - metaTy := p.schema.ProviderMeta.Block.ImpliedType() - metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - readResp := p.provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: req.TypeName, - Config: configVal, - ProviderMeta: metaVal, - }) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) - if readResp.Diagnostics.HasErrors() { - return resp, nil - } - - resp.State, err = 
encodeDynamicValue(readResp.State, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - return resp, nil -} - -func (p *provider) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { - resp := &tfplugin5.Stop_Response{} - err := p.provider.Stop() - if err != nil { - resp.Error = err.Error() - } - return resp, nil -} - -// decode a DynamicValue from either the JSON or MsgPack encoding. -func decodeDynamicValue(v *tfplugin5.DynamicValue, ty cty.Type) (cty.Value, error) { - // always return a valid value - var err error - res := cty.NullVal(ty) - if v == nil { - return res, nil - } - - switch { - case len(v.Msgpack) > 0: - res, err = msgpack.Unmarshal(v.Msgpack, ty) - case len(v.Json) > 0: - res, err = ctyjson.Unmarshal(v.Json, ty) - } - return res, err -} - -// encode a cty.Value into a DynamicValue msgpack payload. -func encodeDynamicValue(v cty.Value, ty cty.Type) (*tfplugin5.DynamicValue, error) { - mp, err := msgpack.Marshal(v, ty) - return &tfplugin5.DynamicValue{ - Msgpack: mp, - }, err -} diff --git a/internal/grpcwrap/provisioner.go b/internal/grpcwrap/provisioner.go deleted file mode 100644 index ef265248a6e2..000000000000 --- a/internal/grpcwrap/provisioner.go +++ /dev/null @@ -1,116 +0,0 @@ -package grpcwrap - -import ( - "context" - "log" - "strings" - "unicode/utf8" - - "github.com/hashicorp/terraform/internal/communicator/shared" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plugin/convert" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/tfplugin5" -) - -// New wraps a provisioners.Interface to implement a grpc ProviderServer. -// This is useful for creating a test binary out of an internal provider -// implementation. 
-func Provisioner(p provisioners.Interface) tfplugin5.ProvisionerServer { - return &provisioner{ - provisioner: p, - schema: p.GetSchema().Provisioner, - } -} - -type provisioner struct { - provisioner provisioners.Interface - schema *configschema.Block -} - -func (p *provisioner) GetSchema(_ context.Context, req *tfplugin5.GetProvisionerSchema_Request) (*tfplugin5.GetProvisionerSchema_Response, error) { - resp := &tfplugin5.GetProvisionerSchema_Response{} - - resp.Provisioner = &tfplugin5.Schema{ - Block: &tfplugin5.Schema_Block{}, - } - - if p.schema != nil { - resp.Provisioner.Block = convert.ConfigSchemaToProto(p.schema) - } - - return resp, nil -} - -func (p *provisioner) ValidateProvisionerConfig(_ context.Context, req *tfplugin5.ValidateProvisionerConfig_Request) (*tfplugin5.ValidateProvisionerConfig_Response, error) { - resp := &tfplugin5.ValidateProvisionerConfig_Response{} - ty := p.schema.ImpliedType() - - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - validateResp := p.provisioner.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - }) - - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) - return resp, nil -} - -func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request, srv tfplugin5.Provisioner_ProvisionResourceServer) error { - // We send back a diagnostics over the stream if there was a - // provisioner-side problem. 
- srvResp := &tfplugin5.ProvisionResource_Response{} - - ty := p.schema.ImpliedType() - configVal, err := decodeDynamicValue(req.Config, ty) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - - connVal, err := decodeDynamicValue(req.Connection, shared.ConnectionBlockSupersetSchema.ImpliedType()) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - - resp := p.provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: configVal, - Connection: connVal, - UIOutput: uiOutput{srv}, - }) - - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, resp.Diagnostics) - srv.Send(srvResp) - return nil -} - -func (p *provisioner) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { - resp := &tfplugin5.Stop_Response{} - err := p.provisioner.Stop() - if err != nil { - resp.Error = err.Error() - } - return resp, nil -} - -// uiOutput implements the terraform.UIOutput interface to adapt the grpc -// stream to the legacy Provisioner.Apply method. 
-type uiOutput struct { - srv tfplugin5.Provisioner_ProvisionResourceServer -} - -func (o uiOutput) Output(s string) { - err := o.srv.Send(&tfplugin5.ProvisionResource_Response{ - Output: strings.ToValidUTF8(s, string(utf8.RuneError)), - }) - if err != nil { - log.Printf("[ERROR] %s", err) - } -} diff --git a/internal/initwd/testing.go b/internal/initwd/testing.go deleted file mode 100644 index 406718159c43..000000000000 --- a/internal/initwd/testing.go +++ /dev/null @@ -1,74 +0,0 @@ -package initwd - -import ( - "context" - "testing" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests, -// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows -// a test configuration to be loaded in a single step. -// -// If module installation fails, t.Fatal (or similar) is called to halt -// execution of the test, under the assumption that installation failures are -// not expected. If installation failures _are_ expected then use -// NewLoaderForTests and work with the loader object directly. If module -// installation succeeds but generates warnings, these warnings are discarded. -// -// If installation succeeds but errors are detected during loading then a -// possibly-incomplete config is returned along with error diagnostics. The -// test run is not aborted in this case, so that the caller can make assertions -// against the returned diagnostics. -// -// As with NewLoaderForTests, a cleanup function is returned which must be -// called before the test completes in order to remove the temporary -// modules directory. 
-func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) { - t.Helper() - - var diags tfdiags.Diagnostics - - loader, cleanup := configload.NewLoaderForTests(t) - inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) - - _, moreDiags := inst.InstallModules(context.Background(), rootDir, true, ModuleInstallHooksImpl{}) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - cleanup() - t.Fatal(diags.Err()) - return nil, nil, func() {}, diags - } - - // Since module installer has modified the module manifest on disk, we need - // to refresh the cache of it in the loader. - if err := loader.RefreshModules(); err != nil { - t.Fatalf("failed to refresh modules after installation: %s", err) - } - - config, hclDiags := loader.LoadConfig(rootDir) - diags = diags.Append(hclDiags) - return config, loader, cleanup, diags -} - -// MustLoadConfigForTests is a variant of LoadConfigForTests which calls -// t.Fatal (or similar) if there are any errors during loading, and thus -// does not return diagnostics at all. -// -// This is useful for concisely writing tests that don't expect errors at -// all. For tests that expect errors and need to assert against them, use -// LoadConfigForTests instead. 
-func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) { - t.Helper() - - config, loader, cleanup, diags := LoadConfigForTests(t, rootDir) - if diags.HasErrors() { - cleanup() - t.Fatal(diags.Err()) - } - return config, loader, cleanup -} diff --git a/internal/instances/set.go b/internal/instances/set.go deleted file mode 100644 index 701a2d27e042..000000000000 --- a/internal/instances/set.go +++ /dev/null @@ -1,51 +0,0 @@ -package instances - -import ( - "github.com/hashicorp/terraform/internal/addrs" -) - -// Set is a set of instances, intended mainly for the return value of -// Expander.AllInstances, where it therefore represents all of the module -// and resource instances known to the expander. -type Set struct { - // Set currently really just wraps Expander with a reduced API that - // only supports lookups, to make it clear that a holder of a Set should - // not be modifying the expander any further. - exp *Expander -} - -// HasModuleInstance returns true if and only if the set contains the module -// instance with the given address. -func (s Set) HasModuleInstance(want addrs.ModuleInstance) bool { - return s.exp.knowsModuleInstance(want) -} - -// HasModuleCall returns true if and only if the set contains the module -// call with the given address, even if that module call has no instances. -func (s Set) HasModuleCall(want addrs.AbsModuleCall) bool { - return s.exp.knowsModuleCall(want) -} - -// HasResourceInstance returns true if and only if the set contains the resource -// instance with the given address. -// TODO: -func (s Set) HasResourceInstance(want addrs.AbsResourceInstance) bool { - return s.exp.knowsResourceInstance(want) -} - -// HasResource returns true if and only if the set contains the resource with -// the given address, even if that resource has no instances. 
-// TODO: -func (s Set) HasResource(want addrs.AbsResource) bool { - return s.exp.knowsResource(want) -} - -// InstancesForModule returns all of the module instances that correspond with -// the given static module path. -// -// If there are multiple module calls in the path that have repetition enabled -// then the result is the full expansion of all combinations of all of their -// declared instance keys. -func (s Set) InstancesForModule(modAddr addrs.Module) []addrs.ModuleInstance { - return s.exp.expandModule(modAddr, true) -} diff --git a/internal/lang/blocktoattr/schema.go b/internal/lang/blocktoattr/schema.go deleted file mode 100644 index f704a0487019..000000000000 --- a/internal/lang/blocktoattr/schema.go +++ /dev/null @@ -1,146 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func ambiguousNames(schema *configschema.Block) map[string]struct{} { - if schema == nil { - return nil - } - ambiguousNames := make(map[string]struct{}) - for name, attrS := range schema.Attributes { - aty := attrS.Type - if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { - ambiguousNames[name] = struct{}{} - } - } - return ambiguousNames -} - -func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { - ret := &hcl.BodySchema{} - - appearsAsBlock := make(map[string]struct{}) - { - // We'll construct some throwaway schemas here just to probe for - // whether each of our ambiguous names seems to be being used as - // an attribute or a block. We need to check both because in JSON - // syntax we rely on the schema to decide between attribute or block - // interpretation and so JSON will always answer yes to both of - // these questions and we want to prefer the attribute interpretation - // in that case. 
- var probeSchema hcl.BodySchema - - for name := range ambiguousNames { - probeSchema = hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: name, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - if _, exists := content.Attributes[name]; exists { - // Can decode as an attribute, so we'll go with that. - continue - } - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: name, - }, - }, - } - content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 || dynamicExpanded { - // A dynamic block with an empty iterator returns nothing. - // If there's no attribute and we have either a block or a - // dynamic expansion, we need to rewrite this one as a - // block for a successful result. - appearsAsBlock[name] = struct{}{} - } - } - if !dynamicExpanded { - // If we're deciding for a context where dynamic blocks haven't - // been expanded yet then we need to probe for those too. - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "dynamic", - LabelNames: []string{"type"}, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - for _, block := range content.Blocks { - if _, exists := ambiguousNames[block.Labels[0]]; exists { - appearsAsBlock[block.Labels[0]] = struct{}{} - } - } - } - } - - for _, attrS := range given.Attributes { - if _, exists := appearsAsBlock[attrS.Name]; exists { - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: attrS.Name, - }) - } else { - ret.Attributes = append(ret.Attributes, attrS) - } - } - - // Anything that is specified as a block type in the input schema remains - // that way by just passing through verbatim. - ret.Blocks = append(ret.Blocks, given.Blocks...) - - return ret -} - -// SchemaForCtyElementType converts a cty object type into an -// approximately-equivalent configschema.Block representing the element of -// a list or set. If the given type is not an object type then this -// function will panic. 
-func SchemaForCtyElementType(ty cty.Type) *configschema.Block { - atys := ty.AttributeTypes() - ret := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(atys)), - } - for name, aty := range atys { - ret.Attributes[name] = &configschema.Attribute{ - Type: aty, - Optional: true, - } - } - return ret -} - -// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type -// into an approximately-equivalent configschema.NestedBlock. If the given type -// is not of the expected kind then this function will panic. -func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch { - case ty.IsListType(): - nesting = configschema.NestingList - case ty.IsSetType(): - nesting = configschema.NestingSet - default: - panic("unsuitable type") - } - nested := SchemaForCtyElementType(ty.ElementType()) - return &configschema.NestedBlock{ - Nesting: nesting, - Block: *nested, - } -} - -// TypeCanBeBlocks returns true if the given type is a list-of-object or -// set-of-object type, and would thus be subject to the blocktoattr fixup -// if used as an attribute type. 
-func TypeCanBeBlocks(ty cty.Type) bool { - return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() -} diff --git a/internal/lang/blocktoattr/variables.go b/internal/lang/blocktoattr/variables.go deleted file mode 100644 index 92d593160798..000000000000 --- a/internal/lang/blocktoattr/variables.go +++ /dev/null @@ -1,45 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -// ExpandedVariables finds all of the global variables referenced in the -// given body with the given schema while taking into account the possibilities -// both of "dynamic" blocks being expanded and the possibility of certain -// attributes being written instead as nested blocks as allowed by the -// FixUpBlockAttrs function. -// -// This function exists to allow variables to be analyzed prior to dynamic -// block expansion while also dealing with the fact that dynamic block expansion -// might in turn produce nested blocks that are subject to FixUpBlockAttrs. -// -// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, -// which is itself a drop-in replacement for hcldec.Variables. 
-func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { - rootNode := dynblock.WalkVariables(body) - return walkVariables(rootNode, body, schema) -} - -func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { - givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) - ambiguousNames := ambiguousNames(schema) - effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) - vars, children := node.Visit(effectiveRawSchema) - - for _, child := range children { - if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { - vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) - } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { - // ☝️Check for collection type before element type, because if this is a mis-placed reference, - // a panic here will prevent other useful diags from being elevated to show the user what to fix - synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) - vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
- } - } - - return vars -} diff --git a/internal/lang/blocktoattr/variables_test.go b/internal/lang/blocktoattr/variables_test.go deleted file mode 100644 index 94076b75c274..000000000000 --- a/internal/lang/blocktoattr/variables_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package blocktoattr - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - hcljson "github.com/hashicorp/hcl/v2/json" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func TestExpandedVariables(t *testing.T) { - fooSchema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "bar": cty.String, - })), - Optional: true, - }, - "bar": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - } - - tests := map[string]struct { - src string - json bool - schema *configschema.Block - want []hcl.Traversal - }{ - "empty": { - src: ``, - schema: &configschema.Block{}, - want: nil, - }, - "attribute syntax": { - src: ` -foo = [ - { - bar = baz - }, -] -`, - schema: fooSchema, - want: []hcl.Traversal{ - { - hcl.TraverseRoot{ - Name: "baz", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 4, Column: 11, Byte: 23}, - End: hcl.Pos{Line: 4, Column: 14, Byte: 26}, - }, - }, - }, - }, - }, - "block syntax": { - src: ` -foo { - bar = baz -} -`, - schema: fooSchema, - want: []hcl.Traversal{ - { - hcl.TraverseRoot{ - Name: "baz", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 3, Column: 9, Byte: 15}, - End: hcl.Pos{Line: 3, Column: 12, Byte: 18}, - }, - }, - }, - }, - }, - "block syntax with nested blocks": { - src: ` -foo { - bar { - boop = baz - } -} -`, - schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "bar": 
cty.List(cty.Object(map[string]cty.Type{ - "boop": cty.String, - })), - })), - Optional: true, - }, - }, - }, - want: []hcl.Traversal{ - { - hcl.TraverseRoot{ - Name: "baz", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 4, Column: 12, Byte: 26}, - End: hcl.Pos{Line: 4, Column: 15, Byte: 29}, - }, - }, - }, - }, - }, - "dynamic block syntax": { - src: ` -dynamic "foo" { - for_each = beep - content { - bar = baz - } -} -`, - schema: fooSchema, - want: []hcl.Traversal{ - { - hcl.TraverseRoot{ - Name: "beep", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, - End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, - }, - }, - }, - { - hcl.TraverseRoot{ - Name: "baz", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 5, Column: 11, Byte: 57}, - End: hcl.Pos{Line: 5, Column: 14, Byte: 60}, - }, - }, - }, - }, - }, - "misplaced dynamic block": { - src: ` -dynamic "bar" { - for_each = beep - content { - key = val - } -} -`, - schema: fooSchema, - want: []hcl.Traversal{ - { - hcl.TraverseRoot{ - Name: "beep", - SrcRange: hcl.Range{ - Filename: "test.tf", - Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, - End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, - }, - }, - }, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var f *hcl.File - var diags hcl.Diagnostics - if test.json { - f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") - } else { - f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) - } - if diags.HasErrors() { - for _, diag := range diags { - t.Errorf("unexpected diagnostic: %s", diag) - } - t.FailNow() - } - - got := ExpandedVariables(f.Body, test.schema) - - co := cmpopts.IgnoreUnexported(hcl.TraverseRoot{}) - if !cmp.Equal(got, test.want, co) { - t.Errorf("wrong result\n%s", cmp.Diff(test.want, got, co)) - } - }) - } - -} diff --git a/internal/lang/funcs/filesystem_test.go 
b/internal/lang/funcs/filesystem_test.go deleted file mode 100644 index 037137ae64f4..000000000000 --- a/internal/lang/funcs/filesystem_test.go +++ /dev/null @@ -1,695 +0,0 @@ -package funcs - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/hashicorp/terraform/internal/lang/marks" - homedir "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" -) - -func TestFile(t *testing.T) { - tests := []struct { - Path cty.Value - Want cty.Value - Err string - }{ - { - cty.StringVal("testdata/hello.txt"), - cty.StringVal("Hello World"), - ``, - }, - { - cty.StringVal("testdata/icon.png"), - cty.NilVal, - `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead`, - }, - { - cty.StringVal("testdata/icon.png").Mark(marks.Sensitive), - cty.NilVal, - `contents of (sensitive value) are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, - }, - { - cty.StringVal("testdata/missing"), - cty.NilVal, - `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, - }, - { - cty.StringVal("testdata/missing").Mark(marks.Sensitive), - cty.NilVal, - `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("File(\".\", %#v)", test.Path), func(t *testing.T) { - got, err := File(".", test.Path) - - if test.Err != "" { - if err == nil { - t.Fatal("succeeded; want error") - } - if got, want := err.Error(), test.Err; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestTemplateFile(t *testing.T) { - tests := []struct { - Path cty.Value - Vars cty.Value - Want cty.Value - Err string - }{ - { - cty.StringVal("testdata/hello.txt"), - cty.EmptyObjectVal, - cty.StringVal("Hello World"), - ``, - }, - { - cty.StringVal("testdata/icon.png"), - cty.EmptyObjectVal, - cty.NilVal, - `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, - }, - { - cty.StringVal("testdata/missing"), - cty.EmptyObjectVal, - cty.NilVal, - `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, - }, - { - cty.StringVal("testdata/secrets.txt").Mark(marks.Sensitive), - cty.EmptyObjectVal, - cty.NilVal, - `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, - }, - { - cty.StringVal("testdata/hello.tmpl"), - cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("Jodie"), - }), - cty.StringVal("Hello, Jodie!"), - ``, - }, - { - cty.StringVal("testdata/hello.tmpl"), - cty.MapVal(map[string]cty.Value{ - "name!": cty.StringVal("Jodie"), - }), - cty.NilVal, - `invalid template variable name "name!": must start with a letter, followed by zero or more letters, digits, and underscores`, - }, - { - cty.StringVal("testdata/hello.tmpl"), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("Jimbo"), - }), - cty.StringVal("Hello, Jimbo!"), - ``, - }, - { - cty.StringVal("testdata/hello.tmpl"), - cty.EmptyObjectVal, - cty.NilVal, - `vars map does not contain key "name", referenced at testdata/hello.tmpl:1,10-14`, - }, - { - cty.StringVal("testdata/func.tmpl"), - cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("a"), - cty.StringVal("b"), - cty.StringVal("c"), - }), - }), - cty.StringVal("The items are a, b, c"), - ``, - }, - { - cty.StringVal("testdata/recursive.tmpl"), - cty.MapValEmpty(cty.String), - cty.NilVal, - `testdata/recursive.tmpl:1,3-16: Error in function call; Call to 
function "templatefile" failed: cannot recursively call templatefile from inside templatefile call.`, - }, - { - cty.StringVal("testdata/list.tmpl"), - cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("a"), - cty.StringVal("b"), - cty.StringVal("c"), - }), - }), - cty.StringVal("- a\n- b\n- c\n"), - ``, - }, - { - cty.StringVal("testdata/list.tmpl"), - cty.ObjectVal(map[string]cty.Value{ - "list": cty.True, - }), - cty.NilVal, - `testdata/list.tmpl:1,13-17: Iteration over non-iterable value; A value of type bool cannot be used as the collection in a 'for' expression.`, - }, - { - cty.StringVal("testdata/bare.tmpl"), - cty.ObjectVal(map[string]cty.Value{ - "val": cty.True, - }), - cty.True, // since this template contains only an interpolation, its true value shines through - ``, - }, - } - - templateFileFn := MakeTemplateFileFunc(".", func() map[string]function.Function { - return map[string]function.Function{ - "join": stdlib.JoinFunc, - "templatefile": MakeFileFunc(".", false), // just a placeholder, since templatefile itself overrides this - } - }) - - for _, test := range tests { - t.Run(fmt.Sprintf("TemplateFile(%#v, %#v)", test.Path, test.Vars), func(t *testing.T) { - got, err := templateFileFn.Call([]cty.Value{test.Path, test.Vars}) - - if argErr, ok := err.(function.ArgError); ok { - if argErr.Index < 0 || argErr.Index > 1 { - t.Errorf("ArgError index %d is out of range for templatefile (must be 0 or 1)", argErr.Index) - } - } - - if test.Err != "" { - if err == nil { - t.Fatal("succeeded; want error") - } - if got, want := err.Error(), test.Err; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestFileExists(t *testing.T) { - tests := []struct { - Path cty.Value - Want cty.Value - Err string - }{ 
- { - cty.StringVal("testdata/hello.txt"), - cty.BoolVal(true), - ``, - }, - { - cty.StringVal(""), - cty.BoolVal(false), - `"." is a directory, not a file`, - }, - { - cty.StringVal("testdata").Mark(marks.Sensitive), - cty.BoolVal(false), - `(sensitive value) is a directory, not a file`, - }, - { - cty.StringVal("testdata/missing"), - cty.BoolVal(false), - ``, - }, - { - cty.StringVal("testdata/unreadable/foobar"), - cty.BoolVal(false), - `failed to stat "testdata/unreadable/foobar"`, - }, - { - cty.StringVal("testdata/unreadable/foobar").Mark(marks.Sensitive), - cty.BoolVal(false), - `failed to stat (sensitive value)`, - }, - } - - // Ensure "unreadable" directory cannot be listed during the test run - fi, err := os.Lstat("testdata/unreadable") - if err != nil { - t.Fatal(err) - } - os.Chmod("testdata/unreadable", 0000) - defer func(mode os.FileMode) { - os.Chmod("testdata/unreadable", mode) - }(fi.Mode()) - - for _, test := range tests { - t.Run(fmt.Sprintf("FileExists(\".\", %#v)", test.Path), func(t *testing.T) { - got, err := FileExists(".", test.Path) - - if test.Err != "" { - if err == nil { - t.Fatal("succeeded; want error") - } - if got, want := err.Error(), test.Err; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestFileSet(t *testing.T) { - tests := []struct { - Path cty.Value - Pattern cty.Value - Want cty.Value - Err string - }{ - { - cty.StringVal("."), - cty.StringVal("testdata*"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("{testdata,missing}"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/missing"), - cty.SetValEmpty(cty.String), - ``, - 
}, - { - cty.StringVal("."), - cty.StringVal("testdata/missing*"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("*/missing"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("**/missing"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/*.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/hello.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/hello.???"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/hello*"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.tmpl"), - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("testdata/hello.{tmpl,txt}"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.tmpl"), - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("*/hello.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("*/*.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("*/hello*"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.tmpl"), - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("**/hello*"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.tmpl"), - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("**/hello.{tmpl,txt}"), - cty.SetVal([]cty.Value{ - cty.StringVal("testdata/hello.tmpl"), - cty.StringVal("testdata/hello.txt"), - }), - ``, - }, - { - cty.StringVal("."), - cty.StringVal("["), - 
cty.SetValEmpty(cty.String), - `failed to glob pattern "[": syntax error in pattern`, - }, - { - cty.StringVal("."), - cty.StringVal("[").Mark(marks.Sensitive), - cty.SetValEmpty(cty.String), - `failed to glob pattern (sensitive value): syntax error in pattern`, - }, - { - cty.StringVal("."), - cty.StringVal("\\"), - cty.SetValEmpty(cty.String), - `failed to glob pattern "\\": syntax error in pattern`, - }, - { - cty.StringVal("testdata"), - cty.StringVal("missing"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("testdata"), - cty.StringVal("missing*"), - cty.SetValEmpty(cty.String), - ``, - }, - { - cty.StringVal("testdata"), - cty.StringVal("*.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("hello.txt"), - }), - ``, - }, - { - cty.StringVal("testdata"), - cty.StringVal("hello.txt"), - cty.SetVal([]cty.Value{ - cty.StringVal("hello.txt"), - }), - ``, - }, - { - cty.StringVal("testdata"), - cty.StringVal("hello.???"), - cty.SetVal([]cty.Value{ - cty.StringVal("hello.txt"), - }), - ``, - }, - { - cty.StringVal("testdata"), - cty.StringVal("hello*"), - cty.SetVal([]cty.Value{ - cty.StringVal("hello.tmpl"), - cty.StringVal("hello.txt"), - }), - ``, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("FileSet(\".\", %#v, %#v)", test.Path, test.Pattern), func(t *testing.T) { - got, err := FileSet(".", test.Path, test.Pattern) - - if test.Err != "" { - if err == nil { - t.Fatal("succeeded; want error") - } - if got, want := err.Error(), test.Err; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestFileBase64(t *testing.T) { - tests := []struct { - Path cty.Value - Want cty.Value - Err bool - }{ - { - cty.StringVal("testdata/hello.txt"), - cty.StringVal("SGVsbG8gV29ybGQ="), - false, - }, - { - 
cty.StringVal("testdata/icon.png"), - cty.StringVal("iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAq1BMVEX///9cTuVeUeRcTuZcTuZcT+VbSe1cTuVdT+MAAP9JSbZcT+VcTuZAQLFAQLJcTuVcTuZcUuBBQbA/P7JAQLJaTuRcT+RcTuVGQ7xAQLJVVf9cTuVcTuVGRMFeUeRbTeJcTuU/P7JeTeZbTOVcTeZAQLJBQbNAQLNaUORcTeZbT+VcTuRAQLNAQLRdTuRHR8xgUOdgUN9cTuVdTeRdT+VZTulcTuVAQLL///8+GmETAAAANnRSTlMApibw+osO6DcBB3fIX87+oRk3yehB0/Nj/gNs7nsTRv3dHmu//JYUMLVr3bssjxkgEK5CaxeK03nIAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAADoQAAA6EBvJf9gwAAAAd0SU1FB+EEBRIQDxZNTKsAAACCSURBVBjTfc7JFsFQEATQQpCYxyBEzJ55rvf/f0ZHcyQLvelTd1GngEwWycs5+UISyKLraSi9geWKK9Gr1j7AeqOJVtt2XtD1Bchef2BjQDAcCTC0CsA4mihMtXw2XwgsV2sFw812F+4P3y2GdI6nn3FGSs//4HJNAXDzU4Dg/oj/E+bsEbhf5cMsAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE3LTA0LTA1VDE4OjE2OjE1KzAyOjAws5bLVQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNy0wNC0wNVQxODoxNjoxNSswMjowMMLLc+kAAAAZdEVYdFNvZnR3YXJlAHd3dy5pbmtzY2FwZS5vcmeb7jwaAAAAC3RFWHRUaXRsZQBHcm91cJYfIowAAABXelRYdFJhdyBwcm9maWxlIHR5cGUgaXB0YwAAeJzj8gwIcVYoKMpPy8xJ5VIAAyMLLmMLEyMTS5MUAxMgRIA0w2QDI7NUIMvY1MjEzMQcxAfLgEigSi4A6hcRdPJCNZUAAAAASUVORK5CYII="), - false, - }, - { - cty.StringVal("testdata/missing"), - cty.NilVal, - true, // no file exists - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("FileBase64(\".\", %#v)", test.Path), func(t *testing.T) { - got, err := FileBase64(".", test.Path) - - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestBasename(t *testing.T) { - tests := []struct { - Path cty.Value - Want cty.Value - Err bool - }{ - { - cty.StringVal("testdata/hello.txt"), - cty.StringVal("hello.txt"), - false, - }, - { - cty.StringVal("hello.txt"), - cty.StringVal("hello.txt"), - false, - }, - { - cty.StringVal(""), - cty.StringVal("."), - false, - }, - } - 
- for _, test := range tests { - t.Run(fmt.Sprintf("Basename(%#v)", test.Path), func(t *testing.T) { - got, err := Basename(test.Path) - - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestDirname(t *testing.T) { - tests := []struct { - Path cty.Value - Want cty.Value - Err bool - }{ - { - cty.StringVal("testdata/hello.txt"), - cty.StringVal("testdata"), - false, - }, - { - cty.StringVal("testdata/foo/hello.txt"), - cty.StringVal("testdata/foo"), - false, - }, - { - cty.StringVal("hello.txt"), - cty.StringVal("."), - false, - }, - { - cty.StringVal(""), - cty.StringVal("."), - false, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { - got, err := Dirname(test.Path) - - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - -func TestPathExpand(t *testing.T) { - homePath, err := homedir.Dir() - if err != nil { - t.Fatalf("Error getting home directory: %v", err) - } - - tests := []struct { - Path cty.Value - Want cty.Value - Err bool - }{ - { - cty.StringVal("~/test-file"), - cty.StringVal(filepath.Join(homePath, "test-file")), - false, - }, - { - cty.StringVal("~/another/test/file"), - cty.StringVal(filepath.Join(homePath, "another/test/file")), - false, - }, - { - cty.StringVal("/root/file"), - cty.StringVal("/root/file"), - false, - }, - { - cty.StringVal("/"), - cty.StringVal("/"), - false, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { - got, err := Pathexpand(test.Path) - - if test.Err { - if err == nil { - 
t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} diff --git a/internal/lang/funcs/sensitive.go b/internal/lang/funcs/sensitive.go deleted file mode 100644 index 1ce0774a3322..000000000000 --- a/internal/lang/funcs/sensitive.go +++ /dev/null @@ -1,67 +0,0 @@ -package funcs - -import ( - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// SensitiveFunc returns a value identical to its argument except that -// Terraform will consider it to be sensitive. -var SensitiveFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowNull: true, - AllowMarked: true, - AllowDynamicType: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - // This function only affects the value's marks, so the result - // type is always the same as the argument type. - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - val, _ := args[0].Unmark() - return val.Mark(marks.Sensitive), nil - }, -}) - -// NonsensitiveFunc takes a sensitive value and returns the same value without -// the sensitive marking, effectively exposing the value. -var NonsensitiveFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowNull: true, - AllowMarked: true, - AllowDynamicType: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - // This function only affects the value's marks, so the result - // type is always the same as the argument type. 
- return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if args[0].IsKnown() && !args[0].HasMark(marks.Sensitive) { - return cty.DynamicVal, function.NewArgErrorf(0, "the given value is not sensitive, so this call is redundant") - } - v, m := args[0].Unmark() - delete(m, marks.Sensitive) // remove the sensitive marking - return v.WithMarks(m), nil - }, -}) - -func Sensitive(v cty.Value) (cty.Value, error) { - return SensitiveFunc.Call([]cty.Value{v}) -} - -func Nonsensitive(v cty.Value) (cty.Value, error) { - return NonsensitiveFunc.Call([]cty.Value{v}) -} diff --git a/internal/legacy/helper/schema/backend.go b/internal/legacy/helper/schema/backend.go deleted file mode 100644 index 7bd9426abea0..000000000000 --- a/internal/legacy/helper/schema/backend.go +++ /dev/null @@ -1,200 +0,0 @@ -package schema - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" - ctyconvert "github.com/zclconf/go-cty/cty/convert" -) - -// Backend represents a partial backend.Backend implementation and simplifies -// the creation of configuration loading and validation. -// -// Unlike other schema structs such as Provider, this struct is meant to be -// embedded within your actual implementation. It provides implementations -// only for Input and Configure and gives you a method for accessing the -// configuration in the form of a ResourceData that you're expected to call -// from the other implementation funcs. -type Backend struct { - // Schema is the schema for the configuration of this backend. If this - // Backend has no configuration this can be omitted. - Schema map[string]*Schema - - // ConfigureFunc is called to configure the backend. 
Use the - // FromContext* methods to extract information from the context. - // This can be nil, in which case nothing will be called but the - // config will still be stored. - ConfigureFunc func(context.Context) error - - config *ResourceData -} - -var ( - backendConfigKey = contextKey("backend config") -) - -// FromContextBackendConfig extracts a ResourceData with the configuration -// from the context. This should only be called by Backend functions. -func FromContextBackendConfig(ctx context.Context) *ResourceData { - return ctx.Value(backendConfigKey).(*ResourceData) -} - -func (b *Backend) ConfigSchema() *configschema.Block { - // This is an alias of CoreConfigSchema just to implement the - // backend.Backend interface. - return b.CoreConfigSchema() -} - -func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - if b == nil { - return configVal, nil - } - var diags tfdiags.Diagnostics - var err error - - // In order to use Transform below, this needs to be filled out completely - // according the schema. - configVal, err = b.CoreConfigSchema().CoerceValue(configVal) - if err != nil { - return configVal, diags.Append(err) - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. 
- configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := b.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return configVal, diags - } - - shimRC := b.shimConfig(configVal) - warns, errs := schemaMap(b.Schema).Validate(shimRC) - for _, warn := range warns { - diags = diags.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - diags = diags.Append(err) - } - return configVal, diags -} - -func (b *Backend) 
Configure(obj cty.Value) tfdiags.Diagnostics { - if b == nil { - return nil - } - - var diags tfdiags.Diagnostics - sm := schemaMap(b.Schema) - shimRC := b.shimConfig(obj) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, shimRC, nil, nil, true) - if err != nil { - diags = diags.Append(err) - return diags - } - - data, err := sm.Data(nil, diff) - if err != nil { - diags = diags.Append(err) - return diags - } - b.config = data - - if b.ConfigureFunc != nil { - err = b.ConfigureFunc(context.WithValue( - context.Background(), backendConfigKey, data)) - if err != nil { - diags = diags.Append(err) - return diags - } - } - - return diags -} - -// shimConfig turns a new-style cty.Value configuration (which must be of -// an object type) into a minimal old-style *terraform.ResourceConfig object -// that should be populated enough to appease the not-yet-updated functionality -// in this package. This should be removed once everything is updated. -func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { - shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) - if !ok { - // If the configVal was nil, we still want a non-nil map here. - shimMap = map[string]interface{}{} - } - return &terraform.ResourceConfig{ - Config: shimMap, - Raw: shimMap, - } -} - -// Config returns the configuration. This is available after Configure is -// called. 
-func (b *Backend) Config() *ResourceData { - return b.config -} diff --git a/internal/legacy/helper/schema/provider.go b/internal/legacy/helper/schema/provider.go deleted file mode 100644 index c64c2e38278c..000000000000 --- a/internal/legacy/helper/schema/provider.go +++ /dev/null @@ -1,477 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -var ReservedProviderFields = []string{ - "alias", - "version", -} - -// Provider represents a resource provider in Terraform, and properly -// implements all of the ResourceProvider API. -// -// By defining a schema for the configuration of the provider, the -// map of supporting resources, and a configuration function, the schema -// framework takes over and handles all the provider operations for you. -// -// After defining the provider structure, it is unlikely that you'll require any -// of the methods on Provider itself. -type Provider struct { - // Schema is the schema for the configuration of this provider. If this - // provider has no configuration, this can be omitted. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - Schema map[string]*Schema - - // ResourcesMap is the list of available resources that this provider - // can manage, along with their Resource structure defining their - // own schemas and CRUD operations. - // - // Provider automatically handles routing operations such as Apply, - // Diff, etc. to the proper resource. - ResourcesMap map[string]*Resource - - // DataSourcesMap is the collection of available data sources that - // this provider implements, with a Resource instance defining - // the schema and Read operation of each. 
- // - // Resource instances for data sources must have a Read function - // and must *not* implement Create, Update or Delete. - DataSourcesMap map[string]*Resource - - // ProviderMetaSchema is the schema for the configuration of the meta - // information for this provider. If this provider has no meta info, - // this can be omitted. This functionality is currently experimental - // and subject to change or break without warning; it should only be - // used by providers that are collaborating on its use with the - // Terraform team. - ProviderMetaSchema map[string]*Schema - - // ConfigureFunc is a function for configuring the provider. If the - // provider doesn't need to be configured, this can be omitted. - // - // See the ConfigureFunc documentation for more information. - ConfigureFunc ConfigureFunc - - // MetaReset is called by TestReset to reset any state stored in the meta - // interface. This is especially important if the StopContext is stored by - // the provider. - MetaReset func() error - - meta interface{} - - // a mutex is required because TestReset can directly replace the stopCtx - stopMu sync.Mutex - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once - - TerraformVersion string -} - -// ConfigureFunc is the function used to configure a Provider. -// -// The interface{} value returned by this function is stored and passed into -// the subsequent resources as the meta parameter. This return value is -// usually used to pass along a configured API client, a configuration -// structure, etc. -type ConfigureFunc func(*ResourceData) (interface{}, error) - -// InternalValidate should be called to validate the structure -// of the provider. -// -// This should be called in a unit test for any provider to verify -// before release that a provider is properly configured for use with -// this library. 
-func (p *Provider) InternalValidate() error { - if p == nil { - return errors.New("provider is nil") - } - - var validationErrors error - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - - // Provider-specific checks - for k, _ := range sm { - if isReservedProviderFieldName(k) { - return fmt.Errorf("%s is a reserved field name for a provider", k) - } - } - - for k, r := range p.ResourcesMap { - if err := r.InternalValidate(nil, true); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) - } - } - - for k, r := range p.DataSourcesMap { - if err := r.InternalValidate(nil, false); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) - } - } - - return validationErrors -} - -func isReservedProviderFieldName(name string) bool { - for _, reservedName := range ReservedProviderFields { - if name == reservedName { - return true - } - } - return false -} - -// Meta returns the metadata associated with this provider that was -// returned by the Configure call. It will be nil until Configure is called. -func (p *Provider) Meta() interface{} { - return p.meta -} - -// SetMeta can be used to forcefully set the Meta object of the provider. -// Note that if Configure is called the return value will override anything -// set here. -func (p *Provider) SetMeta(v interface{}) { - p.meta = v -} - -// Stopped reports whether the provider has been stopped or not. -func (p *Provider) Stopped() bool { - ctx := p.StopContext() - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// StopCh returns a channel that is closed once the provider is stopped. 
-func (p *Provider) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - return p.stopCtx -} - -func (p *Provider) stopInit() { - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvider interface. -func (p *Provider) Stop() error { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtxCancel() - return nil -} - -// TestReset resets any state stored in the Provider, and will call TestReset -// on Meta if it implements the TestProvider interface. -// This may be used to reset the schema.Provider at the start of a test, and is -// automatically called by resource.Test. -func (p *Provider) TestReset() error { - p.stopInit() - if p.MetaReset != nil { - return p.MetaReset() - } - return nil -} - -// GetSchema implementation of terraform.ResourceProvider interface -func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - resourceTypes := map[string]*configschema.Block{} - dataSources := map[string]*configschema.Block{} - - for _, name := range req.ResourceTypes { - if r, exists := p.ResourcesMap[name]; exists { - resourceTypes[name] = r.CoreConfigSchema() - } - } - for _, name := range req.DataSources { - if r, exists := p.DataSourcesMap[name]; exists { - dataSources[name] = r.CoreConfigSchema() - } - } - - return &terraform.ProviderSchema{ - Provider: schemaMap(p.Schema).CoreConfigSchema(), - ResourceTypes: resourceTypes, - DataSources: dataSources, - }, nil -} - -// Input implementation of terraform.ResourceProvider interface. -func (p *Provider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return schemaMap(p.Schema).Input(input, c) -} - -// Validate implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provider failed! This is always a bug\n"+ - "with the provider itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - return schemaMap(p.Schema).Validate(c) -} - -// ValidateResource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.ResourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support resource: %s", t)} - } - - return r.Validate(c) -} - -// Configure implementation of terraform.ResourceProvider interface. -func (p *Provider) Configure(c *terraform.ResourceConfig) error { - // No configuration - if p.ConfigureFunc == nil { - return nil - } - - sm := schemaMap(p.Schema) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c, nil, p.meta, true) - if err != nil { - return err - } - - data, err := sm.Data(nil, diff) - if err != nil { - return err - } - - meta, err := p.ConfigureFunc(data) - if err != nil { - return err - } - - p.meta = meta - return nil -} - -// Apply implementation of terraform.ResourceProvider interface. -func (p *Provider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Apply(s, d, p.meta) -} - -// Diff implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, p.meta) -} - -// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't -// attempt to calculate ignore_changes. -func (p *Provider) SimpleDiff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.simpleDiff(s, c, p.meta) -} - -// Refresh implementation of terraform.ResourceProvider interface. -func (p *Provider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Refresh(s, p.meta) -} - -// Resources implementation of terraform.ResourceProvider interface. -func (p *Provider) Resources() []terraform.ResourceType { - keys := make([]string, 0, len(p.ResourcesMap)) - for k := range p.ResourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.ResourceType, 0, len(keys)) - for _, k := range keys { - resource := p.ResourcesMap[k] - - // This isn't really possible (it'd fail InternalValidate), but - // we do it anyways to avoid a panic. - if resource == nil { - resource = &Resource{} - } - - result = append(result, terraform.ResourceType{ - Name: k, - Importable: resource.Importer != nil, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. 
- SchemaAvailable: true, - }) - } - - return result -} - -func (p *Provider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - // Find the resource - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - // If it doesn't support import, error - if r.Importer == nil { - return nil, fmt.Errorf("resource %s doesn't support import", info.Type) - } - - // Create the data - data := r.Data(nil) - data.SetId(id) - data.SetType(info.Type) - - // Call the import function - results := []*ResourceData{data} - if r.Importer.State != nil { - var err error - results, err = r.Importer.State(data, p.meta) - if err != nil { - return nil, err - } - } - - // Convert the results to InstanceState values and return it - states := make([]*terraform.InstanceState, len(results)) - for i, r := range results { - states[i] = r.State() - } - - // Verify that all are non-nil. If there are any nil the error - // isn't obvious so we circumvent that with a friendlier error. - for _, s := range states { - if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") - } - } - - return states, nil -} - -// ValidateDataSource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.DataSourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support data source: %s", t)} - } - - return r.Validate(c) -} - -// ReadDataDiff implementation of terraform.ResourceProvider interface. 
-func (p *Provider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.Diff(nil, c, p.meta) -} - -// RefreshData implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.ReadDataApply(d, p.meta) -} - -// DataSources implementation of terraform.ResourceProvider interface. -func (p *Provider) DataSources() []terraform.DataSource { - keys := make([]string, 0, len(p.DataSourcesMap)) - for k, _ := range p.DataSourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.DataSource, 0, len(keys)) - for _, k := range keys { - result = append(result, terraform.DataSource{ - Name: k, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. 
- SchemaAvailable: true, - }) - } - - return result -} diff --git a/internal/legacy/helper/schema/provider_test.go b/internal/legacy/helper/schema/provider_test.go deleted file mode 100644 index 1b176bc0d259..000000000000 --- a/internal/legacy/helper/schema/provider_test.go +++ /dev/null @@ -1,620 +0,0 @@ -package schema - -import ( - "fmt" - "reflect" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(Provider) -} - -func TestProviderGetSchema(t *testing.T) { - // This functionality is already broadly tested in core_schema_test.go, - // so this is just to ensure that the call passes through correctly. - p := &Provider{ - Schema: map[string]*Schema{ - "bar": { - Type: TypeString, - Required: true, - }, - }, - ResourcesMap: map[string]*Resource{ - "foo": &Resource{ - Schema: map[string]*Schema{ - "bar": { - Type: TypeString, - Required: true, - }, - }, - }, - }, - DataSourcesMap: map[string]*Resource{ - "baz": &Resource{ - Schema: map[string]*Schema{ - "bur": { - Type: TypeString, - Required: true, - }, - }, - }, - }, - } - - want := &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": &configschema.Attribute{ - Type: cty.String, - Required: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{}, - }, - ResourceTypes: map[string]*configschema.Block{ - "foo": testResource(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": &configschema.Attribute{ - Type: cty.String, - Required: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{}, - }), - }, - DataSources: map[string]*configschema.Block{ - "baz": testResource(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - 
"bur": &configschema.Attribute{ - Type: cty.String, - Required: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{}, - }), - }, - } - got, err := p.GetSchema(&terraform.ProviderSchemaRequest{ - ResourceTypes: []string{"foo", "bar"}, - DataSources: []string{"baz", "bar"}, - }) - if err != nil { - t.Fatalf("unexpected error %s", err) - } - - if !cmp.Equal(got, want, equateEmpty, typeComparer) { - t.Error("wrong result:\n", cmp.Diff(got, want, equateEmpty, typeComparer)) - } -} - -func TestProviderConfigure(t *testing.T) { - cases := []struct { - P *Provider - Config map[string]interface{} - Err bool - }{ - { - P: &Provider{}, - Config: nil, - Err: false, - }, - - { - P: &Provider{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - ConfigureFunc: func(d *ResourceData) (interface{}, error) { - if d.Get("foo").(int) == 42 { - return nil, nil - } - - return nil, fmt.Errorf("nope") - }, - }, - Config: map[string]interface{}{ - "foo": 42, - }, - Err: false, - }, - - { - P: &Provider{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - ConfigureFunc: func(d *ResourceData) (interface{}, error) { - if d.Get("foo").(int) == 42 { - return nil, nil - } - - return nil, fmt.Errorf("nope") - }, - }, - Config: map[string]interface{}{ - "foo": 52, - }, - Err: true, - }, - } - - for i, tc := range cases { - c := terraform.NewResourceConfigRaw(tc.Config) - err := tc.P.Configure(c) - if err != nil != tc.Err { - t.Fatalf("%d: %s", i, err) - } - } -} - -func TestProviderResources(t *testing.T) { - cases := []struct { - P *Provider - Result []terraform.ResourceType - }{ - { - P: &Provider{}, - Result: []terraform.ResourceType{}, - }, - - { - P: &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": nil, - "bar": nil, - }, - }, - Result: []terraform.ResourceType{ - terraform.ResourceType{Name: "bar", SchemaAvailable: true}, - terraform.ResourceType{Name: "foo", SchemaAvailable: 
true}, - }, - }, - - { - P: &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": nil, - "bar": &Resource{Importer: &ResourceImporter{}}, - "baz": nil, - }, - }, - Result: []terraform.ResourceType{ - terraform.ResourceType{Name: "bar", Importable: true, SchemaAvailable: true}, - terraform.ResourceType{Name: "baz", SchemaAvailable: true}, - terraform.ResourceType{Name: "foo", SchemaAvailable: true}, - }, - }, - } - - for i, tc := range cases { - actual := tc.P.Resources() - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("%d: %#v", i, actual) - } - } -} - -func TestProviderDataSources(t *testing.T) { - cases := []struct { - P *Provider - Result []terraform.DataSource - }{ - { - P: &Provider{}, - Result: []terraform.DataSource{}, - }, - - { - P: &Provider{ - DataSourcesMap: map[string]*Resource{ - "foo": nil, - "bar": nil, - }, - }, - Result: []terraform.DataSource{ - terraform.DataSource{Name: "bar", SchemaAvailable: true}, - terraform.DataSource{Name: "foo", SchemaAvailable: true}, - }, - }, - } - - for i, tc := range cases { - actual := tc.P.DataSources() - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("%d: got %#v; want %#v", i, actual, tc.Result) - } - } -} - -func TestProviderValidate(t *testing.T) { - cases := []struct { - P *Provider - Config map[string]interface{} - Err bool - }{ - { - P: &Provider{ - Schema: map[string]*Schema{ - "foo": &Schema{}, - }, - }, - Config: nil, - Err: true, - }, - } - - for i, tc := range cases { - c := terraform.NewResourceConfigRaw(tc.Config) - _, es := tc.P.Validate(c) - if len(es) > 0 != tc.Err { - t.Fatalf("%d: %#v", i, es) - } - } -} - -func TestProviderDiff_legacyTimeoutType(t *testing.T) { - p := &Provider{ - ResourcesMap: map[string]*Resource{ - "blah": &Resource{ - Schema: map[string]*Schema{ - "foo": { - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: &ResourceTimeout{ - Create: DefaultTimeout(10 * time.Minute), - }, - }, - }, - } - - invalidCfg := map[string]interface{}{ - "foo": 42, - 
"timeouts": []interface{}{ - map[string]interface{}{ - "create": "40m", - }, - }, - } - ic := terraform.NewResourceConfigRaw(invalidCfg) - _, err := p.Diff( - &terraform.InstanceInfo{ - Type: "blah", - }, - nil, - ic, - ) - if err != nil { - t.Fatal(err) - } -} - -func TestProviderDiff_timeoutInvalidValue(t *testing.T) { - p := &Provider{ - ResourcesMap: map[string]*Resource{ - "blah": &Resource{ - Schema: map[string]*Schema{ - "foo": { - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: &ResourceTimeout{ - Create: DefaultTimeout(10 * time.Minute), - }, - }, - }, - } - - invalidCfg := map[string]interface{}{ - "foo": 42, - "timeouts": map[string]interface{}{ - "create": "invalid", - }, - } - ic := terraform.NewResourceConfigRaw(invalidCfg) - _, err := p.Diff( - &terraform.InstanceInfo{ - Type: "blah", - }, - nil, - ic, - ) - if err == nil { - t.Fatal("Expected provider.Diff to fail with invalid timeout value") - } - expectedErrMsg := `time: invalid duration "invalid"` - if !strings.Contains(err.Error(), expectedErrMsg) { - t.Fatalf("Unexpected error message: %q\nExpected message to contain %q", - err.Error(), - expectedErrMsg) - } -} - -func TestProviderValidateResource(t *testing.T) { - cases := []struct { - P *Provider - Type string - Config map[string]interface{} - Err bool - }{ - { - P: &Provider{}, - Type: "foo", - Config: nil, - Err: true, - }, - - { - P: &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": &Resource{}, - }, - }, - Type: "foo", - Config: nil, - Err: false, - }, - } - - for i, tc := range cases { - c := terraform.NewResourceConfigRaw(tc.Config) - _, es := tc.P.ValidateResource(tc.Type, c) - if len(es) > 0 != tc.Err { - t.Fatalf("%d: %#v", i, es) - } - } -} - -func TestProviderImportState_default(t *testing.T) { - p := &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": &Resource{ - Importer: &ResourceImporter{}, - }, - }, - } - - states, err := p.ImportState(&terraform.InstanceInfo{ - Type: "foo", - }, "bar") - if err != nil { 
- t.Fatalf("err: %s", err) - } - - if len(states) != 1 { - t.Fatalf("bad: %#v", states) - } - if states[0].ID != "bar" { - t.Fatalf("bad: %#v", states) - } -} - -func TestProviderImportState_setsId(t *testing.T) { - var val string - stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { - val = d.Id() - return []*ResourceData{d}, nil - } - - p := &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": &Resource{ - Importer: &ResourceImporter{ - State: stateFunc, - }, - }, - }, - } - - _, err := p.ImportState(&terraform.InstanceInfo{ - Type: "foo", - }, "bar") - if err != nil { - t.Fatalf("err: %s", err) - } - - if val != "bar" { - t.Fatal("should set id") - } -} - -func TestProviderImportState_setsType(t *testing.T) { - var tVal string - stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { - d.SetId("foo") - tVal = d.State().Ephemeral.Type - return []*ResourceData{d}, nil - } - - p := &Provider{ - ResourcesMap: map[string]*Resource{ - "foo": &Resource{ - Importer: &ResourceImporter{ - State: stateFunc, - }, - }, - }, - } - - _, err := p.ImportState(&terraform.InstanceInfo{ - Type: "foo", - }, "bar") - if err != nil { - t.Fatalf("err: %s", err) - } - - if tVal != "foo" { - t.Fatal("should set type") - } -} - -func TestProviderMeta(t *testing.T) { - p := new(Provider) - if v := p.Meta(); v != nil { - t.Fatalf("bad: %#v", v) - } - - expected := 42 - p.SetMeta(42) - if v := p.Meta(); !reflect.DeepEqual(v, expected) { - t.Fatalf("bad: %#v", v) - } -} - -func TestProviderStop(t *testing.T) { - var p Provider - - if p.Stopped() { - t.Fatal("should not be stopped") - } - - // Verify stopch blocks - ch := p.StopContext().Done() - select { - case <-ch: - t.Fatal("should not be stopped") - case <-time.After(10 * time.Millisecond): - } - - // Stop it - if err := p.Stop(); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify - if !p.Stopped() { - t.Fatal("should be stopped") - } - - select { - case <-ch: - case 
<-time.After(10 * time.Millisecond): - t.Fatal("should be stopped") - } -} - -func TestProviderStop_stopFirst(t *testing.T) { - var p Provider - - // Stop it - if err := p.Stop(); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify - if !p.Stopped() { - t.Fatal("should be stopped") - } - - select { - case <-p.StopContext().Done(): - case <-time.After(10 * time.Millisecond): - t.Fatal("should be stopped") - } -} - -func TestProviderReset(t *testing.T) { - var p Provider - stopCtx := p.StopContext() - p.MetaReset = func() error { - stopCtx = p.StopContext() - return nil - } - - // cancel the current context - p.Stop() - - if err := p.TestReset(); err != nil { - t.Fatal(err) - } - - // the first context should have been replaced - if err := stopCtx.Err(); err != nil { - t.Fatal(err) - } - - // we should not get a canceled context here either - if err := p.StopContext().Err(); err != nil { - t.Fatal(err) - } -} - -func TestProvider_InternalValidate(t *testing.T) { - cases := []struct { - P *Provider - ExpectedErr error - }{ - { - P: &Provider{ - Schema: map[string]*Schema{ - "foo": { - Type: TypeBool, - Optional: true, - }, - }, - }, - ExpectedErr: nil, - }, - { // Reserved resource fields should be allowed in provider block - P: &Provider{ - Schema: map[string]*Schema{ - "provisioner": { - Type: TypeString, - Optional: true, - }, - "count": { - Type: TypeInt, - Optional: true, - }, - }, - }, - ExpectedErr: nil, - }, - { // Reserved provider fields should not be allowed - P: &Provider{ - Schema: map[string]*Schema{ - "alias": { - Type: TypeString, - Optional: true, - }, - }, - }, - ExpectedErr: fmt.Errorf("%s is a reserved field name for a provider", "alias"), - }, - } - - for i, tc := range cases { - err := tc.P.InternalValidate() - if tc.ExpectedErr == nil { - if err != nil { - t.Fatalf("%d: Error returned (expected no error): %s", i, err) - } - continue - } - if tc.ExpectedErr != nil && err == nil { - t.Fatalf("%d: Expected error (%s), but no error returned", 
i, tc.ExpectedErr) - } - if err.Error() != tc.ExpectedErr.Error() { - t.Fatalf("%d: Errors don't match. Expected: %#v Given: %#v", i, tc.ExpectedErr, err) - } - } -} diff --git a/internal/legacy/helper/schema/provisioner.go b/internal/legacy/helper/schema/provisioner.go deleted file mode 100644 index 5c2cce2b0bf5..000000000000 --- a/internal/legacy/helper/schema/provisioner.go +++ /dev/null @@ -1,205 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -// Provisioner represents a resource provisioner in Terraform and properly -// implements all of the ResourceProvisioner API. -// -// This higher level structure makes it much easier to implement a new or -// custom provisioner for Terraform. -// -// The function callbacks for this structure are all passed a context object. -// This context object has a number of pre-defined values that can be accessed -// via the global functions defined in context.go. -type Provisioner struct { - // ConnSchema is the schema for the connection settings for this - // provisioner. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - // - // NOTE: The value of connection keys can only be strings for now. - ConnSchema map[string]*Schema - - // Schema is the schema for the usage of this provisioner. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - Schema map[string]*Schema - - // ApplyFunc is the function for executing the provisioner. This is required. - // It is given a context. See the Provisioner struct docs for more - // information. - ApplyFunc func(ctx context.Context) error - - // ValidateFunc is a function for extended validation. 
This is optional - // and should be used when individual field validation is not enough. - ValidateFunc func(*terraform.ResourceConfig) ([]string, []error) - - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once -} - -// Keys that can be used to access data in the context parameters for -// Provisioners. -var ( - connDataInvalid = contextKey("data invalid") - - // This returns a *ResourceData for the connection information. - // Guaranteed to never be nil. - ProvConnDataKey = contextKey("provider conn data") - - // This returns a *ResourceData for the config information. - // Guaranteed to never be nil. - ProvConfigDataKey = contextKey("provider config data") - - // This returns a terraform.UIOutput. Guaranteed to never be nil. - ProvOutputKey = contextKey("provider output") - - // This returns the raw InstanceState passed to Apply. Guaranteed to - // be set, but may be nil. - ProvRawStateKey = contextKey("provider raw state") -) - -// InternalValidate should be called to validate the structure -// of the provisioner. -// -// This should be called in a unit test to verify before release that this -// structure is properly configured for use. -func (p *Provisioner) InternalValidate() error { - if p == nil { - return errors.New("provisioner is nil") - } - - var validationErrors error - { - sm := schemaMap(p.ConnSchema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - { - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - if p.ApplyFunc == nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf( - "ApplyFunc must not be nil")) - } - - return validationErrors -} - -// StopContext returns a context that checks whether a provisioner is stopped. 
-func (p *Provisioner) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - return p.stopCtx -} - -func (p *Provisioner) stopInit() { - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Stop() error { - p.stopOnce.Do(p.stopInit) - p.stopCtxCancel() - return nil -} - -// GetConfigSchema implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { - return schemaMap(p.Schema).CoreConfigSchema(), nil -} - -// Apply implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Apply( - o terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - var connData, configData *ResourceData - - { - // We first need to turn the connection information into a - // terraform.ResourceConfig so that we can use that type to more - // easily build a ResourceData structure. We do this by simply treating - // the conn info as configuration input. - raw := make(map[string]interface{}) - if s != nil { - for k, v := range s.Ephemeral.ConnInfo { - raw[k] = v - } - } - - c := terraform.NewResourceConfigRaw(raw) - sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, c, nil, nil, true) - if err != nil { - return err - } - connData, err = sm.Data(nil, diff) - if err != nil { - return err - } - } - - { - // Build the configuration data. Doing this requires making a "diff" - // even though that's never used. We use that just to get the correct types. 
- configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c, nil, nil, true) - if err != nil { - return err - } - configData, err = configMap.Data(nil, diff) - if err != nil { - return err - } - } - - // Build the context and call the function - ctx := p.StopContext() - ctx = context.WithValue(ctx, ProvConnDataKey, connData) - ctx = context.WithValue(ctx, ProvConfigDataKey, configData) - ctx = context.WithValue(ctx, ProvOutputKey, o) - ctx = context.WithValue(ctx, ProvRawStateKey, s) - return p.ApplyFunc(ctx) -} - -// Validate implements the terraform.ResourceProvisioner interface. -func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provisioner failed! This is always a bug\n"+ - "with the provisioner itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - if p.Schema != nil { - w, e := schemaMap(p.Schema).Validate(c) - ws = append(ws, w...) - es = append(es, e...) - } - - if p.ValidateFunc != nil { - w, e := p.ValidateFunc(c) - ws = append(ws, w...) - es = append(es, e...) 
- } - - return ws, es -} diff --git a/internal/legacy/helper/schema/provisioner_test.go b/internal/legacy/helper/schema/provisioner_test.go deleted file mode 100644 index 228dacd72c62..000000000000 --- a/internal/legacy/helper/schema/provisioner_test.go +++ /dev/null @@ -1,334 +0,0 @@ -package schema - -import ( - "context" - "fmt" - "reflect" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -func TestProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = new(Provisioner) -} - -func noopApply(ctx context.Context) error { - return nil -} - -func TestProvisionerValidate(t *testing.T) { - cases := []struct { - Name string - P *Provisioner - Config map[string]interface{} - Err bool - Warns []string - }{ - { - Name: "No ApplyFunc", - P: &Provisioner{}, - Config: nil, - Err: true, - }, - { - Name: "Incorrect schema", - P: &Provisioner{ - Schema: map[string]*Schema{ - "foo": {}, - }, - ApplyFunc: noopApply, - }, - Config: nil, - Err: true, - }, - { - "Basic required field", - &Provisioner{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Required: true, - Type: TypeString, - }, - }, - ApplyFunc: noopApply, - }, - nil, - true, - nil, - }, - - { - "Basic required field set", - &Provisioner{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Required: true, - Type: TypeString, - }, - }, - ApplyFunc: noopApply, - }, - map[string]interface{}{ - "foo": "bar", - }, - false, - nil, - }, - { - Name: "Warning from property validation", - P: &Provisioner{ - Schema: map[string]*Schema{ - "foo": { - Type: TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - ws = append(ws, "Simple warning from property validation") - return - }, - }, - }, - ApplyFunc: noopApply, - }, - Config: map[string]interface{}{ - "foo": "", - }, - Err: false, - Warns: []string{"Simple warning from property validation"}, - }, - { - Name: "No schema", - P: &Provisioner{ - Schema: nil, - ApplyFunc: 
noopApply, - }, - Config: nil, - Err: false, - }, - { - Name: "Warning from provisioner ValidateFunc", - P: &Provisioner{ - Schema: nil, - ApplyFunc: noopApply, - ValidateFunc: func(*terraform.ResourceConfig) (ws []string, errors []error) { - ws = append(ws, "Simple warning from provisioner ValidateFunc") - return - }, - }, - Config: nil, - Err: false, - Warns: []string{"Simple warning from provisioner ValidateFunc"}, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - c := terraform.NewResourceConfigRaw(tc.Config) - ws, es := tc.P.Validate(c) - if len(es) > 0 != tc.Err { - t.Fatalf("%d: %#v %s", i, es, es) - } - if (tc.Warns != nil || len(ws) != 0) && !reflect.DeepEqual(ws, tc.Warns) { - t.Fatalf("%d: warnings mismatch, actual: %#v", i, ws) - } - }) - } -} - -func TestProvisionerApply(t *testing.T) { - cases := []struct { - Name string - P *Provisioner - Conn map[string]string - Config map[string]interface{} - Err bool - }{ - { - "Basic config", - &Provisioner{ - ConnSchema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - ApplyFunc: func(ctx context.Context) error { - cd := ctx.Value(ProvConnDataKey).(*ResourceData) - d := ctx.Value(ProvConfigDataKey).(*ResourceData) - if d.Get("foo").(int) != 42 { - return fmt.Errorf("bad config data") - } - if cd.Get("foo").(string) != "bar" { - return fmt.Errorf("bad conn data") - } - - return nil - }, - }, - map[string]string{ - "foo": "bar", - }, - map[string]interface{}{ - "foo": 42, - }, - false, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - c := terraform.NewResourceConfigRaw(tc.Config) - - state := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: tc.Conn, - }, - } - - err := tc.P.Apply(nil, state, c) - if err != nil != tc.Err { - t.Fatalf("%d: %s", 
i, err) - } - }) - } -} - -func TestProvisionerApply_nilState(t *testing.T) { - p := &Provisioner{ - ConnSchema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - ApplyFunc: func(ctx context.Context) error { - return nil - }, - } - - conf := map[string]interface{}{ - "foo": 42, - } - - c := terraform.NewResourceConfigRaw(conf) - err := p.Apply(nil, nil, c) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvisionerStop(t *testing.T) { - var p Provisioner - - // Verify stopch blocks - ch := p.StopContext().Done() - select { - case <-ch: - t.Fatal("should not be stopped") - case <-time.After(10 * time.Millisecond): - } - - // Stop it - if err := p.Stop(); err != nil { - t.Fatalf("err: %s", err) - } - - select { - case <-ch: - case <-time.After(10 * time.Millisecond): - t.Fatal("should be stopped") - } -} - -func TestProvisionerStop_apply(t *testing.T) { - p := &Provisioner{ - ConnSchema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - ApplyFunc: func(ctx context.Context) error { - <-ctx.Done() - return nil - }, - } - - conn := map[string]string{ - "foo": "bar", - } - - conf := map[string]interface{}{ - "foo": 42, - } - - c := terraform.NewResourceConfigRaw(conf) - state := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: conn, - }, - } - - // Run the apply in a goroutine - doneCh := make(chan struct{}) - go func() { - p.Apply(nil, state, c) - close(doneCh) - }() - - // Should block - select { - case <-doneCh: - t.Fatal("should not be done") - case <-time.After(10 * time.Millisecond): - } - - // Stop! 
- p.Stop() - - select { - case <-doneCh: - case <-time.After(10 * time.Millisecond): - t.Fatal("should be done") - } -} - -func TestProvisionerStop_stopFirst(t *testing.T) { - var p Provisioner - - // Stop it - if err := p.Stop(); err != nil { - t.Fatalf("err: %s", err) - } - - select { - case <-p.StopContext().Done(): - case <-time.After(10 * time.Millisecond): - t.Fatal("should be stopped") - } -} diff --git a/internal/legacy/helper/schema/resource.go b/internal/legacy/helper/schema/resource.go deleted file mode 100644 index 28fa54e38c61..000000000000 --- a/internal/legacy/helper/schema/resource.go +++ /dev/null @@ -1,842 +0,0 @@ -package schema - -import ( - "errors" - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/zclconf/go-cty/cty" -) - -var ReservedDataSourceFields = []string{ - "connection", - "count", - "depends_on", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). -// -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. -// -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. -type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. 
- // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. - Schema map[string]*Schema - - // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. - // - // When unset, SchemaVersion defaults to 0, so provider authors can start - // their Versioning at any integer >= 1 - SchemaVersion int - - // MigrateState is deprecated and any new changes to a resource's schema - // should be handled by StateUpgraders. Existing MigrateState implementations - // should remain for compatibility with existing state. MigrateState will - // still be called if the stored SchemaVersion is less than the - // first version of the StateUpgraders. - // - // MigrateState is responsible for updating an InstanceState with an old - // version to the format expected by the current version of the Schema. - // - // It is called during Refresh if the State's stored SchemaVersion is less - // than the current SchemaVersion of the Resource. - // - // The function is yielded the state's stored SchemaVersion and a pointer to - // the InstanceState that needs updating, as well as the configured - // provider's configured meta interface{}, in case the migration process - // needs to make any remote API calls. - MigrateState StateMigrateFunc - - // StateUpgraders contains the functions responsible for upgrading an - // existing state with an old schema version to a newer schema. 
It is - // called specifically by Terraform when the stored schema version is less - // than the current SchemaVersion of the Resource. - // - // StateUpgraders map specific schema versions to a StateUpgrader - // function. The registered versions are expected to be ordered, - // consecutive values. The initial value may be greater than 0 to account - // for legacy schemas that weren't recorded and can be handled by - // MigrateState. - StateUpgraders []StateUpgrader - - // The functions below are the CRUD operations for this resource. - // - // The only optional operation is Update. If Update is not implemented, - // then updates will not be supported for this resource. - // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. - // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. - // - // If any errors occur during each of the operation, an error should be - // returned. If a resource was partially updated, be careful to enable - // partial state mode for ResourceData and use it accordingly. - // - // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff - // accordingly. If this function isn't set, it will not be called. You - // can also signal existence in the Read method by calling d.SetId("") - // if the Resource is no longer present and should be removed from state. - // The *ResourceData passed to Exists should _not_ be modified. 
- Create CreateFunc - Read ReadFunc - Update UpdateFunc - Delete DeleteFunc - Exists ExistsFunc - - // CustomizeDiff is a custom function for working with the diff that - // Terraform has created for this resource - it can be used to customize the - // diff that has been created, diff values not controlled by configuration, - // or even veto the diff altogether and abort the plan. It is passed a - // *ResourceDiff, a structure similar to ResourceData but lacking most write - // functions like Set, while introducing new functions that work with the - // diff such as SetNew, SetNewComputed, and ForceNew. - // - // The phases Terraform runs this in, and the state available via functions - // like Get and GetChange, are as follows: - // - // * New resource: One run with no state - // * Existing resource: One run with state - // * Existing resource, forced new: One run with state (before ForceNew), - // then one run without state (as if new resource) - // * Tainted resource: No runs (custom diff logic is skipped) - // * Destroy: No runs (standard diff logic is skipped on destroy diffs) - // - // This function needs to be resilient to support all scenarios. - // - // If this function needs to access external API resources, remember to flag - // the RequiresRefresh attribute mentioned below to ensure that - // -refresh=false is blocked when running plan or apply, as this means that - // this resource requires refresh-like behaviour to work effectively. - // - // For the most part, only computed fields can be customized by this - // function. - // - // This function is only allowed on regular resources (not data sources). - CustomizeDiff CustomizeDiffFunc - - // Importer is the ResourceImporter implementation for this resource. - // If this is nil, then this resource does not support importing. If - // this is non-nil, then it supports importing and ResourceImporter - // must be validated. The validity of ResourceImporter is verified - // by InternalValidate on Resource. 
- Importer *ResourceImporter - - // If non-empty, this string is emitted as a warning during Validate. - DeprecationMessage string - - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to it's size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. - Timeouts *ResourceTimeout -} - -// ShimInstanceStateFromValue converts a cty.Value to a -// terraform.InstanceState. -func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { - // Get the raw shimmed value. While this is correct, the set hashes don't - // match those from the Schema. - s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) - - // We now rebuild the state through the ResourceData, so that the set indexes - // match what helper/schema expects. - data, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - return nil, err - } - - s = data.State() - if s == nil { - s = &terraform.InstanceState{} - } - return s, nil -} - -// See Resource documentation. -type CreateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ReadFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type UpdateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type DeleteFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ExistsFunc func(*ResourceData, interface{}) (bool, error) - -// See Resource documentation. 
-type StateMigrateFunc func( - int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) - -type StateUpgrader struct { - // Version is the version schema that this Upgrader will handle, converting - // it to Version+1. - Version int - - // Type describes the schema that this function can upgrade. Type is - // required to decode the schema if the state was stored in a legacy - // flatmap format. - Type cty.Type - - // Upgrade takes the JSON encoded state and the provider meta value, and - // upgrades the state one single schema version. The provided state is - // deocded into the default json types using a map[string]interface{}. It - // is up to the StateUpgradeFunc to ensure that the returned value can be - // encoded using the new schema. - Upgrade StateUpgradeFunc -} - -// See StateUpgrader -type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) - -// See Resource documentation. -type CustomizeDiffFunc func(*ResourceDiff, interface{}) error - -// Apply creates, updates, and/or deletes a resource. 
-func (r *Resource) Apply( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - data, err := schemaMap(r.Schema).Data(s, d) - if err != nil { - return s, err - } - if s != nil && data != nil { - data.providerMeta = s.ProviderMeta - } - - // Instance Diff shoould have the timeout info, need to copy it over to the - // ResourceData meta - rt := ResourceTimeout{} - if _, ok := d.Meta[TimeoutKey]; ok { - if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } else if s != nil { - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") - } - data.timeouts = &rt - - if s == nil { - // The Terraform API dictates that this should never happen, but - // it doesn't hurt to be safe in this case. - s = new(terraform.InstanceState) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource since it is created - if err := r.Delete(data, meta); err != nil { - return r.recordCurrentSchemaVersion(data.State()), err - } - - // Make sure the ID is gone. - data.SetId("") - } - - // If we're only destroying, and not creating, then return - // now since we're done! - if !d.RequiresNew() { - return nil, nil - } - - // Reset the data to be stateless since we just destroyed - data, err = schemaMap(r.Schema).Data(nil, d) - // data was reset, need to re-apply the parsed timeouts - data.timeouts = &rt - if err != nil { - return nil, err - } - } - - err = nil - if data.Id() == "" { - // We're creating, it is a new resource. 
- data.MarkNewResource() - err = r.Create(data, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf("doesn't support update") - } - - err = r.Update(data, meta) - } - - return r.recordCurrentSchemaVersion(data.State()), err -} - -// Diff returns a diff of this resource. -func (r *Resource) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - t := &ResourceTimeout{} - err := t.ConfigDecode(r, c) - - if err != nil { - return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) - } - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) - if err != nil { - return instanceDiff, err - } - - if instanceDiff != nil { - if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) - } - } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") - } - - return instanceDiff, err -} - -func (r *Resource) simpleDiff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) - if err != nil { - return instanceDiff, err - } - - if instanceDiff == nil { - instanceDiff = terraform.NewInstanceDiff() - } - - // Make sure the old value is set in each of the instance diffs. - // This was done by the RequiresNew logic in the full legacy Diff. - for k, attr := range instanceDiff.Attributes { - if attr == nil { - continue - } - if s != nil { - attr.Old = s.Attributes[k] - } - } - - return instanceDiff, nil -} - -// Validate validates the resource configuration against the schema. 
-func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := schemaMap(r.Schema).Validate(c) - - if r.DeprecationMessage != "" { - warns = append(warns, r.DeprecationMessage) - } - - return warns, errs -} - -// ReadDataApply loads the data for a data source, given a diff that -// describes the configuration arguments and desired computed attributes. -func (r *Resource) ReadDataApply( - d *terraform.InstanceDiff, - meta interface{}, -) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch - // on each read, so the source state is always nil. - data, err := schemaMap(r.Schema).Data(nil, d) - if err != nil { - return nil, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - // Data sources can set an ID if they want, but they aren't - // required to; we'll provide a placeholder if they don't, - // to preserve the invariant that all resources have non-empty - // ids. - state.ID = "-" - } - - return r.recordCurrentSchemaVersion(state), err -} - -// RefreshWithoutUpgrade reads the instance state, but does not call -// MigrateState or the StateUpgraders, since those are now invoked in a -// separate API call. -// RefreshWithoutUpgrade is part of the new plugin shims. -func (r *Resource) RefreshWithoutUpgrade( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. 
- data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - if s != nil { - data.providerMeta = s.ProviderMeta - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - if s != nil { - data.providerMeta = s.ProviderMeta - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. 
- data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - // there may be new StateUpgraders that need to be run - s, err := r.upgradeState(s, meta) - if err != nil { - return s, err - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - var err error - - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - migrate := needsMigration && r.MigrateState != nil - - if migrate { - s, err = r.MigrateState(stateSchemaVersion, s, meta) - if err != nil { - return s, err - } - } - - if len(r.StateUpgraders) == 0 { - return s, nil - } - - // If we ran MigrateState, then the stateSchemaVersion value is no longer - // correct. We can expect the first upgrade function to be the correct - // schema type version. - if migrate { - stateSchemaVersion = r.StateUpgraders[0].Version - } - - schemaType := r.CoreConfigSchema().ImpliedType() - // find the expected type to convert the state - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion == upgrader.Version { - schemaType = upgrader.Type - } - } - - // StateUpgraders only operate on the new JSON format state, so the state - // need to be converted. 
- stateVal, err := StateValueFromInstanceState(s, schemaType) - if err != nil { - return nil, err - } - - jsonState, err := StateValueToJSONMap(stateVal, schemaType) - if err != nil { - return nil, err - } - - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion != upgrader.Version { - continue - } - - jsonState, err = upgrader.Upgrade(jsonState, meta) - if err != nil { - return nil, err - } - stateSchemaVersion++ - } - - // now we need to re-flatmap the new state - stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) - if err != nil { - return nil, err - } - - return r.ShimInstanceStateFromValue(stateVal) -} - -// InternalValidate should be called to validate the structure -// of the resource. -// -// This should be called in a unit test for any resource to verify -// before release that a resource is properly configured for use with -// this library. -// -// Provider.InternalValidate() will automatically call this for all of -// the resources it manages, so you don't need to call this manually if it -// is part of a Provider. 
-func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { - if r == nil { - return errors.New("resource is nil") - } - - if !writable { - if r.Create != nil || r.Update != nil || r.Delete != nil { - return fmt.Errorf("must not implement Create, Update or Delete") - } - - // CustomizeDiff cannot be defined for read-only resources - if r.CustomizeDiff != nil { - return fmt.Errorf("cannot implement CustomizeDiff") - } - } - - tsm := topSchemaMap - - if r.isTopLevel() && writable { - // All non-Computed attributes must be ForceNew if Update is not defined - if r.Update == nil { - nonForceNewAttrs := make([]string, 0) - for k, v := range r.Schema { - if !v.ForceNew && !v.Computed { - nonForceNewAttrs = append(nonForceNewAttrs, k) - } - } - if len(nonForceNewAttrs) > 0 { - return fmt.Errorf( - "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) - } - } else { - nonUpdateableAttrs := make([]string, 0) - for k, v := range r.Schema { - if v.ForceNew || v.Computed && !v.Optional { - nonUpdateableAttrs = append(nonUpdateableAttrs, k) - } - } - updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) - if updateableAttrs == 0 { - return fmt.Errorf( - "All fields are ForceNew or Computed w/out Optional, Update is superfluous") - } - } - - tsm = schemaMap(r.Schema) - - // Destroy, and Read are required - if r.Read == nil { - return fmt.Errorf("Read must be implemented") - } - if r.Delete == nil { - return fmt.Errorf("Delete must be implemented") - } - - // If we have an importer, we need to verify the importer. 
- if r.Importer != nil { - if err := r.Importer.InternalValidate(); err != nil { - return err - } - } - - for k, f := range tsm { - if isReservedResourceFieldName(k, f) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - lastVersion := -1 - for _, u := range r.StateUpgraders { - if lastVersion >= 0 && u.Version-lastVersion > 1 { - return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) - } - - if u.Version >= r.SchemaVersion { - return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) - } - - if !u.Type.IsObjectType() { - return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) - } - - if u.Upgrade == nil { - return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) - } - - lastVersion = u.Version - } - - if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { - return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) - } - - // Data source - if r.isTopLevel() && !writable { - tsm = schemaMap(r.Schema) - for k, _ := range tsm { - if isReservedDataSourceFieldName(k) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - return schemaMap(r.Schema).InternalValidate(tsm) -} - -func isReservedDataSourceFieldName(name string) bool { - for _, reservedName := range ReservedDataSourceFields { - if name == reservedName { - return true - } - } - return false -} - -func isReservedResourceFieldName(name string, s *Schema) bool { - // Allow phasing out "id" - // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 - if name == "id" && (s.Deprecated != "" || s.Removed != "") { - return false - } - - for _, reservedName := range ReservedResourceFields { - if name == reservedName { - return true - } - } - return false -} - -// Data returns a ResourceData struct for this Resource. Each return value -// is a separate copy and can be safely modified differently. 
-// -// The data returned from this function has no actual affect on the Resource -// itself (including the state given to this function). -// -// This function is useful for unit tests and ResourceImporter functions. -func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { - result, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - // At the time of writing, this isn't possible (Data never returns - // non-nil errors). We panic to find this in the future if we have to. - // I don't see a reason for Data to ever return an error. - panic(err) - } - - // load the Resource timeouts - result.timeouts = r.Timeouts - if result.timeouts == nil { - result.timeouts = &ResourceTimeout{} - } - - // Set the schema version to latest by default - result.meta = map[string]interface{}{ - "schema_version": strconv.Itoa(r.SchemaVersion), - } - - return result -} - -// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing -// -// TODO: May be able to be removed with the above ResourceData function. -func (r *Resource) TestResourceData() *ResourceData { - return &ResourceData{ - schema: r.Schema, - } -} - -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through in the schema -// of the receiving Resource. -func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { - return SchemasForFlatmapPath(path, r.Schema) -} - -// Returns true if the resource is "top level" i.e. not a sub-resource. -func (r *Resource) isTopLevel() bool { - // TODO: This is a heuristic; replace with a definitive attribute? - return (r.Create != nil || r.Read != nil) -} - -// Determines if a given InstanceState needs to be migrated by checking the -// stored version number with the current SchemaVersion -func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { - // Get the raw interface{} value for the schema version. 
If it doesn't - // exist or is nil then set it to zero. - raw := is.Meta["schema_version"] - if raw == nil { - raw = "0" - } - - // Try to convert it to a string. If it isn't a string then we pretend - // that it isn't set at all. It should never not be a string unless it - // was manually tampered with. - rawString, ok := raw.(string) - if !ok { - rawString = "0" - } - - stateSchemaVersion, _ := strconv.Atoi(rawString) - - // Don't run MigrateState if the version is handled by a StateUpgrader, - // since StateMigrateFuncs are not required to handle unknown versions - maxVersion := r.SchemaVersion - if len(r.StateUpgraders) > 0 { - maxVersion = r.StateUpgraders[0].Version - } - - return stateSchemaVersion < maxVersion, stateSchemaVersion -} - -func (r *Resource) recordCurrentSchemaVersion( - state *terraform.InstanceState) *terraform.InstanceState { - if state != nil && r.SchemaVersion > 0 { - if state.Meta == nil { - state.Meta = make(map[string]interface{}) - } - state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) - } - return state -} - -// Noop is a convenience implementation of resource function which takes -// no action and returns no error. -func Noop(*ResourceData, interface{}) error { - return nil -} - -// RemoveFromState is a convenience implementation of a resource function -// which sets the resource ID to empty string (to remove it from state) -// and returns no error. 
-func RemoveFromState(d *ResourceData, _ interface{}) error { - d.SetId("") - return nil -} diff --git a/internal/legacy/helper/schema/resource_data.go b/internal/legacy/helper/schema/resource_data.go deleted file mode 100644 index 3a61e3493218..000000000000 --- a/internal/legacy/helper/schema/resource_data.go +++ /dev/null @@ -1,561 +0,0 @@ -package schema - -import ( - "log" - "reflect" - "strings" - "sync" - "time" - - "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// ResourceData is used to query and set the attributes of a resource. -// -// ResourceData is the primary argument received for CRUD operations on -// a resource as well as configuration of a provider. It is a powerful -// structure that can be used to not only query data, but check for changes, -// define partial state updates, etc. -// -// The most relevant methods to take a look at are Get, Set, and Partial. -type ResourceData struct { - // Settable (internally) - schema map[string]*Schema - config *terraform.ResourceConfig - state *terraform.InstanceState - diff *terraform.InstanceDiff - meta map[string]interface{} - timeouts *ResourceTimeout - providerMeta cty.Value - - // Don't set - multiReader *MultiLevelFieldReader - setWriter *MapFieldWriter - newState *terraform.InstanceState - partial bool - partialMap map[string]struct{} - once sync.Once - isNew bool - - panicOnError bool -} - -// getResult is the internal structure that is generated when a Get -// is called that contains some extra data that might be used. -type getResult struct { - Value interface{} - ValueProcessed interface{} - Computed bool - Exists bool - Schema *Schema -} - -// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary -// values, bypassing schema. This MUST NOT be used in normal circumstances - -// it exists only to support the remote_state data source. 
-// -// Deprecated: Fully define schema attributes and use Set() instead. -func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { - d.once.Do(d.init) - - d.setWriter.unsafeWriteField(key, value) -} - -// Get returns the data for the given key, or nil if the key doesn't exist -// in the schema. -// -// If the key does exist in the schema but doesn't exist in the configuration, -// then the default value for that type will be returned. For strings, this is -// "", for numbers it is 0, etc. -// -// If you want to test if something is set at all in the configuration, -// use GetOk. -func (d *ResourceData) Get(key string) interface{} { - v, _ := d.GetOk(key) - return v -} - -// GetChange returns the old and new value for a given key. -// -// HasChange should be used to check if a change exists. It is possible -// that both the old and new value are the same if the old value was not -// set and the new value is. This is common, for example, for boolean -// fields which have a zero value of false. -func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { - o, n := d.getChange(key, getSourceState, getSourceDiff) - return o.Value, n.Value -} - -// GetOk returns the data for the given key and whether or not the key -// has been set to a non-zero value at some point. -// -// The first result will not necessarilly be nil if the value doesn't exist. -// The second result should be checked to determine this information. -func (d *ResourceData) GetOk(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - if exists { - // If it exists, we also want to verify it is not the zero-value. - value := r.Value - zero := r.Schema.Type.Zero() - - if eq, ok := value.(Equal); ok { - exists = !eq.Equal(zero) - } else { - exists = !reflect.DeepEqual(value, zero) - } - } - - return r.Value, exists -} - -// GetOkExists returns the data for a given key and whether or not the key -// has been set to a non-zero value. 
This is only useful for determining -// if boolean attributes have been set, if they are Optional but do not -// have a Default value. -// -// This is nearly the same function as GetOk, yet it does not check -// for the zero value of the attribute's type. This allows for attributes -// without a default, to fully check for a literal assignment, regardless -// of the zero-value for that type. -// This should only be used if absolutely required/needed. -func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - return r.Value, exists -} - -func (d *ResourceData) getRaw(key string, level getSource) getResult { - var parts []string - if key != "" { - parts = strings.Split(key, ".") - } - - return d.get(parts, level) -} - -// HasChange returns whether or not the given key has been changed. -func (d *ResourceData) HasChange(key string) bool { - o, n := d.GetChange(key) - - // If the type implements the Equal interface, then call that - // instead of just doing a reflect.DeepEqual. An example where this is - // needed is *Set - if eq, ok := o.(Equal); ok { - return !eq.Equal(n) - } - - return !reflect.DeepEqual(o, n) -} - -// Partial turns partial state mode on/off. -// -// When partial state mode is enabled, then only key prefixes specified -// by SetPartial will be in the final state. This allows providers to return -// partial states for partially applied resources (when errors occur). -func (d *ResourceData) Partial(on bool) { - d.partial = on - if on { - if d.partialMap == nil { - d.partialMap = make(map[string]struct{}) - } - } else { - d.partialMap = nil - } -} - -// Set sets the value for the given key. -// -// If the key is invalid or the value is not a correct type, an error -// will be returned. -func (d *ResourceData) Set(key string, value interface{}) error { - d.once.Do(d.init) - - // If the value is a pointer to a non-struct, get its value and - // use that. 
This allows Set to take a pointer to primitives to - // simplify the interface. - reflectVal := reflect.ValueOf(value) - if reflectVal.Kind() == reflect.Ptr { - if reflectVal.IsNil() { - // If the pointer is nil, then the value is just nil - value = nil - } else { - // Otherwise, we dereference the pointer as long as its not - // a pointer to a struct, since struct pointers are allowed. - reflectVal = reflect.Indirect(reflectVal) - if reflectVal.Kind() != reflect.Struct { - value = reflectVal.Interface() - } - } - } - - err := d.setWriter.WriteField(strings.Split(key, "."), value) - if err != nil && d.panicOnError { - panic(err) - } - return err -} - -// SetPartial adds the key to the final state output while -// in partial state mode. The key must be a root key in the schema (i.e. -// it cannot be "list.0"). -// -// If partial state mode is disabled, then this has no effect. Additionally, -// whenever partial state mode is toggled, the partial data is cleared. -func (d *ResourceData) SetPartial(k string) { - if d.partial { - d.partialMap[k] = struct{}{} - } -} - -func (d *ResourceData) MarkNewResource() { - d.isNew = true -} - -func (d *ResourceData) IsNewResource() bool { - return d.isNew -} - -// Id returns the ID of the resource. -func (d *ResourceData) Id() string { - var result string - - if d.state != nil { - result = d.state.ID - if result == "" { - result = d.state.Attributes["id"] - } - } - - if d.newState != nil { - result = d.newState.ID - if result == "" { - result = d.newState.Attributes["id"] - } - } - - return result -} - -// ConnInfo returns the connection info for this resource. -func (d *ResourceData) ConnInfo() map[string]string { - if d.newState != nil { - return d.newState.Ephemeral.ConnInfo - } - - if d.state != nil { - return d.state.Ephemeral.ConnInfo - } - - return nil -} - -// SetId sets the ID of the resource. If the value is blank, then the -// resource is destroyed. 
-func (d *ResourceData) SetId(v string) { - d.once.Do(d.init) - d.newState.ID = v - - // once we transition away from the legacy state types, "id" will no longer - // be a special field, and will become a normal attribute. - // set the attribute normally - d.setWriter.unsafeWriteField("id", v) - - // Make sure the newState is also set, otherwise the old value - // may get precedence. - if d.newState.Attributes == nil { - d.newState.Attributes = map[string]string{} - } - d.newState.Attributes["id"] = v -} - -// SetConnInfo sets the connection info for a resource. -func (d *ResourceData) SetConnInfo(v map[string]string) { - d.once.Do(d.init) - d.newState.Ephemeral.ConnInfo = v -} - -// SetType sets the ephemeral type for the data. This is only required -// for importing. -func (d *ResourceData) SetType(t string) { - d.once.Do(d.init) - d.newState.Ephemeral.Type = t -} - -// State returns the new InstanceState after the diff and any Set -// calls. -func (d *ResourceData) State() *terraform.InstanceState { - var result terraform.InstanceState - result.ID = d.Id() - result.Meta = d.meta - - // If we have no ID, then this resource doesn't exist and we just - // return nil. - if result.ID == "" { - return nil - } - - if d.timeouts != nil { - if err := d.timeouts.StateEncode(&result); err != nil { - log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) - } - } - - // Look for a magic key in the schema that determines we skip the - // integrity check of fields existing in the schema, allowing dynamic - // keys to be created. - hasDynamicAttributes := false - for k, _ := range d.schema { - if k == "__has_dynamic_attributes" { - hasDynamicAttributes = true - log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) - } - } - - // In order to build the final state attributes, we read the full - // attribute set as a map[string]interface{}, write it to a MapFieldWriter, - // and then use that map. 
- rawMap := make(map[string]interface{}) - for k := range d.schema { - source := getSourceSet - if d.partial { - source = getSourceState - if _, ok := d.partialMap[k]; ok { - source = getSourceSet - } - } - - raw := d.get([]string{k}, source) - if raw.Exists && !raw.Computed { - rawMap[k] = raw.Value - if raw.ValueProcessed != nil { - rawMap[k] = raw.ValueProcessed - } - } - } - - mapW := &MapFieldWriter{Schema: d.schema} - if err := mapW.WriteField(nil, rawMap); err != nil { - log.Printf("[ERR] Error writing fields: %s", err) - return nil - } - - result.Attributes = mapW.Map() - - if hasDynamicAttributes { - // If we have dynamic attributes, just copy the attributes map - // one for one into the result attributes. - for k, v := range d.setWriter.Map() { - // Don't clobber schema values. This limits usage of dynamic - // attributes to names which _do not_ conflict with schema - // keys! - if _, ok := result.Attributes[k]; !ok { - result.Attributes[k] = v - } - } - } - - if d.newState != nil { - result.Ephemeral = d.newState.Ephemeral - } - - // TODO: This is hacky and we can remove this when we have a proper - // state writer. We should instead have a proper StateFieldWriter - // and use that. - for k, schema := range d.schema { - if schema.Type != TypeMap { - continue - } - - if result.Attributes[k] == "" { - delete(result.Attributes, k) - } - } - - if v := d.Id(); v != "" { - result.Attributes["id"] = d.Id() - } - - if d.state != nil { - result.Tainted = d.state.Tainted - } - - return &result -} - -// Timeout returns the data for the given timeout key -// Returns a duration of 20 minutes for any key not found, or not found and no default. 
-func (d *ResourceData) Timeout(key string) time.Duration { - key = strings.ToLower(key) - - // System default of 20 minutes - defaultTimeout := 20 * time.Minute - - if d.timeouts == nil { - return defaultTimeout - } - - var timeout *time.Duration - switch key { - case TimeoutCreate: - timeout = d.timeouts.Create - case TimeoutRead: - timeout = d.timeouts.Read - case TimeoutUpdate: - timeout = d.timeouts.Update - case TimeoutDelete: - timeout = d.timeouts.Delete - } - - if timeout != nil { - return *timeout - } - - if d.timeouts.Default != nil { - return *d.timeouts.Default - } - - return defaultTimeout -} - -func (d *ResourceData) init() { - // Initialize the field that will store our new state - var copyState terraform.InstanceState - if d.state != nil { - copyState = *d.state.DeepCopy() - } - d.newState = ©State - - // Initialize the map for storing set data - d.setWriter = &MapFieldWriter{Schema: d.schema} - - // Initialize the reader for getting data from the - // underlying sources (config, diff, etc.) - readers := make(map[string]FieldReader) - var stateAttributes map[string]string - if d.state != nil { - stateAttributes = d.state.Attributes - readers["state"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(stateAttributes), - } - } - if d.config != nil { - readers["config"] = &ConfigFieldReader{ - Schema: d.schema, - Config: d.config, - } - } - if d.diff != nil { - readers["diff"] = &DiffFieldReader{ - Schema: d.schema, - Diff: d.diff, - Source: &MultiLevelFieldReader{ - Levels: []string{"state", "config"}, - Readers: readers, - }, - } - } - readers["set"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(d.setWriter.Map()), - } - d.multiReader = &MultiLevelFieldReader{ - Levels: []string{ - "state", - "config", - "diff", - "set", - }, - - Readers: readers, - } -} - -func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool, bool) { - // Get the change between the state and the config. 
- o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) - if !o.Exists { - o.Value = nil - } - if !n.Exists { - n.Value = nil - } - - // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false -} - -func (d *ResourceData) getChange( - k string, - oldLevel getSource, - newLevel getSource) (getResult, getResult) { - var parts, parts2 []string - if k != "" { - parts = strings.Split(k, ".") - parts2 = strings.Split(k, ".") - } - - o := d.get(parts, oldLevel) - n := d.get(parts2, newLevel) - return o, n -} - -func (d *ResourceData) get(addr []string, source getSource) getResult { - d.once.Do(d.init) - - level := "set" - flags := source & ^getSourceLevelMask - exact := flags&getSourceExact != 0 - source = source & getSourceLevelMask - if source >= getSourceSet { - level = "set" - } else if source >= getSourceDiff { - level = "diff" - } else if source >= getSourceConfig { - level = "config" - } else { - level = "state" - } - - var result FieldReadResult - var err error - if exact { - result, err = d.multiReader.ReadFieldExact(addr, level) - } else { - result, err = d.multiReader.ReadFieldMerge(addr, level) - } - if err != nil { - panic(err) - } - - // If the result doesn't exist, then we set the value to the zero value - var schema *Schema - if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { - schema = schemaL[len(schemaL)-1] - } - - if result.Value == nil && schema != nil { - result.Value = result.ValueOrZero(schema) - } - - // Transform the FieldReadResult into a getResult. It might be worth - // merging these two structures one day. 
- return getResult{ - Value: result.Value, - ValueProcessed: result.ValueProcessed, - Computed: result.Computed, - Exists: result.Exists, - Schema: schema, - } -} - -func (d *ResourceData) GetProviderMeta(dst interface{}) error { - if d.providerMeta.IsNull() { - return nil - } - return gocty.FromCtyValue(d.providerMeta, &dst) -} diff --git a/internal/legacy/helper/schema/resource_data_test.go b/internal/legacy/helper/schema/resource_data_test.go deleted file mode 100644 index 22ad45b6b82b..000000000000 --- a/internal/legacy/helper/schema/resource_data_test.go +++ /dev/null @@ -1,3564 +0,0 @@ -package schema - -import ( - "fmt" - "math" - "os" - "reflect" - "testing" - "time" - - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -func TestResourceDataGet(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - Value interface{} - }{ - // #0 - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "bar", - NewComputed: true, - }, - }, - }, - - Key: "availability_zone", - Value: "", - }, - - // #1 - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "availability_zone", - - Value: "foo", - }, - - // #2 - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: 
&terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo!", - NewExtra: "foo", - }, - }, - }, - - Key: "availability_zone", - Value: "foo", - }, - - // #3 - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "bar", - }, - }, - - Diff: nil, - - Key: "availability_zone", - - Value: "bar", - }, - - // #4 - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "bar", - NewComputed: true, - }, - }, - }, - - Key: "availability_zone", - Value: "", - }, - - // #5 - { - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "port": "80", - }, - }, - - Diff: nil, - - Key: "port", - - Value: 80, - }, - - // #6 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.0": "1", - "ports.1": "2", - "ports.2": "5", - }, - }, - - Key: "ports.1", - - Value: 2, - }, - - // #7 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.0": "1", - "ports.1": "2", - 
"ports.2": "5", - }, - }, - - Key: "ports.#", - - Value: 3, - }, - - // #8 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Key: "ports.#", - - Value: 0, - }, - - // #9 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.0": "1", - "ports.1": "2", - "ports.2": "5", - }, - }, - - Key: "ports", - - Value: []interface{}{1, 2, 5}, - }, - - // #10 - { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ingress.#": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ingress.0.from": &terraform.ResourceAttrDiff{ - Old: "", - New: "8080", - }, - }, - }, - - Key: "ingress.0", - - Value: map[string]interface{}{ - "from": 8080, - }, - }, - - // #11 - { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ingress.#": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ingress.0.from": &terraform.ResourceAttrDiff{ - Old: "", - New: "8080", - }, - }, - }, - - Key: "ingress", - - Value: []interface{}{ - map[string]interface{}{ - "from": 8080, - }, - }, - }, - - // #12 Computed get - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: 
map[string]string{ - "availability_zone": "foo", - }, - }, - - Key: "availability_zone", - - Value: "foo", - }, - - // #13 Full object - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "", - - Value: map[string]interface{}{ - "availability_zone": "foo", - }, - }, - - // #14 List of maps - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "2", - }, - "config_vars.0.foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - "config_vars.1.bar": &terraform.ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - - Key: "config_vars", - - Value: []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - // #15 List of maps in state - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "2", - "config_vars.0.foo": "baz", - "config_vars.1.bar": "bar", - }, - }, - - Diff: nil, - - Key: "config_vars", - - Value: []interface{}{ - map[string]interface{}{ - "foo": "baz", - }, - map[string]interface{}{ - "bar": "bar", - }, - }, - }, - - // #16 List of maps with removal in diff - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - 
Type: TypeMap, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "1", - "config_vars.0.FOO": "bar", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "config_vars.0.FOO": &terraform.ResourceAttrDiff{ - Old: "bar", - NewRemoved: true, - }, - }, - }, - - Key: "config_vars", - - Value: []interface{}{}, - }, - - // #17 Sets - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.80": "80", - }, - }, - - Diff: nil, - - Key: "ports", - - Value: []interface{}{80}, - }, - - // #18 - { - Schema: map[string]*Schema{ - "data": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{ - Type: TypeInt, - Required: true, - }, - - "value": &Schema{ - Type: TypeString, - Required: true, - }, - }, - }, - Set: func(a interface{}) int { - m := a.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "data.#": "1", - "data.10.index": "10", - "data.10.value": "50", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "data.10.value": &terraform.ResourceAttrDiff{ - Old: "50", - New: "80", - }, - }, - }, - - Key: "data", - - Value: []interface{}{ - map[string]interface{}{ - "index": 10, - "value": "80", - }, - }, - }, - - // #19 Empty Set - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", 
- - Value: []interface{}{}, - }, - - // #20 Float zero - { - Schema: map[string]*Schema{ - "ratio": &Schema{ - Type: TypeFloat, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ratio", - - Value: 0.0, - }, - - // #21 Float given - { - Schema: map[string]*Schema{ - "ratio": &Schema{ - Type: TypeFloat, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ratio": "0.5", - }, - }, - - Diff: nil, - - Key: "ratio", - - Value: 0.5, - }, - - // #22 Float diff - { - Schema: map[string]*Schema{ - "ratio": &Schema{ - Type: TypeFloat, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ratio": "-0.5", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ratio": &terraform.ResourceAttrDiff{ - Old: "-0.5", - New: "33.0", - }, - }, - }, - - Key: "ratio", - - Value: 33.0, - }, - - // #23 Sets with removed elements - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.80": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "1", - }, - "ports.80": &terraform.ResourceAttrDiff{ - Old: "80", - New: "80", - }, - "ports.8080": &terraform.ResourceAttrDiff{ - Old: "8080", - New: "0", - NewRemoved: true, - }, - }, - }, - - Key: "ports", - - Value: []interface{}{80}, - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - v := d.Get(tc.Key) - if s, ok := v.(*Set); ok { - v = s.List() - } - - if !reflect.DeepEqual(v, tc.Value) { 
- t.Fatalf("Bad: %d\n\n%#v\n\nExpected: %#v", i, v, tc.Value) - } - } -} - -func TestResourceDataGetChange(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - OldValue interface{} - NewValue interface{} - }{ - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "availability_zone", - - OldValue: "", - NewValue: "foo", - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "availability_zone", - - OldValue: "foo", - NewValue: "foo", - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - o, n := d.GetChange(tc.Key) - if !reflect.DeepEqual(o, tc.OldValue) { - t.Fatalf("Old Bad: %d\n\n%#v", i, o) - } - if !reflect.DeepEqual(n, tc.NewValue) { - t.Fatalf("New Bad: %d\n\n%#v", i, n) - } - } -} - -func TestResourceDataGetOk(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - Value interface{} - Ok bool - }{ - /* - * Primitives - */ - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: 
true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - }, - }, - }, - - Key: "availability_zone", - Value: "", - Ok: false, - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - - Key: "availability_zone", - Value: "", - Ok: false, - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: "", - Ok: false, - }, - - /* - * Lists - */ - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []interface{}{}, - Ok: false, - }, - - /* - * Map - */ - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeMap, - Optional: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: map[string]interface{}{}, - Ok: false, - }, - - /* - * Set - */ - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []interface{}{}, - Ok: false, - }, - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports.0", - Value: 0, - Ok: false, - }, - - { - Schema: 
map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "0", - }, - }, - }, - - Key: "ports", - Value: []interface{}{}, - Ok: false, - }, - - // Further illustrates and clarifiies the GetOk semantics from #933, and - // highlights the limitation that zero-value config is currently - // indistinguishable from unset config. - { - Schema: map[string]*Schema{ - "from_port": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "from_port": &terraform.ResourceAttrDiff{ - Old: "", - New: "0", - }, - }, - }, - - Key: "from_port", - Value: 0, - Ok: false, - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - v, ok := d.GetOk(tc.Key) - if s, ok := v.(*Set); ok { - v = s.List() - } - - if !reflect.DeepEqual(v, tc.Value) { - t.Fatalf("Bad: %d\n\n%#v", i, v) - } - if ok != tc.Ok { - t.Fatalf("%d: expected ok: %t, got: %t", i, tc.Ok, ok) - } - } -} - -func TestResourceDataGetOkExists(t *testing.T) { - cases := []struct { - Name string - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - Value interface{} - Ok bool - }{ - /* - * Primitives - */ - { - Name: "string-literal-empty", - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - Old: "", - New: "", - }, - }, - }, - - Key: "availability_zone", - Value: "", - Ok: true, - }, - - { - Name: 
"string-computed-empty", - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - - Key: "availability_zone", - Value: "", - Ok: false, - }, - - { - Name: "string-optional-computed-nil-diff", - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: "", - Ok: false, - }, - - /* - * Lists - */ - - { - Name: "list-optional", - Schema: map[string]*Schema{ - "ports": { - Type: TypeList, - Optional: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []interface{}{}, - Ok: false, - }, - - /* - * Map - */ - - { - Name: "map-optional", - Schema: map[string]*Schema{ - "ports": { - Type: TypeMap, - Optional: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: map[string]interface{}{}, - Ok: false, - }, - - /* - * Set - */ - - { - Name: "set-optional", - Schema: map[string]*Schema{ - "ports": { - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []interface{}{}, - Ok: false, - }, - - { - Name: "set-optional-key", - Schema: map[string]*Schema{ - "ports": { - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports.0", - Value: 0, - Ok: false, - }, - - { - Name: "bool-literal-empty", - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - Diff: 
&terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - Old: "", - New: "", - }, - }, - }, - - Key: "availability_zone", - Value: false, - Ok: true, - }, - - { - Name: "bool-literal-set", - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - New: "true", - }, - }, - }, - - Key: "availability_zone", - Value: true, - Ok: true, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("%s err: %s", tc.Name, err) - } - - v, ok := d.GetOkExists(tc.Key) - if s, ok := v.(*Set); ok { - v = s.List() - } - - if !reflect.DeepEqual(v, tc.Value) { - t.Fatalf("Bad %s: \n%#v", tc.Name, v) - } - if ok != tc.Ok { - t.Fatalf("%s: expected ok: %t, got: %t", tc.Name, tc.Ok, ok) - } - }) - } -} - -func TestResourceDataTimeout(t *testing.T) { - cases := []struct { - Name string - Rd *ResourceData - Expected *ResourceTimeout - }{ - { - Name: "Basic example default", - Rd: &ResourceData{timeouts: timeoutForValues(10, 3, 0, 15, 0)}, - Expected: expectedTimeoutForValues(10, 3, 0, 15, 0), - }, - { - Name: "Resource and config match update, create", - Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 3, 0, 0)}, - Expected: expectedTimeoutForValues(10, 0, 3, 0, 0), - }, - { - Name: "Resource provides default", - Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 0, 7)}, - Expected: expectedTimeoutForValues(10, 7, 7, 7, 7), - }, - { - Name: "Resource provides default and delete", - Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 15, 7)}, - Expected: expectedTimeoutForValues(10, 7, 7, 15, 7), - }, - { - Name: "Resource provides default, config overwrites other values", - Rd: 
&ResourceData{timeouts: timeoutForValues(10, 3, 0, 0, 13)}, - Expected: expectedTimeoutForValues(10, 3, 13, 13, 13), - }, - { - Name: "Resource has no config", - Rd: &ResourceData{}, - Expected: expectedTimeoutForValues(0, 0, 0, 0, 0), - }, - } - - keys := timeoutKeys() - for i, c := range cases { - t.Run(fmt.Sprintf("%d-%s", i, c.Name), func(t *testing.T) { - - for _, k := range keys { - got := c.Rd.Timeout(k) - var ex *time.Duration - switch k { - case TimeoutCreate: - ex = c.Expected.Create - case TimeoutRead: - ex = c.Expected.Read - case TimeoutUpdate: - ex = c.Expected.Update - case TimeoutDelete: - ex = c.Expected.Delete - case TimeoutDefault: - ex = c.Expected.Default - } - - if got > 0 && ex == nil { - t.Fatalf("Unexpected value in (%s), case %d check 1:\n\texpected: %#v\n\tgot: %#v", k, i, ex, got) - } - if got == 0 && ex != nil { - t.Fatalf("Unexpected value in (%s), case %d check 2:\n\texpected: %#v\n\tgot: %#v", k, i, *ex, got) - } - - // confirm values - if ex != nil { - if got != *ex { - t.Fatalf("Timeout %s case (%d) expected (%s), got (%s)", k, i, *ex, got) - } - } - } - - }) - } -} - -func TestResourceDataHasChange(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - Change bool - }{ - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "availability_zone", - - Change: true, - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - - 
Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Key: "availability_zone", - - Change: false, - }, - - { - Schema: map[string]*Schema{ - "tags": &Schema{ - Type: TypeMap, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "tags.Name": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "foo", - }, - }, - }, - - Key: "tags", - - Change: true, - }, - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.80": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - }, - }, - }, - - Key: "ports", - - Change: true, - }, - - // https://github.com/hashicorp/terraform/issues/927 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { return a.(int) }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.80": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "tags.foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - - Key: "ports", - - Change: false, - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - actual := d.HasChange(tc.Key) - if actual != tc.Change { - t.Fatalf("Bad: %d %#v", i, actual) - } - } -} - -func TestResourceDataSet(t *testing.T) { - var testNilPtr 
*string - - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Key string - Value interface{} - Err bool - GetKey string - GetValue interface{} - - // GetPreProcess can be set to munge the return value before being - // compared to GetValue - GetPreProcess func(interface{}) interface{} - }{ - // #0: Basic good - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: "foo", - - GetKey: "availability_zone", - GetValue: "foo", - }, - - // #1: Basic int - { - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "port", - Value: 80, - - GetKey: "port", - GetValue: 80, - }, - - // #2: Basic bool - { - Schema: map[string]*Schema{ - "vpc": &Schema{ - Type: TypeBool, - Optional: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "vpc", - Value: true, - - GetKey: "vpc", - GetValue: true, - }, - - // #3 - { - Schema: map[string]*Schema{ - "vpc": &Schema{ - Type: TypeBool, - Optional: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "vpc", - Value: false, - - GetKey: "vpc", - GetValue: false, - }, - - // #4: Invalid type - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: 80, - Err: true, - - GetKey: "availability_zone", - GetValue: "", - }, - - // #5: List of primitives, set list - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Computed: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []int{1, 2, 5}, - - GetKey: "ports", - GetValue: []interface{}{1, 2, 5}, - }, - - // #6: List of 
primitives, set list with error - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Computed: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ports", - Value: []interface{}{1, "NOPE", 5}, - Err: true, - - GetKey: "ports", - GetValue: []interface{}{}, - }, - - // #7: Set a list of maps - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - }, - }, - - State: nil, - - Diff: nil, - - Key: "config_vars", - Value: []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "bar": "baz", - }, - }, - Err: false, - - GetKey: "config_vars", - GetValue: []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - // #8: Set, with list - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.0": "100", - "ports.1": "80", - "ports.2": "80", - }, - }, - - Key: "ports", - Value: []interface{}{100, 125, 125}, - - GetKey: "ports", - GetValue: []interface{}{100, 125}, - }, - - // #9: Set, with Set - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.100": "100", - "ports.80": "80", - "ports.81": "81", - }, - }, - - Key: "ports", - Value: &Set{ - m: map[string]interface{}{ - "1": 1, - "2": 2, - }, - }, - - GetKey: "ports", - GetValue: []interface{}{1, 2}, - }, - - // #10: Set single item - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: 
TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.100": "100", - "ports.80": "80", - }, - }, - - Key: "ports.100", - Value: 256, - Err: true, - - GetKey: "ports", - GetValue: []interface{}{100, 80}, - }, - - // #11: Set with nested set - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Elem: &Resource{ - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - }, - - "set": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - }, - Set: func(a interface{}) int { - return a.(map[string]interface{})["port"].(int) - }, - }, - }, - - State: nil, - - Key: "ports", - Value: []interface{}{ - map[string]interface{}{ - "port": 80, - }, - }, - - GetKey: "ports", - GetValue: []interface{}{ - map[string]interface{}{ - "port": 80, - "set": []interface{}{}, - }, - }, - - GetPreProcess: func(v interface{}) interface{} { - if v == nil { - return v - } - s, ok := v.([]interface{}) - if !ok { - return v - } - for _, v := range s { - m, ok := v.(map[string]interface{}) - if !ok { - continue - } - if m["set"] == nil { - continue - } - if s, ok := m["set"].(*Set); ok { - m["set"] = s.List() - } - } - - return v - }, - }, - - // #12: List of floats, set list - { - Schema: map[string]*Schema{ - "ratios": &Schema{ - Type: TypeList, - Computed: true, - Elem: &Schema{Type: TypeFloat}, - }, - }, - - State: nil, - - Diff: nil, - - Key: "ratios", - Value: []float64{1.0, 2.2, 5.5}, - - GetKey: "ratios", - GetValue: []interface{}{1.0, 2.2, 5.5}, - }, - - // #12: Set of floats, set list - { - Schema: map[string]*Schema{ - "ratios": &Schema{ - Type: TypeSet, - Computed: true, - Elem: &Schema{Type: TypeFloat}, - Set: func(a interface{}) int { - return int(math.Float64bits(a.(float64))) - }, - }, - }, - - State: 
nil, - - Diff: nil, - - Key: "ratios", - Value: []float64{1.0, 2.2, 5.5}, - - GetKey: "ratios", - GetValue: []interface{}{1.0, 2.2, 5.5}, - }, - - // #13: Basic pointer - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: testPtrTo("foo"), - - GetKey: "availability_zone", - GetValue: "foo", - }, - - // #14: Basic nil value - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: testPtrTo(nil), - - GetKey: "availability_zone", - GetValue: "", - }, - - // #15: Basic nil pointer - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: nil, - - Key: "availability_zone", - Value: testNilPtr, - - GetKey: "availability_zone", - GetValue: "", - }, - - // #16: Set in a list - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Elem: &Resource{ - Schema: map[string]*Schema{ - "set": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - }, - }, - }, - - State: nil, - - Key: "ports", - Value: []interface{}{ - map[string]interface{}{ - "set": []interface{}{ - 1, - }, - }, - }, - - GetKey: "ports", - GetValue: []interface{}{ - map[string]interface{}{ - "set": []interface{}{ - 1, - }, - }, - }, - GetPreProcess: func(v interface{}) interface{} { - if v == nil { - return v - } - s, ok := v.([]interface{}) - if !ok { - return v - } - for _, v := range s { - m, ok := v.(map[string]interface{}) - if !ok { - continue - } - if m["set"] == nil { - continue - } - if s, ok := m["set"].(*Set); ok { - m["set"] = s.List() - } - } - - return v - }, - }, - } - - 
oldEnv := os.Getenv(PanicOnErr) - os.Setenv(PanicOnErr, "") - defer os.Setenv(PanicOnErr, oldEnv) - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = d.Set(tc.Key, tc.Value) - if err != nil != tc.Err { - t.Fatalf("%d err: %s", i, err) - } - - v := d.Get(tc.GetKey) - if s, ok := v.(*Set); ok { - v = s.List() - } - - if tc.GetPreProcess != nil { - v = tc.GetPreProcess(v) - } - - if !reflect.DeepEqual(v, tc.GetValue) { - t.Fatalf("Get Bad: %d\n\n%#v", i, v) - } - } -} - -func TestResourceDataState_dynamicAttributes(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Set map[string]interface{} - UnsafeSet map[string]string - Result *terraform.InstanceState - }{ - { - Schema: map[string]*Schema{ - "__has_dynamic_attributes": { - Type: TypeString, - Optional: true, - }, - - "schema_field": { - Type: TypeString, - Required: true, - }, - }, - - State: nil, - - Diff: nil, - - Set: map[string]interface{}{ - "schema_field": "present", - }, - - UnsafeSet: map[string]string{ - "test1": "value", - "test2": "value", - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "schema_field": "present", - "test1": "value", - "test2": "value", - }, - }, - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - for k, v := range tc.Set { - d.Set(k, v) - } - - for k, v := range tc.UnsafeSet { - d.UnsafeSetFieldRaw(k, v) - } - - // Set an ID so that the state returned is not nil - idSet := false - if d.Id() == "" { - idSet = true - d.SetId("foo") - } - - actual := d.State() - - // If we set an ID, then undo what we did so the comparison works - if actual != nil && idSet { - actual.ID = "" - delete(actual.Attributes, "id") - } - - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("Bad: 
%d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) - } - } -} - -func TestResourceDataState_schema(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - State *terraform.InstanceState - Diff *terraform.InstanceDiff - Set map[string]interface{} - Result *terraform.InstanceState - Partial []string - }{ - // #0 Basic primitive in diff - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - }, - - // #1 Basic primitive set override - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Set: map[string]interface{}{ - "availability_zone": "bar", - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "bar", - }, - }, - }, - - // #2 - { - Schema: map[string]*Schema{ - "vpc": &Schema{ - Type: TypeBool, - Optional: true, - }, - }, - - State: nil, - - Diff: nil, - - Set: map[string]interface{}{ - "vpc": true, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "vpc": "true", - }, - }, - }, - - // #3 Basic primitive with StateFunc set - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - StateFunc: func(interface{}) string { return "" }, - }, - }, - - State: nil, - - Diff: 
&terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - NewExtra: "foo!", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - }, - - // #4 List - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.0": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "2", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "100", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.0": "80", - "ports.1": "100", - }, - }, - }, - - // #5 List of resources - { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ingress.#": "1", - "ingress.0.from": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ingress.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "2", - }, - "ingress.0.from": &terraform.ResourceAttrDiff{ - Old: "80", - New: "150", - }, - "ingress.1.from": &terraform.ResourceAttrDiff{ - Old: "", - New: "100", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ingress.#": "2", - "ingress.0.from": "150", - "ingress.1.from": "100", - }, - }, - }, - - // #6 List of maps - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - 
}, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "2", - "config_vars.0.%": "2", - "config_vars.0.foo": "bar", - "config_vars.0.bar": "bar", - "config_vars.1.%": "1", - "config_vars.1.bar": "baz", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.0.bar": &terraform.ResourceAttrDiff{ - NewRemoved: true, - }, - }, - }, - - Set: map[string]interface{}{ - "config_vars": []map[string]interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "baz": "bang", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "2", - "config_vars.0.%": "1", - "config_vars.0.foo": "bar", - "config_vars.1.%": "1", - "config_vars.1.baz": "bang", - }, - }, - }, - - // #7 List of maps with removal in diff - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "1", - "config_vars.0.FOO": "bar", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "config_vars.0.FOO": &terraform.ResourceAttrDiff{ - Old: "bar", - NewRemoved: true, - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "0", - }, - }, - }, - - // #8 Basic state with other keys - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", 
- New: "foo", - RequiresNew: true, - }, - }, - }, - - Result: &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "availability_zone": "foo", - }, - }, - }, - - // #9 Sets - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.100": "100", - "ports.80": "80", - "ports.81": "81", - }, - }, - - Diff: nil, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.80": "80", - "ports.81": "81", - "ports.100": "100", - }, - }, - }, - - // #10 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Diff: nil, - - Set: map[string]interface{}{ - "ports": []interface{}{100, 80}, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.80": "80", - "ports.100": "100", - }, - }, - }, - - // #11 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "order": &Schema{ - Type: TypeInt, - }, - - "a": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - }, - - "b": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - }, - }, - }, - Set: func(a interface{}) int { - m := a.(map[string]interface{}) - return m["order"].(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.10.order": "10", - "ports.10.a.#": "1", - "ports.10.a.0": "80", - "ports.20.order": "20", - "ports.20.b.#": "1", - "ports.20.b.0": "100", - }, - }, - - Set: map[string]interface{}{ - "ports": []interface{}{ - 
map[string]interface{}{ - "order": 20, - "b": []interface{}{100}, - }, - map[string]interface{}{ - "order": 10, - "a": []interface{}{80}, - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.10.order": "10", - "ports.10.a.#": "1", - "ports.10.a.0": "80", - "ports.10.b.#": "0", - "ports.20.order": "20", - "ports.20.a.#": "0", - "ports.20.b.#": "1", - "ports.20.b.0": "100", - }, - }, - }, - - /* - * PARTIAL STATES - */ - - // #12 Basic primitive - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - - // #13 List - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.0": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "2", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "100", - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.0": "80", - }, - }, - }, - - // #14 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - 
Partial: []string{}, - - Set: map[string]interface{}{ - "ports": []interface{}{}, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - - // #15 List of resources - { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ingress.#": "1", - "ingress.0.from": "80", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ingress.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "2", - }, - "ingress.0.from": &terraform.ResourceAttrDiff{ - Old: "80", - New: "150", - }, - "ingress.1.from": &terraform.ResourceAttrDiff{ - Old: "", - New: "100", - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ingress.#": "1", - "ingress.0.from": "80", - }, - }, - }, - - // #16 List of maps - { - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{ - Type: TypeMap, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "2", - "config_vars.0.foo": "bar", - "config_vars.0.bar": "bar", - "config_vars.1.bar": "baz", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.0.bar": &terraform.ResourceAttrDiff{ - NewRemoved: true, - }, - }, - }, - - Set: map[string]interface{}{ - "config_vars": []map[string]interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - map[string]interface{}{ - "baz": "bang", - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - // TODO: broken, shouldn't bar be removed? 
- "config_vars.#": "2", - "config_vars.0.%": "2", - "config_vars.0.foo": "bar", - "config_vars.0.bar": "bar", - "config_vars.1.%": "1", - "config_vars.1.bar": "baz", - }, - }, - }, - - // #17 Sets - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.100": "100", - "ports.80": "80", - "ports.81": "81", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.120": &terraform.ResourceAttrDiff{ - New: "120", - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.80": "80", - "ports.81": "81", - "ports.100": "100", - }, - }, - }, - - // #18 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - Partial: []string{}, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - - // #19 Maps - { - Schema: map[string]*Schema{ - "tags": &Schema{ - Type: TypeMap, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "tags.Name": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "tags.%": "1", - "tags.Name": "foo", - }, - }, - }, - - // #20 empty computed map - { - Schema: map[string]*Schema{ - "tags": &Schema{ - Type: TypeMap, - Optional: true, - 
Computed: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "tags.Name": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - }, - }, - - Set: map[string]interface{}{ - "tags": map[string]string{}, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "tags.%": "0", - }, - }, - }, - - // #21 - { - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - - // #22 - { - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Set: map[string]interface{}{ - "foo": "bar", - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "foo": "bar", - }, - }, - }, - - // #23 Set of maps - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{Type: TypeInt}, - "uuids": &Schema{Type: TypeMap}, - }, - }, - Set: func(a interface{}) int { - m := a.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.10.uuids.#": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Set: map[string]interface{}{ - "ports": []interface{}{ - map[string]interface{}{ - "index": 10, - "uuids": map[string]interface{}{ - "80": "value", - }, - }, - }, - }, - - Result: 
&terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.10.index": "10", - "ports.10.uuids.%": "1", - "ports.10.uuids.80": "value", - }, - }, - }, - - // #24 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.100": "100", - "ports.80": "80", - "ports.81": "81", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "3", - New: "0", - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "0", - }, - }, - }, - - // #25 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Diff: nil, - - Set: map[string]interface{}{ - "ports": []interface{}{}, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "0", - }, - }, - }, - - // #26 - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Diff: nil, - - Set: map[string]interface{}{ - "ports": []interface{}{}, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "0", - }, - }, - }, - - // #27 Set lists - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{Type: TypeInt}, - "uuids": &Schema{Type: TypeMap}, - }, - }, - }, - }, - - State: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": 
&terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Set: map[string]interface{}{ - "ports": []interface{}{ - map[string]interface{}{ - "index": 10, - "uuids": map[string]interface{}{ - "80": "value", - }, - }, - }, - }, - - Result: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "1", - "ports.0.index": "10", - "ports.0.uuids.%": "1", - "ports.0.uuids.80": "value", - }, - }, - }, - } - - for i, tc := range cases { - d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - for k, v := range tc.Set { - if err := d.Set(k, v); err != nil { - t.Fatalf("%d err: %s", i, err) - } - } - - // Set an ID so that the state returned is not nil - idSet := false - if d.Id() == "" { - idSet = true - d.SetId("foo") - } - - // If we have partial, then enable partial state mode. - if tc.Partial != nil { - d.Partial(true) - for _, k := range tc.Partial { - d.SetPartial(k) - } - } - - actual := d.State() - - // If we set an ID, then undo what we did so the comparison works - if actual != nil && idSet { - actual.ID = "" - delete(actual.Attributes, "id") - } - - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("Bad: %d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) - } - } -} - -func TestResourceData_nonStringValuesInMap(t *testing.T) { - cases := []struct { - Schema map[string]*Schema - Diff *terraform.InstanceDiff - MapFieldName string - ItemName string - ExpectedType string - }{ - { - Schema: map[string]*Schema{ - "boolMap": &Schema{ - Type: TypeMap, - Elem: TypeBool, - Optional: true, - }, - }, - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "boolMap.%": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "boolMap.boolField": &terraform.ResourceAttrDiff{ - Old: "", - New: "true", - }, - }, - }, - MapFieldName: "boolMap", - ItemName: "boolField", - ExpectedType: "bool", - }, - { - Schema: map[string]*Schema{ - "intMap": &Schema{ - Type: 
TypeMap, - Elem: TypeInt, - Optional: true, - }, - }, - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "intMap.%": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "intMap.intField": &terraform.ResourceAttrDiff{ - Old: "", - New: "8", - }, - }, - }, - MapFieldName: "intMap", - ItemName: "intField", - ExpectedType: "int", - }, - { - Schema: map[string]*Schema{ - "floatMap": &Schema{ - Type: TypeMap, - Elem: TypeFloat, - Optional: true, - }, - }, - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "floatMap.%": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "floatMap.floatField": &terraform.ResourceAttrDiff{ - Old: "", - New: "8.22", - }, - }, - }, - MapFieldName: "floatMap", - ItemName: "floatField", - ExpectedType: "float64", - }, - } - - for _, c := range cases { - d, err := schemaMap(c.Schema).Data(nil, c.Diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - m, ok := d.Get(c.MapFieldName).(map[string]interface{}) - if !ok { - t.Fatalf("expected %q to be castable to a map", c.MapFieldName) - } - field, ok := m[c.ItemName] - if !ok { - t.Fatalf("expected %q in the map", c.ItemName) - } - - typeName := reflect.TypeOf(field).Name() - if typeName != c.ExpectedType { - t.Fatalf("expected %q to be %q, it is %q.", - c.ItemName, c.ExpectedType, typeName) - } - } -} - -func TestResourceDataSetConnInfo(t *testing.T) { - d := &ResourceData{} - d.SetId("foo") - d.SetConnInfo(map[string]string{ - "foo": "bar", - }) - - expected := map[string]string{ - "foo": "bar", - } - - actual := d.State() - if !reflect.DeepEqual(actual.Ephemeral.ConnInfo, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceDataSetMeta_Timeouts(t *testing.T) { - d := &ResourceData{} - d.SetId("foo") - - rt := ResourceTimeout{ - Create: DefaultTimeout(7 * time.Minute), - } - - d.timeouts = &rt - - expected := expectedForValues(7, 0, 0, 0, 0) - - actual := d.State() - if 
!reflect.DeepEqual(actual.Meta[TimeoutKey], expected) { - t.Fatalf("Bad Meta_timeout match:\n\texpected: %#v\n\tgot: %#v", expected, actual.Meta[TimeoutKey]) - } -} - -func TestResourceDataSetId(t *testing.T) { - d := &ResourceData{ - state: &terraform.InstanceState{ - ID: "test", - Attributes: map[string]string{ - "id": "test", - }, - }, - } - d.SetId("foo") - - actual := d.State() - - // SetId should set both the ID field as well as the attribute, to aid in - // transitioning to the new type system. - if actual.ID != "foo" || actual.Attributes["id"] != "foo" { - t.Fatalf("bad: %#v", actual) - } - - d.SetId("") - actual = d.State() - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceDataSetId_clear(t *testing.T) { - d := &ResourceData{ - state: &terraform.InstanceState{ID: "bar"}, - } - d.SetId("") - - actual := d.State() - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceDataSetId_override(t *testing.T) { - d := &ResourceData{ - state: &terraform.InstanceState{ID: "bar"}, - } - d.SetId("foo") - - actual := d.State() - if actual.ID != "foo" { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceDataSetType(t *testing.T) { - d := &ResourceData{} - d.SetId("foo") - d.SetType("bar") - - actual := d.State() - if v := actual.Ephemeral.Type; v != "bar" { - t.Fatalf("bad: %#v", actual) - } -} - -func testPtrTo(raw interface{}) interface{} { - return &raw -} diff --git a/internal/legacy/helper/schema/resource_test.go b/internal/legacy/helper/schema/resource_test.go deleted file mode 100644 index 47f508d81b70..000000000000 --- a/internal/legacy/helper/schema/resource_test.go +++ /dev/null @@ -1,1687 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" - - "github.com/zclconf/go-cty/cty" - ctyjson 
"github.com/zclconf/go-cty/cty/json" -) - -func TestResourceApply_create(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - called := false - r.Create = func(d *ResourceData, m interface{}) error { - called = true - d.SetId("foo") - return nil - } - - var s *terraform.InstanceState = nil - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "42", - }, - }, - } - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !called { - t.Fatal("not called") - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "42", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceApply_Timeout_state(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: &ResourceTimeout{ - Create: DefaultTimeout(40 * time.Minute), - Update: DefaultTimeout(80 * time.Minute), - Delete: DefaultTimeout(40 * time.Minute), - }, - } - - called := false - r.Create = func(d *ResourceData, m interface{}) error { - called = true - d.SetId("foo") - return nil - } - - var s *terraform.InstanceState = nil - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "42", - }, - }, - } - - diffTimeout := &ResourceTimeout{ - Create: DefaultTimeout(40 * time.Minute), - Update: DefaultTimeout(80 * time.Minute), - Delete: DefaultTimeout(40 * time.Minute), - } - - if err := diffTimeout.DiffEncode(d); err != nil { - t.Fatalf("Error encoding timeout to diff: %s", err) - } - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) 
- } - - if !called { - t.Fatal("not called") - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "42", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - TimeoutKey: expectedForValues(40, 0, 80, 40, 0), - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("Not equal in Timeout State:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) - } -} - -// Regression test to ensure that the meta data is read from state, if a -// resource is destroyed and the timeout meta is no longer available from the -// config -func TestResourceApply_Timeout_destroy(t *testing.T) { - timeouts := &ResourceTimeout{ - Create: DefaultTimeout(40 * time.Minute), - Update: DefaultTimeout(80 * time.Minute), - Delete: DefaultTimeout(40 * time.Minute), - } - - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: timeouts, - } - - called := false - var delTimeout time.Duration - r.Delete = func(d *ResourceData, m interface{}) error { - delTimeout = d.Timeout(TimeoutDelete) - called = true - return nil - } - - s := &terraform.InstanceState{ - ID: "bar", - } - - if err := timeouts.StateEncode(s); err != nil { - t.Fatalf("Error encoding to state: %s", err) - } - - d := &terraform.InstanceDiff{ - Destroy: true, - } - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !called { - t.Fatal("delete not called") - } - - if *timeouts.Delete != delTimeout { - t.Fatalf("timeouts don't match, expected (%#v), got (%#v)", timeouts.Delete, delTimeout) - } - - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceDiff_Timeout_diff(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: &ResourceTimeout{ - Create: DefaultTimeout(40 * time.Minute), - Update: DefaultTimeout(80 * time.Minute), - Delete: 
DefaultTimeout(40 * time.Minute), - }, - } - - r.Create = func(d *ResourceData, m interface{}) error { - d.SetId("foo") - return nil - } - - conf := terraform.NewResourceConfigRaw( - map[string]interface{}{ - "foo": 42, - TimeoutsConfigKey: map[string]interface{}{ - "create": "2h", - }, - }, - ) - var s *terraform.InstanceState - - actual, err := r.Diff(s, conf, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "42", - }, - }, - } - - diffTimeout := &ResourceTimeout{ - Create: DefaultTimeout(120 * time.Minute), - Update: DefaultTimeout(80 * time.Minute), - Delete: DefaultTimeout(40 * time.Minute), - } - - if err := diffTimeout.DiffEncode(expected); err != nil { - t.Fatalf("Error encoding timeout to diff: %s", err) - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("Not equal Meta in Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) - } -} - -func TestResourceDiff_CustomizeFunc(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - var called bool - - r.CustomizeDiff = func(d *ResourceDiff, m interface{}) error { - called = true - return nil - } - - conf := terraform.NewResourceConfigRaw( - map[string]interface{}{ - "foo": 42, - }, - ) - - var s *terraform.InstanceState - - _, err := r.Diff(s, conf, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !called { - t.Fatalf("diff customization not called") - } -} - -func TestResourceApply_destroy(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - called := false - r.Delete = func(d *ResourceData, m interface{}) error { - called = true - return nil - } - - s := &terraform.InstanceState{ - ID: "bar", - } - - d := &terraform.InstanceDiff{ - Destroy: true, - } - - actual, err 
:= r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !called { - t.Fatal("delete not called") - } - - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceApply_destroyCreate(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - - "tags": &Schema{ - Type: TypeMap, - Optional: true, - Computed: true, - }, - }, - } - - change := false - r.Create = func(d *ResourceData, m interface{}) error { - change = d.HasChange("tags") - d.SetId("foo") - return nil - } - r.Delete = func(d *ResourceData, m interface{}) error { - return nil - } - - var s *terraform.InstanceState = &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "bar", - "tags.Name": "foo", - }, - } - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "42", - RequiresNew: true, - }, - "tags.Name": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "foo", - RequiresNew: true, - }, - }, - } - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !change { - t.Fatal("should have change") - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "42", - "tags.%": "1", - "tags.Name": "foo", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceApply_destroyPartial(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - SchemaVersion: 3, - } - - r.Delete = func(d *ResourceData, m interface{}) error { - d.Set("foo", 42) - return fmt.Errorf("some error") - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "12", - }, - } - - d := &terraform.InstanceDiff{ - Destroy: true, - } - - actual, err := r.Apply(s, d, nil) - if err == nil { - 
t.Fatal("should error") - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "foo": "42", - }, - Meta: map[string]interface{}{ - "schema_version": "3", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("expected:\n%#v\n\ngot:\n%#v", expected, actual) - } -} - -func TestResourceApply_update(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Update = func(d *ResourceData, m interface{}) error { - d.Set("foo", 42) - return nil - } - - s := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "12", - }, - } - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "13", - }, - }, - } - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "42", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceApply_updateNoCallback(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Update = nil - - s := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "12", - }, - } - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "13", - }, - }, - } - - actual, err := r.Apply(s, d, nil) - if err == nil { - t.Fatal("should error") - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "12", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceApply_isNewResource(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": 
&Schema{ - Type: TypeString, - Optional: true, - }, - }, - } - - updateFunc := func(d *ResourceData, m interface{}) error { - d.Set("foo", "updated") - if d.IsNewResource() { - d.Set("foo", "new-resource") - } - return nil - } - r.Create = func(d *ResourceData, m interface{}) error { - d.SetId("foo") - d.Set("foo", "created") - return updateFunc(d, m) - } - r.Update = updateFunc - - d := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "bla-blah", - }, - }, - } - - // positive test - var s *terraform.InstanceState = nil - - actual, err := r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "new-resource", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("actual: %#v\nexpected: %#v", - actual, expected) - } - - // negative test - s = &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "new-resource", - }, - } - - actual, err = r.Apply(s, d, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected = &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "updated", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("actual: %#v\nexpected: %#v", - actual, expected) - } -} - -func TestResourceInternalValidate(t *testing.T) { - cases := []struct { - In *Resource - Writable bool - Err bool - }{ - 0: { - nil, - true, - true, - }, - - // No optional and no required - 1: { - &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - Required: true, - }, - }, - }, - true, - true, - }, - - // Update undefined for non-ForceNew field - 2: { - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "boo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - 
}, - true, - true, - }, - - // Update defined for ForceNew field - 3: { - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - ForceNew: true, - }, - }, - }, - true, - true, - }, - - // non-writable doesn't need Update, Create or Delete - 4: { - &Resource{ - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - }, - false, - false, - }, - - // non-writable *must not* have Create - 5: { - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - }, - false, - true, - }, - - // writable must have Read - 6: { - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Delete: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - }, - true, - true, - }, - - // writable must have Delete - 7: { - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - }, - true, - true, - }, - - 8: { // Reserved name at root should be disallowed - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Delete: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "count": { - Type: 
TypeInt, - Optional: true, - }, - }, - }, - true, - true, - }, - - 9: { // Reserved name at nested levels should be allowed - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Delete: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "parent_list": &Schema{ - Type: TypeString, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "provisioner": { - Type: TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - true, - false, - }, - - 10: { // Provider reserved name should be allowed in resource - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Delete: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "alias": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - true, - false, - }, - - 11: { // ID should be allowed in data source - &Resource{ - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "id": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - false, - false, - }, - - 12: { // Deprecated ID should be allowed in resource - &Resource{ - Create: func(d *ResourceData, meta interface{}) error { return nil }, - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Update: func(d *ResourceData, meta interface{}) error { return nil }, - Delete: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "id": &Schema{ - Type: TypeString, - Optional: true, - Deprecated: "Use x_id instead", - }, - }, - }, - true, - false, - }, - - 13: { // non-writable must not define CustomizeDiff - 
&Resource{ - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - CustomizeDiff: func(*ResourceDiff, interface{}) error { return nil }, - }, - false, - true, - }, - 14: { // Deprecated resource - &Resource{ - Read: func(d *ResourceData, meta interface{}) error { return nil }, - Schema: map[string]*Schema{ - "goo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - DeprecationMessage: "This resource has been deprecated.", - }, - true, - true, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - sm := schemaMap{} - if tc.In != nil { - sm = schemaMap(tc.In.Schema) - } - - err := tc.In.InternalValidate(sm, tc.Writable) - if err != nil && !tc.Err { - t.Fatalf("%d: expected validation to pass: %s", i, err) - } - if err == nil && tc.Err { - t.Fatalf("%d: expected validation to fail", i) - } - }) - } -} - -func TestResourceRefresh(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - if m != 42 { - return fmt.Errorf("meta not passed") - } - - return d.Set("foo", d.Get("foo").(int)+1) - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "12", - }, - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "foo": "13", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - } - - actual, err := r.Refresh(s, 42) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceRefresh_blankId(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - 
d.SetId("foo") - return nil - } - - s := &terraform.InstanceState{ - ID: "", - Attributes: map[string]string{}, - } - - actual, err := r.Refresh(s, 42) - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceRefresh_delete(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - d.SetId("") - return nil - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "12", - }, - } - - actual, err := r.Refresh(s, 42) - if err != nil { - t.Fatalf("err: %s", err) - } - - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceRefresh_existsError(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Exists = func(*ResourceData, interface{}) (bool, error) { - return false, fmt.Errorf("error") - } - - r.Read = func(d *ResourceData, m interface{}) error { - panic("shouldn't be called") - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "12", - }, - } - - actual, err := r.Refresh(s, 42) - if err == nil { - t.Fatalf("should error") - } - if !reflect.DeepEqual(actual, s) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceRefresh_noExists(t *testing.T) { - r := &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Exists = func(*ResourceData, interface{}) (bool, error) { - return false, nil - } - - r.Read = func(d *ResourceData, m interface{}) error { - panic("shouldn't be called") - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "foo": "12", - }, - } - - actual, err := r.Refresh(s, 42) - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != nil { - t.Fatalf("should have no state") - } 
-} - -func TestResourceRefresh_needsMigration(t *testing.T) { - // Schema v2 it deals only in newfoo, which tracks foo as an int - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - return d.Set("newfoo", d.Get("newfoo").(int)+1) - } - - r.MigrateState = func( - v int, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // Real state migration functions will probably switch on this value, - // but we'll just assert on it for now. - if v != 1 { - t.Fatalf("Expected StateSchemaVersion to be 1, got %d", v) - } - - if meta != 42 { - t.Fatal("Expected meta to be passed through to the migration function") - } - - oldfoo, err := strconv.ParseFloat(s.Attributes["oldfoo"], 64) - if err != nil { - t.Fatalf("err: %#v", err) - } - s.Attributes["newfoo"] = strconv.Itoa(int(oldfoo * 10)) - delete(s.Attributes, "oldfoo") - - return s, nil - } - - // State is v1 and deals in oldfoo, which tracked foo as a float at 1/10th - // the scale of newfoo - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "oldfoo": "1.2", - }, - Meta: map[string]interface{}{ - "schema_version": "1", - }, - } - - actual, err := r.Refresh(s, 42) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "newfoo": "13", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) - } -} - -func TestResourceRefresh_noMigrationNeeded(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - return d.Set("newfoo", d.Get("newfoo").(int)+1) - } 
- - r.MigrateState = func( - v int, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - t.Fatal("Migrate function shouldn't be called!") - return nil, nil - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "newfoo": "12", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - } - - actual, err := r.Refresh(s, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "newfoo": "13", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) - } -} - -func TestResourceRefresh_stateSchemaVersionUnset(t *testing.T) { - r := &Resource{ - // Version 1 > Version 0 - SchemaVersion: 1, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - return d.Set("newfoo", d.Get("newfoo").(int)+1) - } - - r.MigrateState = func( - v int, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - s.Attributes["newfoo"] = s.Attributes["oldfoo"] - return s, nil - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "oldfoo": "12", - }, - } - - actual, err := r.Refresh(s, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "newfoo": "13", - }, - Meta: map[string]interface{}{ - "schema_version": "1", - }, - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) - } -} - -func TestResourceRefresh_migrateStateErr(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - 
- r.Read = func(d *ResourceData, m interface{}) error { - t.Fatal("Read should never be called!") - return nil - } - - r.MigrateState = func( - v int, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - return s, fmt.Errorf("triggering an error") - } - - s := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "oldfoo": "12", - }, - } - - _, err := r.Refresh(s, nil) - if err == nil { - t.Fatal("expected error, but got none!") - } -} - -func TestResourceData(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - state := &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "foo": "42", - }, - } - - data := r.Data(state) - if data.Id() != "foo" { - t.Fatalf("err: %s", data.Id()) - } - if v := data.Get("foo"); v != 42 { - t.Fatalf("bad: %#v", v) - } - - // Set expectations - state.Meta = map[string]interface{}{ - "schema_version": "2", - } - - result := data.State() - if !reflect.DeepEqual(result, state) { - t.Fatalf("bad: %#v", result) - } -} - -func TestResourceData_blank(t *testing.T) { - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - data := r.Data(nil) - if data.Id() != "" { - t.Fatalf("err: %s", data.Id()) - } - if v := data.Get("foo"); v != 0 { - t.Fatalf("bad: %#v", v) - } -} - -func TestResourceData_timeouts(t *testing.T) { - one := 1 * time.Second - two := 2 * time.Second - three := 3 * time.Second - four := 4 * time.Second - five := 5 * time.Second - - timeouts := &ResourceTimeout{ - Create: &one, - Read: &two, - Update: &three, - Delete: &four, - Default: &five, - } - - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - Timeouts: timeouts, - } - - data := r.Data(nil) - if data.Id() != "" { - 
t.Fatalf("err: %s", data.Id()) - } - - if !reflect.DeepEqual(timeouts, data.timeouts) { - t.Fatalf("incorrect ResourceData timeouts: %#v\n", *data.timeouts) - } -} - -func TestResource_UpgradeState(t *testing.T) { - // While this really only calls itself and therefore doesn't test any of - // the Resource code directly, it still serves as an example of registering - // a StateUpgrader. - r := &Resource{ - SchemaVersion: 2, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - r.StateUpgraders = []StateUpgrader{ - { - Version: 1, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "oldfoo": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - - oldfoo, ok := m["oldfoo"].(float64) - if !ok { - t.Fatalf("expected 1.2, got %#v", m["oldfoo"]) - } - m["newfoo"] = int(oldfoo * 10) - delete(m, "oldfoo") - - return m, nil - }, - }, - } - - oldStateAttrs := map[string]string{ - "id": "bar", - "oldfoo": "1.2", - } - - // convert the legacy flatmap state to the json equivalent - ty := r.StateUpgraders[0].Type - val, err := hcl2shim.HCL2ValueFromFlatmap(oldStateAttrs, ty) - if err != nil { - t.Fatal(err) - } - js, err := ctyjson.Marshal(val, ty) - if err != nil { - t.Fatal(err) - } - - // unmarshal the state using the json default types - var m map[string]interface{} - if err := json.Unmarshal(js, &m); err != nil { - t.Fatal(err) - } - - actual, err := r.StateUpgraders[0].Upgrade(m, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := map[string]interface{}{ - "id": "bar", - "newfoo": 12, - } - - if !reflect.DeepEqual(expected, actual) { - t.Fatalf("expected: %#v\ngot: %#v\n", expected, actual) - } -} - -func TestResource_ValidateUpgradeState(t *testing.T) { - r := &Resource{ - SchemaVersion: 3, - Schema: map[string]*Schema{ - "newfoo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - } - - if err := r.InternalValidate(nil, true); err 
!= nil { - t.Fatal(err) - } - - r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ - Version: 2, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - }), - Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { - return m, nil - }, - }) - if err := r.InternalValidate(nil, true); err != nil { - t.Fatal(err) - } - - // check for missing type - r.StateUpgraders[0].Type = cty.Type{} - if err := r.InternalValidate(nil, true); err == nil { - t.Fatal("StateUpgrader must have type") - } - r.StateUpgraders[0].Type = cty.Object(map[string]cty.Type{ - "id": cty.String, - }) - - // check for missing Upgrade func - r.StateUpgraders[0].Upgrade = nil - if err := r.InternalValidate(nil, true); err == nil { - t.Fatal("StateUpgrader must have an Upgrade func") - } - r.StateUpgraders[0].Upgrade = func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { - return m, nil - } - - // check for skipped version - r.StateUpgraders[0].Version = 0 - r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ - Version: 2, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - }), - Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { - return m, nil - }, - }) - if err := r.InternalValidate(nil, true); err == nil { - t.Fatal("StateUpgraders cannot skip versions") - } - - // add the missing version, but fail because it's still out of order - r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ - Version: 1, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - }), - Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { - return m, nil - }, - }) - if err := r.InternalValidate(nil, true); err == nil { - t.Fatal("upgraders must be defined in order") - } - - r.StateUpgraders[1], r.StateUpgraders[2] = r.StateUpgraders[2], r.StateUpgraders[1] - if err := r.InternalValidate(nil, true); err != nil { - t.Fatal(err) - } - - // can't add an 
upgrader for a schema >= the current version - r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ - Version: 3, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - }), - Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { - return m, nil - }, - }) - if err := r.InternalValidate(nil, true); err == nil { - t.Fatal("StateUpgraders cannot have a version >= current SchemaVersion") - } -} - -// The legacy provider will need to be able to handle both types of schema -// transformations, which has been retrofitted into the Refresh method. -func TestResource_migrateAndUpgrade(t *testing.T) { - r := &Resource{ - SchemaVersion: 4, - Schema: map[string]*Schema{ - "four": { - Type: TypeInt, - Required: true, - }, - }, - // this MigrateState will take the state to version 2 - MigrateState: func(v int, is *terraform.InstanceState, _ interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - _, ok := is.Attributes["zero"] - if !ok { - return nil, fmt.Errorf("zero not found in %#v", is.Attributes) - } - is.Attributes["one"] = "1" - delete(is.Attributes, "zero") - fallthrough - case 1: - _, ok := is.Attributes["one"] - if !ok { - return nil, fmt.Errorf("one not found in %#v", is.Attributes) - } - is.Attributes["two"] = "2" - delete(is.Attributes, "one") - default: - return nil, fmt.Errorf("invalid schema version %d", v) - } - return is, nil - }, - } - - r.Read = func(d *ResourceData, m interface{}) error { - return d.Set("four", 4) - } - - r.StateUpgraders = []StateUpgrader{ - { - Version: 2, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "two": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["two"].(float64) - if !ok { - return nil, fmt.Errorf("two not found in %#v", m) - } - m["three"] = float64(3) - delete(m, "two") - return m, nil - }, - }, - { - Version: 3, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - 
"three": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["three"].(float64) - if !ok { - return nil, fmt.Errorf("three not found in %#v", m) - } - m["four"] = float64(4) - delete(m, "three") - return m, nil - }, - }, - } - - testStates := []*terraform.InstanceState{ - { - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "zero": "0", - }, - Meta: map[string]interface{}{ - "schema_version": "0", - }, - }, - { - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "one": "1", - }, - Meta: map[string]interface{}{ - "schema_version": "1", - }, - }, - { - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "two": "2", - }, - Meta: map[string]interface{}{ - "schema_version": "2", - }, - }, - { - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "three": "3", - }, - Meta: map[string]interface{}{ - "schema_version": "3", - }, - }, - { - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "four": "4", - }, - Meta: map[string]interface{}{ - "schema_version": "4", - }, - }, - } - - for i, s := range testStates { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - newState, err := r.Refresh(s, nil) - if err != nil { - t.Fatal(err) - } - - expected := &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "four": "4", - }, - Meta: map[string]interface{}{ - "schema_version": "4", - }, - } - - if !cmp.Equal(expected, newState, equateEmpty) { - t.Fatal(cmp.Diff(expected, newState, equateEmpty)) - } - }) - } -} diff --git a/internal/legacy/helper/schema/schema.go b/internal/legacy/helper/schema/schema.go deleted file mode 100644 index 99657c466071..000000000000 --- a/internal/legacy/helper/schema/schema.go +++ /dev/null @@ -1,1854 +0,0 @@ -// schema is a high-level framework for easily writing new providers -// for Terraform. 
Usage of schema is recommended over attempting to write -// to the low-level plugin interfaces manually. -// -// schema breaks down provider creation into simple CRUD operations for -// resources. The logic of diffing, destroying before creating, updating -// or creating, etc. is all handled by the framework. The plugin author -// only needs to implement a configuration schema and the CRUD operations and -// everything else is meant to just work. -// -// A good starting point is to view the Provider structure. -package schema - -import ( - "context" - "fmt" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/mapstructure" -) - -// Name of ENV variable which (if not empty) prefers panic over error -const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" - -// type used for schema package context keys -type contextKey string - -var ( - protoVersionMu sync.Mutex - protoVersion5 = false -) - -func isProto5() bool { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - return protoVersion5 - -} - -// SetProto5 enables a feature flag for any internal changes required required -// to work with the new plugin protocol. This should not be called by -// provider. -func SetProto5() { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - protoVersion5 = true -} - -// Schema is used to describe the structure of a value. -// -// Read the documentation of the struct elements for important details. -type Schema struct { - // Type is the type of the value and must be one of the ValueType values. - // - // This type not only determines what type is expected/valid in configuring - // this value, but also what type is returned when ResourceData.Get is - // called. 
The types returned by Get are: - // - // TypeBool - bool - // TypeInt - int - // TypeFloat - float64 - // TypeString - string - // TypeList - []interface{} - // TypeMap - map[string]interface{} - // TypeSet - *schema.Set - // - Type ValueType - - // ConfigMode allows for overriding the default behaviors for mapping - // schema entries onto configuration constructs. - // - // By default, the Elem field is used to choose whether a particular - // schema is represented in configuration as an attribute or as a nested - // block; if Elem is a *schema.Resource then it's a block and it's an - // attribute otherwise. - // - // If Elem is *schema.Resource then setting ConfigMode to - // SchemaConfigModeAttr will force it to be represented in configuration - // as an attribute, which means that the Computed flag can be used to - // provide default elements when the argument isn't set at all, while still - // allowing the user to force zero elements by explicitly assigning an - // empty list. - // - // When Computed is set without Optional, the attribute is not settable - // in configuration at all and so SchemaConfigModeAttr is the automatic - // behavior, and SchemaConfigModeBlock is not permitted. - ConfigMode SchemaConfigMode - - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. - // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. - Optional bool - Required bool - - // If this is non-nil, the provided function will be used during diff - // of this field. If this is nil, a default diff for the type of the - // schema will be used. - // - // This allows comparison based on something other than primitive, list - // or map equality - for example SSH public keys may be considered - // equivalent regardless of trailing whitespace. 
- DiffSuppressFunc SchemaDiffSuppressFunc - - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. - // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. - // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. - // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. - // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} - DefaultFunc SchemaDefaultFunc - - // Description is used as the description for docs or asking for user - // input. It should be relatively short (a few sentences max) and should - // be formatted to fit a CLI. - Description string - - // InputDefault is the default value to use for when inputs are requested. - // This differs from Default in that if Default is set, no input is - // asked for. If Input is asked, this will be the default value offered. - InputDefault string - - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. 
- // - // StateFunc is a function called to change the value of this before - // storing it in the state (and likewise before comparing for diffs). - // The use for this is for example with large strings, you may want - // to simply store the hash of it. - Computed bool - ForceNew bool - StateFunc SchemaStateFunc - - // The following fields are only set for a TypeList, TypeSet, or TypeMap. - // - // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type that is one of the primitives: TypeString, TypeBool, - // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a - // *Resource. If it is *Schema, the element type is just a simple value. - // If it is *Resource, the element type is a complex structure, - // potentially managed via its own CRUD actions on the API. - Elem interface{} - - // The following fields are only set for a TypeList or TypeSet. - // - // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. - // - // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. - // - // If the field Optional is set to true then MinItems is ignored and thus - // effectively zero. - MaxItems int - MinItems int - - // PromoteSingle originally allowed for a single element to be assigned - // where a primitive list was expected, but this no longer works from - // Terraform v0.12 onwards (Terraform Core will require a list to be set - // regardless of what this is set to) and so only applies to Terraform v0.11 - // and earlier, and so should be used only to retain this functionality - // for those still using v0.11 with a provider that formerly used this. 
- PromoteSingle bool - - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. - Set SchemaSetFunc - - // ComputedWhen is a set of queries on the configuration. Whenever any - // of these things is changed, it will require a recompute (this requires - // that Computed is set to true). - // - // NOTE: This currently does not work. - ComputedWhen []string - - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - ConflictsWith []string - - // When Deprecated is set, this attribute is deprecated. - // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. - Deprecated string - - // When Removed is set, this attribute has been removed from the schema - // - // Removed attributes can be left in the Schema to generate informative error - // messages for the user when they show up in resource configurations. - // This string is the message shown to the user with instructions on - // what do to about the removed attribute. - Removed string - - // ValidateFunc allows individual fields to define arbitrary validation - // logic. It is yielded the provided config value as an interface{} that is - // guaranteed to be of the proper Schema type, and it can yield warnings or - // errors based on inspection of that value. - // - // ValidateFunc is honored only when the schema's Type is set to TypeInt, - // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. - ValidateFunc SchemaValidateFunc - - // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. 
It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. - Sensitive bool -} - -// SchemaConfigMode is used to influence how a schema item is mapped into a -// corresponding configuration construct, using the ConfigMode field of -// Schema. -type SchemaConfigMode int - -const ( - SchemaConfigModeAuto SchemaConfigMode = iota - SchemaConfigModeAttr - SchemaConfigModeBlock -) - -// SchemaDiffSuppressFunc is a function which can be used to determine -// whether a detected diff on a schema element is "valid" or not, and -// suppress it from the plan if necessary. -// -// Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool - -// SchemaDefaultFunc is a function called to return a default value for -// a field. -type SchemaDefaultFunc func() (interface{}, error) - -// EnvDefaultFunc is a helper function that returns the value of the -// given environment variable, if one exists, or the default value -// otherwise. -func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return dv, nil - } -} - -// MultiEnvDefaultFunc is a helper function that returns the value of the first -// environment variable in the given list that returns a non-empty value. If -// none of the environment variables return a value, the default value is -// returned. -func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v, nil - } - } - return dv, nil - } -} - -// SchemaSetFunc is a function that must return a unique ID for the given -// element. This unique ID is used to store the element in a hash. 
-type SchemaSetFunc func(interface{}) int - -// SchemaStateFunc is a function used to convert some type to a string -// to be stored in the state. -type SchemaStateFunc func(interface{}) string - -// SchemaValidateFunc is a function used to validate a single field in the -// schema. -type SchemaValidateFunc func(interface{}, string) ([]string, []error) - -func (s *Schema) GoString() string { - return fmt.Sprintf("*%#v", *s) -} - -// Returns a default value for this schema by either reading Default or -// evaluating DefaultFunc. If neither of these are defined, returns nil. -func (s *Schema) DefaultValue() (interface{}, error) { - if s.Default != nil { - return s.Default, nil - } - - if s.DefaultFunc != nil { - defaultValue, err := s.DefaultFunc() - if err != nil { - return nil, fmt.Errorf("error loading default: %s", err) - } - return defaultValue, nil - } - - return nil, nil -} - -// Returns a zero value for the schema. -func (s *Schema) ZeroValue() interface{} { - // If it's a set then we'll do a bit of extra work to provide the - // right hashing function in our empty value. 
- if s.Type == TypeSet { - setFunc := s.Set - if setFunc == nil { - // Default set function uses the schema to hash the whole value - elem := s.Elem - switch t := elem.(type) { - case *Schema: - setFunc = HashSchema(t) - case *Resource: - setFunc = HashResource(t) - default: - panic("invalid set element type") - } - } - return &Set{F: setFunc} - } else { - return s.Type.Zero() - } -} - -func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { - if d == nil { - return d - } - - if s.Type == TypeBool { - normalizeBoolString := func(s string) string { - switch s { - case "0": - return "false" - case "1": - return "true" - } - return s - } - d.Old = normalizeBoolString(d.Old) - d.New = normalizeBoolString(d.New) - } - - if s.Computed && !d.NewRemoved && d.New == "" { - // Computed attribute without a new value set - d.NewComputed = true - } - - if s.ForceNew { - // ForceNew, mark that this field is requiring new under the - // following conditions, explained below: - // - // * Old != New - There is a change in value. This field - // is therefore causing a new resource. - // - // * NewComputed - This field is being computed, hence a - // potential change in value, mark as causing a new resource. - d.RequiresNew = d.Old != d.New || d.NewComputed - } - - if d.NewRemoved { - return d - } - - if s.Computed { - // FIXME: This is where the customized bool from getChange finally - // comes into play. It allows the previously incorrect behavior - // of an empty string being used as "unset" when the value is - // computed. This should be removed once we can properly - // represent an unset/nil value from the configuration. - if !customized { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. 
- return nil - } - } - - if d.New == "" && !d.NewComputed { - // Computed attribute without a new value set - d.NewComputed = true - } - } - - if s.Sensitive { - // Set the Sensitive flag so output is hidden in the UI - d.Sensitive = true - } - - return d -} - -// InternalMap is used to aid in the transition to the new schema types and -// protocol. The name is not meant to convey any usefulness, as this is not to -// be used directly by any providers. -type InternalMap = schemaMap - -// schemaMap is a wrapper that adds nice functions on top of schemas. -type schemaMap map[string]*Schema - -func (m schemaMap) panicOnError() bool { - if os.Getenv(PanicOnErr) != "" { - return true - } - return false -} - -// Data returns a ResourceData for the given schema, state, and diff. -// -// The diff is optional. -func (m schemaMap) Data( - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*ResourceData, error) { - return &ResourceData{ - schema: m, - state: s, - diff: d, - panicOnError: m.panicOnError(), - }, nil -} - -// DeepCopy returns a copy of this schemaMap. The copy can be safely modified -// without affecting the original. -func (m *schemaMap) DeepCopy() schemaMap { - copy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - return *copy.(*schemaMap) -} - -// Diff returns the diff for a resource given the schema map, -// state, and configuration. 
-func (m schemaMap) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - customizeDiff CustomizeDiffFunc, - meta interface{}, - handleRequiresNew bool) (*terraform.InstanceDiff, error) { - result := new(terraform.InstanceDiff) - result.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Make sure to mark if the resource is tainted - if s != nil { - result.DestroyTainted = s.Tainted - } - - d := &ResourceData{ - schema: m, - state: s, - config: c, - panicOnError: m.panicOnError(), - } - - for k, schema := range m { - err := m.diff(k, schema, result, d, false) - if err != nil { - return nil, err - } - } - - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) - } - } - - // If this is a non-destroy diff, call any custom diff logic that has been - // defined. - if !result.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result, rd, false) - if err != nil { - return nil, err - } - } - } - - if handleRequiresNew { - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted - - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. 
- d.state = nil - d.init() - - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) - if err != nil { - return nil, err - } - } - - // Re-run customization - if !result2.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, d.state, result2) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) - if err != nil { - return nil, err - } - } - } - - // Force all the fields to not force a new since we know what we - // want to force new. - for k, attr := range result2.Attributes { - if attr == nil { - continue - } - - if attr.RequiresNew { - attr.RequiresNew = false - } - - if s != nil { - attr.Old = s.Attributes[k] - } - } - - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue - } - - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr - } - - if attr.RequiresNew { - newAttr.RequiresNew = true - } - - result2.Attributes[k] = newAttr - } - - // And set the diff! - result = result2 - } - - } - - // Go through and detect all of the ComputedWhens now that we've - // finished the diff. - // TODO - - if result.Empty() { - // If we don't have any diff elements, just return nil - return nil, nil - } - - return result, nil -} - -// Input implements the terraform.ResourceProvider method by asking -// for input for required configuration keys that don't have a value. -func (m schemaMap) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - keys := make([]string, 0, len(m)) - for k, _ := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := m[k] - - // Skip things that don't require config, if that is even valid - // for a provider schema. 
- // Required XOR Optional must always be true to validate, so we only - // need to check one. - if v.Optional { - continue - } - - // Deprecated fields should never prompt - if v.Deprecated != "" { - continue - } - - // Skip things that have a value of some sort already - if _, ok := c.Raw[k]; ok { - continue - } - - // Skip if it has a default value - defaultValue, err := v.DefaultValue() - if err != nil { - return nil, fmt.Errorf("%s: error loading default: %s", k, err) - } - if defaultValue != nil { - continue - } - - var value interface{} - switch v.Type { - case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: - continue - case TypeString: - value, err = m.inputString(input, k, v) - default: - panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) - } - - if err != nil { - return nil, fmt.Errorf( - "%s: %s", k, err) - } - - c.Config[k] = value - } - - return c, nil -} - -// Validate validates the configuration against this schema mapping. -func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return m.validateObject("", m, c) -} - -// InternalValidate validates the format of this schema. This should be called -// from a unit test (and not in user-path code) to verify that a schema -// is properly built. 
-func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { - return m.internalValidate(topSchemaMap, false) -} - -func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { - if topSchemaMap == nil { - topSchemaMap = m - } - for k, v := range m { - if v.Type == TypeInvalid { - return fmt.Errorf("%s: Type must be specified", k) - } - - if v.Optional && v.Required { - return fmt.Errorf("%s: Optional or Required must be set, not both", k) - } - - if v.Required && v.Computed { - return fmt.Errorf("%s: Cannot be both Required and Computed", k) - } - - if !v.Required && !v.Optional && !v.Computed { - return fmt.Errorf("%s: One of optional, required, or computed must be set", k) - } - - computedOnly := v.Computed && !v.Optional - - switch v.ConfigMode { - case SchemaConfigModeBlock: - if _, ok := v.Elem.(*Resource); !ok { - return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) - } - if attrsOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) - } - if computedOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) - } - case SchemaConfigModeAttr: - // anything goes - case SchemaConfigModeAuto: - // Since "Auto" for Elem: *Resource would create a nested block, - // and that's impossible inside an attribute, we require it to be - // explicitly overridden as mode "Attr" for clarity. 
- if _, ok := v.Elem.(*Resource); ok { - if attrsOnly { - return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) - } - } - default: - return fmt.Errorf("%s: invalid ConfigMode value", k) - } - - if v.Computed && v.Default != nil { - return fmt.Errorf("%s: Default must be nil if computed", k) - } - - if v.Required && v.Default != nil { - return fmt.Errorf("%s: Default cannot be set with Required", k) - } - - if len(v.ComputedWhen) > 0 && !v.Computed { - return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) - } - - if len(v.ConflictsWith) > 0 && v.Required { - return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) - } - - if len(v.ConflictsWith) > 0 { - for _, key := range v.ConflictsWith { - parts := strings.Split(key, ".") - sm := topSchemaMap - var target *Schema - for _, part := range parts { - // Skip index fields - if _, err := strconv.Atoi(part); err == nil { - continue - } - - var ok bool - if target, ok = sm[part]; !ok { - return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s) at part (%s)", k, key, part) - } - - if subResource, ok := target.Elem.(*Resource); ok { - sm = schemaMap(subResource.Schema) - } - } - if target == nil { - return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) - } - if target.Required { - return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) - } - - if len(target.ComputedWhen) > 0 { - return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) - } - } - } - - if v.Type == TypeList || v.Type == TypeSet { - if v.Elem == nil { - return fmt.Errorf("%s: Elem must be set for lists", k) - } - - if v.Default != nil { - return fmt.Errorf("%s: Default is not valid for lists or sets", k) - } - - if v.Type != TypeSet && v.Set != nil { - return fmt.Errorf("%s: Set can only be set for TypeSet", k) - } - - switch t := v.Elem.(type) { - 
case *Resource: - attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr - - if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { - return err - } - case *Schema: - bad := t.Computed || t.Optional || t.Required - if bad { - return fmt.Errorf( - "%s: Elem must have only Type set", k) - } - } - } else { - if v.MaxItems > 0 || v.MinItems > 0 { - return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) - } - } - - // Computed-only field - if v.Computed && !v.Optional { - if v.ValidateFunc != nil { - return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ - "there's nothing to validate on computed-only field", k) - } - if v.DiffSuppressFunc != nil { - return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ - " between config and state representation. "+ - "There is no config for computed-only field, nothing to compare.", k) - } - } - - if v.ValidateFunc != nil { - switch v.Type { - case TypeList, TypeSet: - return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) - } - } - - if v.Deprecated == "" && v.Removed == "" { - if !isValidFieldName(k) { - return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) - } - } - } - - return nil -} - -func isValidFieldName(name string) bool { - re := regexp.MustCompile("^[a-z0-9_]+$") - return re.MatchString(name) -} - -// resourceDiffer is an interface that is used by the private diff functions. -// This helps facilitate diff logic for both ResourceData and ResoureDiff with -// minimal divergence in code. 
-type resourceDiffer interface { - diffChange(string) (interface{}, interface{}, bool, bool, bool) - Get(string) interface{} - GetChange(string) (interface{}, interface{}) - GetOk(string) (interface{}, bool) - HasChange(string) bool - Id() string -} - -func (m schemaMap) diff( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - unsupressedDiff := new(terraform.InstanceDiff) - unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - var err error - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - err = m.diffString(k, schema, unsupressedDiff, d, all) - case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) - case TypeMap: - err = m.diffMap(k, schema, unsupressedDiff, d, all) - case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) - default: - err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) - } - - for attrK, attrV := range unsupressedDiff.Attributes { - switch rd := d.(type) { - case *ResourceData: - if schema.DiffSuppressFunc != nil && attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { - // If this attr diff is suppressed, we may still need it in the - // overall diff if it's contained within a set. Rather than - // dropping the diff, make it a NOOP. - if !all { - continue - } - - attrV = &terraform.ResourceAttrDiff{ - Old: attrV.Old, - New: attrV.Old, - } - } - } - diff.Attributes[attrK] = attrV - } - - return err -} - -func (m schemaMap) diffList( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - o, n, _, computedList, customized := d.diffChange(k) - if computedList { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. 
- if o != nil && n == nil && !computedList && schema.Computed { - return nil - } - - if o == nil { - o = []interface{}{} - } - if n == nil { - n = []interface{}{} - } - if s, ok := o.(*Set); ok { - o = s.List() - } - if s, ok := n.(*Set); ok { - n = s.List() - } - os := o.([]interface{}) - vs := n.([]interface{}) - - // If the new value was set, and the two are equal, then we're done. - // We have to do this check here because sets might be NOT - // reflect.DeepEqual so we need to wait until we get the []interface{} - if !all && nSet && reflect.DeepEqual(os, vs) { - return nil - } - - // Get the counts - oldLen := len(os) - newLen := len(vs) - oldStr := strconv.FormatInt(int64(oldLen), 10) - - // If the whole list is computed, then say that the # is computed - if computedList { - diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ - Old: oldStr, - NewComputed: true, - RequiresNew: schema.ForceNew, - } - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - computed := oldLen == 0 && newLen == 0 && schema.Computed - if changed || computed || all { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - newStr := "" - if !computed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Figure out the maximum - maxLen := oldLen - if newLen > maxLen { - maxLen = newLen - } - - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for i := 0; i < maxLen; i++ { - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) - if err != nil { - return err - } - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeList). 
- t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - for i := 0; i < maxLen; i++ { - subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) - if err != nil { - return err - } - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - - return nil -} - -func (m schemaMap) diffMap( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - prefix := k + "." - - // First get all the values from the state - var stateMap, configMap map[string]string - o, n, _, nComputed, customized := d.diffChange(k) - if err := mapstructure.WeakDecode(o, &stateMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(n, &configMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - // Keep track of whether the state _exists_ at all prior to clearing it - stateExists := o != nil - - // Delete any count values, since we don't use those - delete(configMap, "%") - delete(stateMap, "%") - - // Check if the number of elements has changed. - oldLen, newLen := len(stateMap), len(configMap) - changed := oldLen != newLen - if oldLen != 0 && newLen == 0 && schema.Computed { - changed = false - } - - // It is computed if we have no old value, no new value, the schema - // says it is computed, and it didn't exist in the state before. The - // last point means: if it existed in the state, even empty, then it - // has already been computed. - computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists - - // If the count has changed or we're computed, then add a diff for the - // count. "nComputed" means that the new value _contains_ a value that - // is computed. We don't do granular diffs for this yet, so we mark the - // whole map as computed. 
- if changed || computed || nComputed { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed || nComputed, - ForceNew: schema.ForceNew, - } - - oldStr := strconv.FormatInt(int64(oldLen), 10) - newStr := "" - if !computed && !nComputed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".%"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // If the new map is nil and we're computed, then ignore it. - if n == nil && schema.Computed { - return nil - } - - // Now we compare, preferring values from the config map - for k, v := range configMap { - old, ok := stateMap[k] - delete(stateMap, k) - - if old == v && ok && !all { - continue - } - - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: old, - New: v, - }, - customized, - ) - } - for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }, - customized, - ) - } - - return nil -} - -func (m schemaMap) diffSet( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - o, n, _, computedSet, customized := d.diffChange(k) - if computedSet { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedSet && schema.Computed { - return nil - } - - if o == nil { - o = schema.ZeroValue().(*Set) - } - if n == nil { - n = schema.ZeroValue().(*Set) - } - os := o.(*Set) - ns := n.(*Set) - - // If the new value was set, compare the listCode's to determine if - // the two are equal. 
Comparing listCode's instead of the actual values - // is needed because there could be computed values in the set which - // would result in false positives while comparing. - if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { - return nil - } - - // Get the counts - oldLen := os.Len() - newLen := ns.Len() - oldStr := strconv.Itoa(oldLen) - newStr := strconv.Itoa(newLen) - - // Build a schema for our count - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - // If the set computed then say that the # is computed - if computedSet || schema.Computed && !nSet { - // If # already exists, equals 0 and no new set is supplied, there - // is nothing to record in the diff - count, ok := d.GetOk(k + ".#") - if ok && count.(int) == 0 && !nSet && !computedSet { - return nil - } - - // Set the count but make sure that if # does not exist, we don't - // use the zeroed value - countStr := strconv.Itoa(count.(int)) - if !ok { - countStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }, - customized, - ) - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Build the list of codes that will make up our set. This is the - // removed codes as well as all the codes in the new codes. 
- codes := make([][]string, 2) - codes[0] = os.Difference(ns).listCode() - codes[1] = ns.listCode() - for _, list := range codes { - for _, code := range list { - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) - if err != nil { - return err - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeSet). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) - if err != nil { - return err - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - } - } - - return nil -} - -func (m schemaMap) diffString( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - var originalN interface{} - var os, ns string - o, n, _, computed, customized := d.diffChange(k) - if schema.StateFunc != nil && n != nil { - originalN = n - n = schema.StateFunc(n) - } - nraw := n - if nraw == nil && o != nil { - nraw = schema.Type.Zero() - } - if err := mapstructure.WeakDecode(o, &os); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(nraw, &ns); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - if os == ns && !all && !computed { - // They're the same value. If there old value is not blank or we - // have an ID, then return right away since we're already set up. 
- if os != "" || d.Id() != "" { - return nil - } - - // Otherwise, only continue if we're computed - if !schema.Computed { - return nil - } - } - - removed := false - if o != nil && n == nil && !computed { - removed = true - } - if removed && schema.Computed { - return nil - } - - diff.Attributes[k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }, - customized, - ) - - return nil -} - -func (m schemaMap) inputString( - input terraform.UIInput, - k string, - schema *Schema) (interface{}, error) { - result, err := input.Input(context.Background(), &terraform.InputOpts{ - Id: k, - Query: k, - Description: schema.Description, - Default: schema.InputDefault, - }) - - return result, err -} - -func (m schemaMap) validate( - k string, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, ok := c.Get(k) - if !ok && schema.DefaultFunc != nil { - // We have a dynamic default. Check if we have a value. - var err error - raw, err = schema.DefaultFunc() - if err != nil { - return nil, []error{fmt.Errorf( - "%q, error loading default: %s", k, err)} - } - - // We're okay as long as we had a value set - ok = raw != nil - } - if !ok { - if schema.Required { - return nil, []error{fmt.Errorf( - "%q: required field is not set", k)} - } - - return nil, nil - } - - if !schema.Required && !schema.Optional { - // This is a computed-only field - return nil, []error{fmt.Errorf( - "%q: this field cannot be set", k)} - } - - // If the value is unknown then we can't validate it yet. - // In particular, this avoids spurious type errors where downstream - // validation code sees UnknownVariableValue as being just a string. - // The SDK has to allow the unknown value through initially, so that - // Required fields set via an interpolated value are accepted. 
- if !isWhollyKnown(raw) { - if schema.Deprecated != "" { - return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil - } - return nil, nil - } - - err := m.validateConflictingAttributes(k, schema, c) - if err != nil { - return nil, []error{err} - } - - return m.validateType(k, raw, schema, c) -} - -// isWhollyKnown returns false if the argument contains an UnknownVariableValue -func isWhollyKnown(raw interface{}) bool { - switch raw := raw.(type) { - case string: - if raw == hcl2shim.UnknownVariableValue { - return false - } - case []interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - case map[string]interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - } - return true -} -func (m schemaMap) validateConflictingAttributes( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.ConflictsWith) == 0 { - return nil - } - - for _, conflictingKey := range schema.ConflictsWith { - if raw, ok := c.Get(conflictingKey); ok { - if raw == hcl2shim.UnknownVariableValue { - // An unknown value might become unset (null) once known, so - // we must defer validation until it's known. - continue - } - return fmt.Errorf( - "%q: conflicts with %s", k, conflictingKey) - } - } - - return nil -} - -func (m schemaMap) validateList( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - - // If we support promotion and the raw value isn't a slice, wrap - // it in []interface{} and check again. 
- if schema.PromoteSingle && rawV.Kind() != reflect.Slice { - raw = []interface{}{raw} - rawV = reflect.ValueOf(raw) - } - - if rawV.Kind() != reflect.Slice { - return nil, []error{fmt.Errorf( - "%s: should be a list", k)} - } - - // We can't validate list length if this came from a dynamic block. - // Since there's no way to determine if something was from a dynamic block - // at this point, we're going to skip validation in the new protocol if - // there are any unknowns. Validate will eventually be called again once - // all values are known. - if isProto5() && !isWhollyKnown(raw) { - return nil, nil - } - - // Validate length - if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} - } - - if schema.MinItems > 0 && rawV.Len() < schema.MinItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} - } - - // Now build the []interface{} - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - var ws []string - var es []error - for i, raw := range raws { - key := fmt.Sprintf("%s.%d", k, i) - - // Reify the key value from the ResourceConfig. - // If the list was computed we have all raw values, but some of these - // may be known in the config, and aren't individually marked as Computed. - if r, ok := c.Get(key); ok { - raw = r - } - - var ws2 []string - var es2 []error - switch t := schema.Elem.(type) { - case *Resource: - // This is a sub-resource - ws2, es2 = m.validateObject(key, t.Schema, c) - case *Schema: - ws2, es2 = m.validateType(key, raw, t, c) - } - - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) 
- } - } - - return ws, es -} - -func (m schemaMap) validateMap( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - switch rawV.Kind() { - case reflect.String: - // If raw and reified are equal, this is a string and should - // be rejected. - reified, reifiedOk := c.Get(k) - if reifiedOk && raw == reified && !c.IsComputed(k) { - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - // Otherwise it's likely raw is an interpolation. - return nil, nil - case reflect.Map: - case reflect.Slice: - default: - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - - // If it is not a slice, validate directly - if rawV.Kind() != reflect.Slice { - mapIface := rawV.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(mapIface, k) - } - return nil, nil - } - - // It is a slice, verify that all the elements are maps - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - for _, raw := range raws { - v := reflect.ValueOf(raw) - if v.Kind() != reflect.Map { - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} - } - mapIface := v.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - } - - if schema.ValidateFunc != nil { - validatableMap := make(map[string]interface{}) - for _, raw := range raws { - for k, v := range raw.(map[string]interface{}) { - 
validatableMap[k] = v - } - } - - return schema.ValidateFunc(validatableMap, k) - } - - return nil, nil -} - -func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { - for key, raw := range m { - valueType, err := getValueType(k, schema) - if err != nil { - return nil, []error{err} - } - - switch valueType { - case TypeBool: - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeInt: - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeFloat: - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeString: - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - } - return nil, nil -} - -func getValueType(k string, schema *Schema) (ValueType, error) { - if schema.Elem == nil { - return TypeString, nil - } - if vt, ok := schema.Elem.(ValueType); ok { - return vt, nil - } - - // If a Schema is provided to a Map, we use the Type of that schema - // as the type for each element in the Map. 
- if s, ok := schema.Elem.(*Schema); ok { - return s.Type, nil - } - - if _, ok := schema.Elem.(*Resource); ok { - // TODO: We don't actually support this (yet) - // but silently pass the validation, until we decide - // how to handle nested structures in maps - return TypeString, nil - } - return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) -} - -func (m schemaMap) validateObject( - k string, - schema map[string]*Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, _ := c.Get(k) - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { - return nil, []error{fmt.Errorf( - "%s: expected object, got %s", - k, reflect.ValueOf(raw).Kind())} - } - - var ws []string - var es []error - for subK, s := range schema { - key := subK - if k != "" { - key = fmt.Sprintf("%s.%s", k, subK) - } - - ws2, es2 := m.validate(key, s, c) - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - // Detect any extra/unknown keys and report those as errors. - if m, ok := raw.(map[string]interface{}); ok { - for subk, _ := range m { - if _, ok := schema[subk]; !ok { - if subk == TimeoutsConfigKey { - continue - } - es = append(es, fmt.Errorf( - "%s: invalid or unknown key: %s", k, subk)) - } - } - } - - return ws, es -} - -func (m schemaMap) validatePrimitive( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - - // a nil value shouldn't happen in the old protocol, and in the new - // protocol the types have already been validated. Either way, we can't - // reflect on nil, so don't panic. - if raw == nil { - return nil, nil - } - - // Catch if the user gave a complex type where a primitive was - // expected, so we can return a friendly error message that - // doesn't contain Go type system terminology. 
- switch reflect.ValueOf(raw).Type().Kind() { - case reflect.Slice: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a list", k), - } - case reflect.Map: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a map", k), - } - default: // ok - } - - if c.IsComputed(k) { - // If the key is being computed, then it is not an error as - // long as it's not a slice or map. - return nil, nil - } - - var decoded interface{} - switch schema.Type { - case TypeBool: - // Verify that we can parse this as the correct type - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeInt: - switch { - case isProto5(): - // We need to verify the type precisely, because WeakDecode will - // decode a float as an integer. - - // the config shims only use int for integral number values - if v, ok := raw.(int); ok { - decoded = v - } else { - return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} - } - default: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - } - case TypeFloat: - // Verify that we can parse this as an int - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeString: - // Verify that we can parse this as a string - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - - if schema.ValidateFunc != nil { - return schema.ValidateFunc(decoded, k) - } - - return nil, nil -} - -func (m schemaMap) validateType( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - var ws 
[]string - var es []error - switch schema.Type { - case TypeSet, TypeList: - ws, es = m.validateList(k, raw, schema, c) - case TypeMap: - ws, es = m.validateMap(k, raw, schema, c) - default: - ws, es = m.validatePrimitive(k, raw, schema, c) - } - - if schema.Deprecated != "" { - ws = append(ws, fmt.Sprintf( - "%q: [DEPRECATED] %s", k, schema.Deprecated)) - } - - if schema.Removed != "" { - es = append(es, fmt.Errorf( - "%q: [REMOVED] %s", k, schema.Removed)) - } - - return ws, es -} - -// Zero returns the zero value for a type. -func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return new(Set) - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} diff --git a/internal/legacy/helper/schema/schema_test.go b/internal/legacy/helper/schema/schema_test.go deleted file mode 100644 index dcc2008e72fd..000000000000 --- a/internal/legacy/helper/schema/schema_test.go +++ /dev/null @@ -1,5558 +0,0 @@ -package schema - -import ( - "bytes" - "errors" - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -func TestEnvDefaultFunc(t *testing.T) { - key := "TF_TEST_ENV_DEFAULT_FUNC" - defer os.Unsetenv(key) - - f := EnvDefaultFunc(key, "42") - if err := os.Setenv(key, "foo"); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err := f() - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != "foo" { - t.Fatalf("bad: %#v", actual) - } - - if err := os.Unsetenv(key); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err = f() - if err 
!= nil { - t.Fatalf("err: %s", err) - } - if actual != "42" { - t.Fatalf("bad: %#v", actual) - } -} - -func TestMultiEnvDefaultFunc(t *testing.T) { - keys := []string{ - "TF_TEST_MULTI_ENV_DEFAULT_FUNC1", - "TF_TEST_MULTI_ENV_DEFAULT_FUNC2", - } - defer func() { - for _, k := range keys { - os.Unsetenv(k) - } - }() - - // Test that the first key is returned first - f := MultiEnvDefaultFunc(keys, "42") - if err := os.Setenv(keys[0], "foo"); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err := f() - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != "foo" { - t.Fatalf("bad: %#v", actual) - } - - if err := os.Unsetenv(keys[0]); err != nil { - t.Fatalf("err: %s", err) - } - - // Test that the second key is returned if the first one is empty - f = MultiEnvDefaultFunc(keys, "42") - if err := os.Setenv(keys[1], "foo"); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err = f() - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != "foo" { - t.Fatalf("bad: %#v", actual) - } - - if err := os.Unsetenv(keys[1]); err != nil { - t.Fatalf("err: %s", err) - } - - // Test that the default value is returned when no keys are set - actual, err = f() - if err != nil { - t.Fatalf("err: %s", err) - } - if actual != "42" { - t.Fatalf("bad: %#v", actual) - } -} - -func TestValueType_Zero(t *testing.T) { - cases := []struct { - Type ValueType - Value interface{} - }{ - {TypeBool, false}, - {TypeInt, 0}, - {TypeFloat, 0.0}, - {TypeString, ""}, - {TypeList, []interface{}{}}, - {TypeMap, map[string]interface{}{}}, - {TypeSet, new(Set)}, - } - - for i, tc := range cases { - actual := tc.Type.Zero() - if !reflect.DeepEqual(actual, tc.Value) { - t.Fatalf("%d: %#v != %#v", i, actual, tc.Value) - } - } -} - -func TestSchemaMap_Diff(t *testing.T) { - cases := []struct { - Name string - Schema map[string]*Schema - State *terraform.InstanceState - Config map[string]interface{} - CustomizeDiff CustomizeDiffFunc - Diff *terraform.InstanceDiff - Err bool - }{ - { - 
Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - ID: "foo", - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Computed, but set in config", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "bar", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "bar", - }, - }, - }, - - Err: false, - }, - - { - Name: "Default", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Default: "foo", - }, - }, - - State: nil, - - Config: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: 
map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - }, - }, - - Err: false, - }, - - { - Name: "DefaultFunc, value", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - DefaultFunc: func() (interface{}, error) { - return "foo", nil - }, - }, - }, - - State: nil, - - Config: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - }, - }, - - Err: false, - }, - - { - Name: "DefaultFunc, configuration set", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - DefaultFunc: func() (interface{}, error) { - return "foo", nil - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "bar", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - - Err: false, - }, - - { - Name: "String with StateFunc", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - StateFunc: func(a interface{}) string { - return a.(string) + "!" 
- }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo!", - NewExtra: "foo", - }, - }, - }, - - Err: false, - }, - - { - Name: "StateFunc not called with nil value", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - StateFunc: func(a interface{}) string { - t.Fatalf("should not get here!") - return "" - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Variable computed", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": hcl2shim.UnknownVariableValue, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: hcl2shim.UnknownVariableValue, - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Int decode", - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "port": 27, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "port": &terraform.ResourceAttrDiff{ - Old: "", - New: "27", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "bool decode", - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - 
Config: map[string]interface{}{ - "port": false, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "port": &terraform.ResourceAttrDiff{ - Old: "", - New: "false", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Bool", - Schema: map[string]*Schema{ - "delete": &Schema{ - Type: TypeBool, - Optional: true, - Default: false, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "delete": "false", - }, - }, - - Config: nil, - - Diff: nil, - - Err: false, - }, - - { - Name: "List decode", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, 2, 5}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "3", - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "2", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "List decode with promotion", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - PromoteSingle: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": "5", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "List decode with promotion with list", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - PromoteSingle: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - 
"ports": []interface{}{"5"}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, 2, 5}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "3", - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "2", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, hcl2shim.UnknownVariableValue, 5}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.0": "1", - "ports.1": "2", - "ports.2": "5", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{1, 2, 5}, - }, - - Diff: nil, - - Err: false, - }, - - { - Name: "", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: 
map[string]string{ - "ports.#": "2", - "ports.0": "1", - "ports.1": "2", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{1, 2, 5}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "3", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, 2, 5}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "3", - RequiresNew: true, - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - RequiresNew: true, - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "2", - RequiresNew: true, - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "List with computed set", - Schema: map[string]*Schema{ - "config": &Schema{ - Type: TypeList, - Optional: true, - ForceNew: true, - MinItems: 1, - Elem: &Resource{ - Schema: map[string]*Schema{ - "name": { - Type: TypeString, - Required: true, - }, - - "rules": { - Type: TypeSet, - Computed: true, - Elem: &Schema{Type: TypeString}, - Set: HashString, - }, - }, - }, - }, - }, - - State: nil, - - Config: 
map[string]interface{}{ - "config": []interface{}{ - map[string]interface{}{ - "name": "hello", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - RequiresNew: true, - }, - - "config.0.name": &terraform.ResourceAttrDiff{ - Old: "", - New: "hello", - }, - - "config.0.rules.#": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{5, 2, 1}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "3", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "2", - }, - "ports.5": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Computed: true, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "0", - }, - }, - - Config: nil, - - Diff: nil, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Config: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - 
Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{"2", "5", 1}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "3", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "", - New: "2", - }, - "ports.5": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, hcl2shim.UnknownVariableValue, "5"}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.1": "1", - "ports.2": "2", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{5, 2, 1}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "3", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "1", - New: "1", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "2", - New: "2", - }, - "ports.5": 
&terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.1": "1", - "ports.2": "2", - }, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "0", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: true, - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "2", - New: "0", - NewRemoved: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "bar", - "ports.#": "1", - "ports.80": "80", - }, - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - }, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - ps := m["ports"].([]interface{}) - result := 0 - for _, p := range ps { - result += p.(int) - } - return result - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ingress.#": "2", - "ingress.80.ports.#": "1", - "ingress.80.ports.0": "80", - "ingress.443.ports.#": "1", - "ingress.443.ports.0": "443", - }, - }, - - Config: map[string]interface{}{ - 
"ingress": []interface{}{ - map[string]interface{}{ - "ports": []interface{}{443}, - }, - map[string]interface{}{ - "ports": []interface{}{80}, - }, - }, - }, - - Diff: nil, - - Err: false, - }, - - { - Name: "List of structure decode", - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{ - "from": 8080, - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ingress.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "ingress.0.from": &terraform.ResourceAttrDiff{ - Old: "", - New: "8080", - }, - }, - }, - - Err: false, - }, - - { - Name: "ComputedWhen", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Computed: true, - ComputedWhen: []string{"port"}, - }, - - "port": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - "port": "80", - }, - }, - - Config: map[string]interface{}{ - "port": 80, - }, - - Diff: nil, - - Err: false, - }, - - { - Name: "", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Computed: true, - ComputedWhen: []string{"port"}, - }, - - "port": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "port": "80", - }, - }, - - Config: map[string]interface{}{ - "port": 80, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - /* TODO - { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Computed: 
true, - ComputedWhen: []string{"port"}, - }, - - "port": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "foo", - "port": "80", - }, - }, - - Config: map[string]interface{}{ - "port": 8080, - }, - - Diff: &terraform.ResourceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "foo", - NewComputed: true, - }, - "port": &terraform.ResourceAttrDiff{ - Old: "80", - New: "8080", - }, - }, - }, - - Err: false, - }, - */ - - { - Name: "Maps", - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeMap, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "config_vars": []interface{}{ - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.%": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - - "config_vars.bar": &terraform.ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - - Err: false, - }, - - { - Name: "Maps", - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeMap, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.foo": "bar", - }, - }, - - Config: map[string]interface{}{ - "config_vars": []interface{}{ - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.foo": &terraform.ResourceAttrDiff{ - Old: "bar", - NewRemoved: true, - }, - "config_vars.bar": &terraform.ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - - Err: false, - }, - - { - Name: "Maps", - Schema: map[string]*Schema{ - "vars": &Schema{ - Type: TypeMap, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "vars.foo": "bar", - }, - }, - - Config: 
map[string]interface{}{ - "vars": []interface{}{ - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "vars.foo": &terraform.ResourceAttrDiff{ - Old: "bar", - New: "", - NewRemoved: true, - }, - "vars.bar": &terraform.ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - - Err: false, - }, - - { - Name: "Maps", - Schema: map[string]*Schema{ - "vars": &Schema{ - Type: TypeMap, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "vars.foo": "bar", - }, - }, - - Config: nil, - - Diff: nil, - - Err: false, - }, - - { - Name: "Maps", - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeMap}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "1", - "config_vars.0.foo": "bar", - }, - }, - - Config: map[string]interface{}{ - "config_vars": []interface{}{ - map[string]interface{}{ - "bar": "baz", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.0.foo": &terraform.ResourceAttrDiff{ - Old: "bar", - NewRemoved: true, - }, - "config_vars.0.bar": &terraform.ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - - Err: false, - }, - - { - Name: "Maps", - Schema: map[string]*Schema{ - "config_vars": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeMap}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config_vars.#": "1", - "config_vars.0.foo": "bar", - "config_vars.0.bar": "baz", - }, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config_vars.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "config_vars.0.%": &terraform.ResourceAttrDiff{ - Old: "2", - New: "0", - }, - "config_vars.0.foo": &terraform.ResourceAttrDiff{ - Old: 
"bar", - NewRemoved: true, - }, - "config_vars.0.bar": &terraform.ResourceAttrDiff{ - Old: "baz", - NewRemoved: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "ForceNews", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - ForceNew: true, - }, - - "address": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "bar", - "address": "foo", - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "bar", - New: "foo", - RequiresNew: true, - }, - - "address": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - ForceNew: true, - }, - - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "availability_zone": "bar", - "ports.#": "1", - "ports.80": "80", - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "bar", - New: "foo", - RequiresNew: true, - }, - - "ports.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "instances": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeString}, - Optional: true, - Computed: true, - Set: func(v interface{}) int { - return len(v.(string)) - }, - 
}, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "instances.#": "0", - }, - }, - - Config: map[string]interface{}{ - "instances": []interface{}{hcl2shim.UnknownVariableValue}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "instances.#": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "route": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{ - Type: TypeInt, - Required: true, - }, - - "gateway": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "route": []interface{}{ - map[string]interface{}{ - "index": "1", - "gateway": hcl2shim.UnknownVariableValue, - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "route.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "route.~1.index": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "route.~1.gateway": &terraform.ResourceAttrDiff{ - Old: "", - New: hcl2shim.UnknownVariableValue, - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set", - Schema: map[string]*Schema{ - "route": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{ - Type: TypeInt, - Required: true, - }, - - "gateway": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - }, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "route": []interface{}{ - map[string]interface{}{ - "index": "1", - "gateway": 
[]interface{}{ - hcl2shim.UnknownVariableValue, - }, - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "route.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "route.~1.index": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "route.~1.gateway.#": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Computed maps", - Schema: map[string]*Schema{ - "vars": &Schema{ - Type: TypeMap, - Computed: true, - }, - }, - - State: nil, - - Config: nil, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "vars.%": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Computed maps", - Schema: map[string]*Schema{ - "vars": &Schema{ - Type: TypeMap, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "vars.%": "0", - }, - }, - - Config: map[string]interface{}{ - "vars": map[string]interface{}{ - "bar": hcl2shim.UnknownVariableValue, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "vars.%": &terraform.ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: " - Empty", - Schema: map[string]*Schema{}, - - State: &terraform.InstanceState{}, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Float", - Schema: map[string]*Schema{ - "some_threshold": &Schema{ - Type: TypeFloat, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "some_threshold": "567.8", - }, - }, - - Config: map[string]interface{}{ - "some_threshold": 12.34, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "some_threshold": &terraform.ResourceAttrDiff{ - Old: "567.8", - New: "12.34", - }, - }, - }, - - Err: false, - }, - - { - Name: 
"https://github.com/hashicorp/terraform/issues/824", - Schema: map[string]*Schema{ - "block_device": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "device_name": &Schema{ - Type: TypeString, - Required: true, - }, - "delete_on_termination": &Schema{ - Type: TypeBool, - Optional: true, - Default: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) - return hashcode.String(buf.String()) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "block_device.#": "2", - "block_device.616397234.delete_on_termination": "true", - "block_device.616397234.device_name": "/dev/sda1", - "block_device.2801811477.delete_on_termination": "true", - "block_device.2801811477.device_name": "/dev/sdx", - }, - }, - - Config: map[string]interface{}{ - "block_device": []interface{}{ - map[string]interface{}{ - "device_name": "/dev/sda1", - }, - map[string]interface{}{ - "device_name": "/dev/sdx", - }, - }, - }, - Diff: nil, - Err: false, - }, - - { - Name: "Zero value in state shouldn't result in diff", - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeBool, - Optional: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "port": "false", - }, - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Same as prev, but for sets", - Schema: map[string]*Schema{ - "route": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{ - Type: TypeInt, - Required: true, - }, - - "gateway": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - }, - Set: func(v 
interface{}) int { - m := v.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "route.#": "0", - }, - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "A set computed element shouldn't cause a diff", - Schema: map[string]*Schema{ - "active": &Schema{ - Type: TypeBool, - Computed: true, - ForceNew: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "active": "true", - }, - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "An empty set should show up in the diff", - Schema: map[string]*Schema{ - "instances": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeString}, - Optional: true, - ForceNew: true, - Set: func(v interface{}) int { - return len(v.(string)) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "instances.#": "1", - "instances.3": "foo", - }, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "instances.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - RequiresNew: true, - }, - "instances.3": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "", - NewRemoved: true, - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Map with empty value", - Schema: map[string]*Schema{ - "vars": &Schema{ - Type: TypeMap, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "vars": map[string]interface{}{ - "foo": "", - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "vars.%": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "vars.foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - }, - }, - }, - - Err: false, - }, - - { - Name: "Unset bool, not in state", - Schema: map[string]*Schema{ - "force": &Schema{ - Type: TypeBool, - Optional: true, - 
ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Unset set, not in state", - Schema: map[string]*Schema{ - "metadata_keys": &Schema{ - Type: TypeSet, - Optional: true, - ForceNew: true, - Elem: &Schema{Type: TypeInt}, - Set: func(interface{}) int { return 0 }, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Unset list in state, should not show up computed", - Schema: map[string]*Schema{ - "metadata_keys": &Schema{ - Type: TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &Schema{Type: TypeInt}, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "metadata_keys.#": "0", - }, - }, - - Config: map[string]interface{}{}, - - Diff: nil, - - Err: false, - }, - - { - Name: "Set element computed element", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": []interface{}{1, hcl2shim.UnknownVariableValue}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Computed map without config that's known to be empty does not generate diff", - Schema: map[string]*Schema{ - "tags": &Schema{ - Type: TypeMap, - Computed: true, - }, - }, - - Config: nil, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "tags.%": "0", - }, - }, - - Diff: nil, - - Err: false, - }, - - { - Name: "Set with hyphen keys", - Schema: map[string]*Schema{ - "route": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index": &Schema{ - Type: TypeInt, - Required: true, - }, - - 
"gateway-name": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - return m["index"].(int) - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "route": []interface{}{ - map[string]interface{}{ - "index": "1", - "gateway-name": "hello", - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "route.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "route.1.index": &terraform.ResourceAttrDiff{ - Old: "", - New: "1", - }, - "route.1.gateway-name": &terraform.ResourceAttrDiff{ - Old: "", - New: "hello", - }, - }, - }, - - Err: false, - }, - - { - Name: ": StateFunc in nested set (#1759)", - Schema: map[string]*Schema{ - "service_account": &Schema{ - Type: TypeList, - Optional: true, - ForceNew: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "scopes": &Schema{ - Type: TypeSet, - Required: true, - ForceNew: true, - Elem: &Schema{ - Type: TypeString, - StateFunc: func(v interface{}) string { - return v.(string) + "!" 
- }, - }, - Set: func(v interface{}) int { - i, err := strconv.Atoi(v.(string)) - if err != nil { - t.Fatalf("err: %s", err) - } - return i - }, - }, - }, - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "service_account": []interface{}{ - map[string]interface{}{ - "scopes": []interface{}{"123"}, - }, - }, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "service_account.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - RequiresNew: true, - }, - "service_account.0.scopes.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - RequiresNew: true, - }, - "service_account.0.scopes.123": &terraform.ResourceAttrDiff{ - Old: "", - New: "123!", - NewExtra: "123", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Removing set elements", - Schema: map[string]*Schema{ - "instances": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeString}, - Optional: true, - ForceNew: true, - Set: func(v interface{}) int { - return len(v.(string)) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "instances.#": "2", - "instances.3": "333", - "instances.2": "22", - }, - }, - - Config: map[string]interface{}{ - "instances": []interface{}{"333", "4444"}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "instances.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "2", - }, - "instances.2": &terraform.ResourceAttrDiff{ - Old: "22", - New: "", - NewRemoved: true, - RequiresNew: true, - }, - "instances.3": &terraform.ResourceAttrDiff{ - Old: "333", - New: "333", - }, - "instances.4": &terraform.ResourceAttrDiff{ - Old: "", - New: "4444", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Bools can be set with 0/1 in config, still get true/false", - Schema: map[string]*Schema{ - "one": &Schema{ - Type: TypeBool, - Optional: true, - }, - "two": &Schema{ - Type: TypeBool, - Optional: true, - }, 
- "three": &Schema{ - Type: TypeBool, - Optional: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "one": "false", - "two": "true", - "three": "true", - }, - }, - - Config: map[string]interface{}{ - "one": "1", - "two": "0", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "one": &terraform.ResourceAttrDiff{ - Old: "false", - New: "true", - }, - "two": &terraform.ResourceAttrDiff{ - Old: "true", - New: "false", - }, - "three": &terraform.ResourceAttrDiff{ - Old: "true", - New: "false", - NewRemoved: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "tainted in state w/ no attr changes is still a replacement", - Schema: map[string]*Schema{}, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "id": "someid", - }, - Tainted: true, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{}, - DestroyTainted: true, - }, - - Err: false, - }, - - { - Name: "Set ForceNew only marks the changing element as ForceNew", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - ForceNew: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.1": "1", - "ports.2": "2", - "ports.4": "4", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{5, 2, 1}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "3", - New: "3", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "1", - New: "1", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "2", - New: "2", - }, - "ports.5": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - RequiresNew: true, - }, - "ports.4": &terraform.ResourceAttrDiff{ - Old: "4", - New: "0", - 
NewRemoved: true, - RequiresNew: true, - }, - }, - }, - }, - - { - Name: "removed optional items should trigger ForceNew", - Schema: map[string]*Schema{ - "description": &Schema{ - Type: TypeString, - ForceNew: true, - Optional: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "description": "foo", - }, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "description": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "", - RequiresNew: true, - NewRemoved: true, - }, - }, - }, - - Err: false, - }, - - // GH-7715 - { - Name: "computed value for boolean field", - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeBool, - ForceNew: true, - Computed: true, - Optional: true, - }, - }, - - State: &terraform.InstanceState{}, - - Config: map[string]interface{}{ - "foo": hcl2shim.UnknownVariableValue, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "false", - NewComputed: true, - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "Set ForceNew marks count as ForceNew if computed", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - ForceNew: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.1": "1", - "ports.2": "2", - "ports.4": "4", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{hcl2shim.UnknownVariableValue, 2, 1}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "3", - New: "", - NewComputed: true, - RequiresNew: true, - }, - }, - }, - }, - - { - Name: "List with computed schema and ForceNew", - Schema: map[string]*Schema{ - 
"config": &Schema{ - Type: TypeList, - Optional: true, - ForceNew: true, - Elem: &Schema{ - Type: TypeString, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "config.#": "2", - "config.0": "a", - "config.1": "b", - }, - }, - - Config: map[string]interface{}{ - "config": []interface{}{hcl2shim.UnknownVariableValue, hcl2shim.UnknownVariableValue}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "config.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "", - RequiresNew: true, - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "overridden diff with a CustomizeDiff function, ForceNew not in schema", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if err := d.SetNew("availability_zone", "bar"); err != nil { - return err - } - if err := d.ForceNew("availability_zone"); err != nil { - return err - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - // NOTE: This case is technically impossible in the current - // implementation, because optional+computed values never show up in the - // diff. In the event behavior changes this test should ensure that the - // intended diff still shows up. 
- Name: "overridden removed attribute diff with a CustomizeDiff function, ForceNew not in schema", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if err := d.SetNew("availability_zone", "bar"); err != nil { - return err - } - if err := d.ForceNew("availability_zone"); err != nil { - return err - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - - Name: "overridden diff with a CustomizeDiff function, ForceNew in schema", - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if err := d.SetNew("availability_zone", "bar"); err != nil { - return err - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "required field with computed diff added with CustomizeDiff function", - Schema: map[string]*Schema{ - "ami_id": &Schema{ - Type: TypeString, - Required: true, - }, - "instance_id": &Schema{ - Type: TypeString, - Computed: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ami_id": "foo", - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if err := d.SetNew("instance_id", "bar"); err != nil { - return err - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: 
map[string]*terraform.ResourceAttrDiff{ - "ami_id": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "instance_id": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - - Err: false, - }, - - { - Name: "Set ForceNew only marks the changing element as ForceNew - CustomizeDiffFunc edition", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "3", - "ports.1": "1", - "ports.2": "2", - "ports.4": "4", - }, - }, - - Config: map[string]interface{}{ - "ports": []interface{}{5, 2, 6}, - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if err := d.SetNew("ports", []interface{}{5, 2, 1}); err != nil { - return err - } - if err := d.ForceNew("ports"); err != nil { - return err - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "3", - New: "3", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "1", - New: "1", - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "2", - New: "2", - }, - "ports.5": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - RequiresNew: true, - }, - "ports.4": &terraform.ResourceAttrDiff{ - Old: "4", - New: "0", - NewRemoved: true, - RequiresNew: true, - }, - }, - }, - }, - - { - Name: "tainted resource does not run CustomizeDiffFunc", - Schema: map[string]*Schema{}, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "id": "someid", - }, - Tainted: true, - }, - - Config: map[string]interface{}{}, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - return errors.New("diff customization should not have run") - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{}, - DestroyTainted: true, 
- }, - - Err: false, - }, - - { - Name: "NewComputed based on a conditional with CustomizeDiffFunc", - Schema: map[string]*Schema{ - "etag": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - "version_id": &Schema{ - Type: TypeString, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "etag": "foo", - "version_id": "1", - }, - }, - - Config: map[string]interface{}{ - "etag": "bar", - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - if d.HasChange("etag") { - d.SetNewComputed("version_id") - } - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "etag": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "bar", - }, - "version_id": &terraform.ResourceAttrDiff{ - Old: "1", - New: "", - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "NewComputed should always propagate with CustomizeDiff", - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "foo": "", - }, - ID: "pre-existing", - }, - - Config: map[string]interface{}{}, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - d.SetNewComputed("foo") - return nil - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - { - Name: "vetoing a diff", - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "foo": "bar", - }, - }, - - Config: map[string]interface{}{ - "foo": "baz", - }, - - CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { - return fmt.Errorf("diff vetoed") - }, - - Err: true, - }, - - // A lot of resources currently depended on using the 
empty string as a - // nil/unset value. - // FIXME: We want this to eventually produce a diff, since there - // technically is a new value in the config. - { - Name: "optional, computed, empty string", - Schema: map[string]*Schema{ - "attr": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "attr": "bar", - }, - }, - - Config: map[string]interface{}{ - "attr": "", - }, - }, - - { - Name: "optional, computed, empty string should not crash in CustomizeDiff", - Schema: map[string]*Schema{ - "unrelated_set": { - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeString}, - }, - "stream_enabled": { - Type: TypeBool, - Optional: true, - }, - "stream_view_type": { - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "unrelated_set.#": "0", - "stream_enabled": "true", - "stream_view_type": "KEYS_ONLY", - }, - }, - Config: map[string]interface{}{ - "stream_enabled": false, - "stream_view_type": "", - }, - CustomizeDiff: func(diff *ResourceDiff, v interface{}) error { - v, ok := diff.GetOk("unrelated_set") - if ok { - return fmt.Errorf("Didn't expect unrelated_set: %#v", v) - } - return nil - }, - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "stream_enabled": { - Old: "true", - New: "false", - }, - }, - }, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - c := terraform.NewResourceConfigRaw(tc.Config) - - d, err := schemaMap(tc.Schema).Diff(tc.State, c, tc.CustomizeDiff, nil, true) - if err != nil != tc.Err { - t.Fatalf("err: %s", err) - } - - if !reflect.DeepEqual(tc.Diff, d) { - t.Fatalf("expected:\n%#v\n\ngot:\n%#v", tc.Diff, d) - } - }) - } -} - -func TestSchemaMap_Input(t *testing.T) { - cases := map[string]struct { - Schema map[string]*Schema - Config map[string]interface{} - Input 
map[string]string - Result map[string]interface{} - Err bool - }{ - /* - * String decode - */ - - "no input on optional field with no config": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Input: map[string]string{}, - Result: map[string]interface{}{}, - Err: false, - }, - - "input ignored when config has a value": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "bar", - }, - - Input: map[string]string{ - "availability_zone": "foo", - }, - - Result: map[string]interface{}{}, - - Err: false, - }, - - "input ignored when schema has a default": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Default: "foo", - Optional: true, - }, - }, - - Input: map[string]string{ - "availability_zone": "bar", - }, - - Result: map[string]interface{}{}, - - Err: false, - }, - - "input ignored when default function returns a value": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - DefaultFunc: func() (interface{}, error) { - return "foo", nil - }, - Optional: true, - }, - }, - - Input: map[string]string{ - "availability_zone": "bar", - }, - - Result: map[string]interface{}{}, - - Err: false, - }, - - "input ignored when default function returns an empty string": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Default: "", - Optional: true, - }, - }, - - Input: map[string]string{ - "availability_zone": "bar", - }, - - Result: map[string]interface{}{}, - - Err: false, - }, - - "input used when default function returns nil": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - DefaultFunc: func() (interface{}, error) { - return nil, nil - }, - Required: true, - }, - }, - - Input: map[string]string{ - "availability_zone": "bar", - }, - - Result: 
map[string]interface{}{ - "availability_zone": "bar", - }, - - Err: false, - }, - - "input not used when optional default function returns nil": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - DefaultFunc: func() (interface{}, error) { - return nil, nil - }, - Optional: true, - }, - }, - - Input: map[string]string{}, - Result: map[string]interface{}{}, - Err: false, - }, - } - - for i, tc := range cases { - if tc.Config == nil { - tc.Config = make(map[string]interface{}) - } - - input := new(terraform.MockUIInput) - input.InputReturnMap = tc.Input - - rc := terraform.NewResourceConfigRaw(tc.Config) - rc.Config = make(map[string]interface{}) - - actual, err := schemaMap(tc.Schema).Input(input, rc) - if err != nil != tc.Err { - t.Fatalf("#%v err: %s", i, err) - } - - if !reflect.DeepEqual(tc.Result, actual.Config) { - t.Fatalf("#%v: bad:\n\ngot: %#v\nexpected: %#v", i, actual.Config, tc.Result) - } - } -} - -func TestSchemaMap_InputDefault(t *testing.T) { - emptyConfig := make(map[string]interface{}) - rc := terraform.NewResourceConfigRaw(emptyConfig) - rc.Config = make(map[string]interface{}) - - input := new(terraform.MockUIInput) - input.InputFn = func(opts *terraform.InputOpts) (string, error) { - t.Fatalf("InputFn should not be called on: %#v", opts) - return "", nil - } - - schema := map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Default: "foo", - Optional: true, - }, - } - actual, err := schemaMap(schema).Input(input, rc) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := map[string]interface{}{} - - if !reflect.DeepEqual(expected, actual.Config) { - t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected) - } -} - -func TestSchemaMap_InputDeprecated(t *testing.T) { - emptyConfig := make(map[string]interface{}) - rc := terraform.NewResourceConfigRaw(emptyConfig) - rc.Config = make(map[string]interface{}) - - input := new(terraform.MockUIInput) - input.InputFn = func(opts 
*terraform.InputOpts) (string, error) { - t.Fatalf("InputFn should not be called on: %#v", opts) - return "", nil - } - - schema := map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Deprecated: "long gone", - Optional: true, - }, - } - actual, err := schemaMap(schema).Input(input, rc) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := map[string]interface{}{} - - if !reflect.DeepEqual(expected, actual.Config) { - t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected) - } -} - -func TestSchemaMap_InternalValidate(t *testing.T) { - cases := map[string]struct { - In map[string]*Schema - Err bool - }{ - "nothing": { - nil, - false, - }, - - "Both optional and required": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - Required: true, - }, - }, - true, - }, - - "No optional and no required": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - }, - }, - true, - }, - - "Missing Type": { - map[string]*Schema{ - "foo": &Schema{ - Required: true, - }, - }, - true, - }, - - "Required but computed": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Required: true, - Computed: true, - }, - }, - true, - }, - - "Looks good": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - Required: true, - }, - }, - false, - }, - - "Computed but has default": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - Computed: true, - Default: "foo", - }, - }, - true, - }, - - "Required but has default": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - Required: true, - Default: "foo", - }, - }, - true, - }, - - "List element not set": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeList, - }, - }, - true, - }, - - "List default": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - Default: "foo", - }, - }, - true, - }, - - "List element computed": { - map[string]*Schema{ - "foo": &Schema{ - 
Type: TypeList, - Optional: true, - Elem: &Schema{ - Type: TypeInt, - Computed: true, - }, - }, - }, - true, - }, - - "List element with Set set": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - Set: func(interface{}) int { return 0 }, - Optional: true, - }, - }, - true, - }, - - "Set element with no Set set": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeInt}, - Optional: true, - }, - }, - false, - }, - - "Required but computedWhen": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Required: true, - ComputedWhen: []string{"foo"}, - }, - }, - true, - }, - - "Conflicting attributes cannot be required": { - map[string]*Schema{ - "a": &Schema{ - Type: TypeBool, - Required: true, - }, - "b": &Schema{ - Type: TypeBool, - Optional: true, - ConflictsWith: []string{"a"}, - }, - }, - true, - }, - - "Attribute with conflicts cannot be required": { - map[string]*Schema{ - "b": &Schema{ - Type: TypeBool, - Required: true, - ConflictsWith: []string{"a"}, - }, - }, - true, - }, - - "ConflictsWith cannot be used w/ ComputedWhen": { - map[string]*Schema{ - "a": &Schema{ - Type: TypeBool, - ComputedWhen: []string{"foor"}, - }, - "b": &Schema{ - Type: TypeBool, - Required: true, - ConflictsWith: []string{"a"}, - }, - }, - true, - }, - - "Sub-resource invalid": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "foo": new(Schema), - }, - }, - }, - }, - true, - }, - - "Sub-resource valid": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "foo": &Schema{ - Type: TypeInt, - Optional: true, - }, - }, - }, - }, - }, - false, - }, - - "ValidateFunc on non-primitive": { - map[string]*Schema{ - "foo": &Schema{ - Type: TypeSet, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - return - }, - }, - }, 
- true, - }, - - "computed-only field with validateFunc": { - map[string]*Schema{ - "string": &Schema{ - Type: TypeString, - Computed: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - es = append(es, fmt.Errorf("this is not fine")) - return - }, - }, - }, - true, - }, - - "computed-only field with diffSuppressFunc": { - map[string]*Schema{ - "string": &Schema{ - Type: TypeString, - Computed: true, - DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { - // Always suppress any diff - return false - }, - }, - }, - true, - }, - - "invalid field name format #1": { - map[string]*Schema{ - "with space": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - true, - }, - - "invalid field name format #2": { - map[string]*Schema{ - "WithCapitals": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - true, - }, - - "invalid field name format of a Deprecated field": { - map[string]*Schema{ - "WithCapitals": &Schema{ - Type: TypeString, - Optional: true, - Deprecated: "Use with_underscores instead", - }, - }, - false, - }, - - "invalid field name format of a Removed field": { - map[string]*Schema{ - "WithCapitals": &Schema{ - Type: TypeString, - Optional: true, - Removed: "Use with_underscores instead", - }, - }, - false, - }, - - "ConfigModeBlock with Elem *Resource": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeBlock, - Optional: true, - Elem: &Resource{}, - }, - }, - false, - }, - - "ConfigModeBlock Computed with Elem *Resource": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeBlock, - Computed: true, - Elem: &Resource{}, - }, - }, - true, // ConfigMode of block cannot be used for computed schema - }, - - "ConfigModeBlock with Elem *Schema": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeBlock, - Optional: true, - Elem: &Schema{ - Type: TypeString, - }, - }, - }, - true, - }, - - 
"ConfigModeBlock with no Elem": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeString, - ConfigMode: SchemaConfigModeBlock, - Optional: true, - }, - }, - true, - }, - - "ConfigModeBlock inside ConfigModeAttr": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeAttr, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "sub": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeBlock, - Elem: &Resource{}, - }, - }, - }, - }, - }, - true, // ConfigMode of block cannot be used in child of schema with ConfigMode of attribute - }, - - "ConfigModeAuto with *Resource inside ConfigModeAttr": { - map[string]*Schema{ - "block": &Schema{ - Type: TypeList, - ConfigMode: SchemaConfigModeAttr, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "sub": &Schema{ - Type: TypeList, - Elem: &Resource{}, - }, - }, - }, - }, - }, - true, // in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute - }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - err := schemaMap(tc.In).InternalValidate(nil) - if err != nil != tc.Err { - if tc.Err { - t.Fatalf("%q: Expected error did not occur:\n\n%#v", tn, tc.In) - } - t.Fatalf("%q: Unexpected error occurred: %s\n\n%#v", tn, err, tc.In) - } - }) - } - -} - -func TestSchemaMap_DiffSuppress(t *testing.T) { - cases := map[string]struct { - Schema map[string]*Schema - State *terraform.InstanceState - Config map[string]interface{} - ExpectedDiff *terraform.InstanceDiff - Err bool - }{ - "#0 - Suppress otherwise valid diff by returning true": { - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { - // Always suppress any diff - return true - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - ExpectedDiff: nil, - - Err: false, - }, - - "#1 - Don't 
suppress diff by returning false": { - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { - // Always suppress any diff - return false - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - - ExpectedDiff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - Old: "", - New: "foo", - }, - }, - }, - - Err: false, - }, - - "Default with suppress makes no diff": { - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - Default: "foo", - DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { - return true - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - ExpectedDiff: nil, - - Err: false, - }, - - "Default with false suppress makes diff": { - Schema: map[string]*Schema{ - "availability_zone": { - Type: TypeString, - Optional: true, - Default: "foo", - DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { - return false - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{}, - - ExpectedDiff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "availability_zone": { - Old: "", - New: "foo", - }, - }, - }, - - Err: false, - }, - - "Complex structure with set of computed string should mark root set as computed": { - Schema: map[string]*Schema{ - "outer": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "outer_str": &Schema{ - Type: TypeString, - Optional: true, - }, - "inner": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "inner_str": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - return 2 - }, - }, - }, - }, - Set: func(v interface{}) int { - return 1 - }, - }, - }, - - State: nil, - - Config: 
map[string]interface{}{ - "outer": []interface{}{ - map[string]interface{}{ - "outer_str": "foo", - "inner": []interface{}{ - map[string]interface{}{ - "inner_str": hcl2shim.UnknownVariableValue, - }, - }, - }, - }, - }, - - ExpectedDiff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "outer.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "outer.~1.outer_str": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "outer.~1.inner.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "outer.~1.inner.~2.inner_str": &terraform.ResourceAttrDiff{ - Old: "", - New: hcl2shim.UnknownVariableValue, - NewComputed: true, - }, - }, - }, - - Err: false, - }, - - "Complex structure with complex list of computed string should mark root set as computed": { - Schema: map[string]*Schema{ - "outer": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "outer_str": &Schema{ - Type: TypeString, - Optional: true, - }, - "inner": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "inner_str": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - Set: func(v interface{}) int { - return 1 - }, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "outer": []interface{}{ - map[string]interface{}{ - "outer_str": "foo", - "inner": []interface{}{ - map[string]interface{}{ - "inner_str": hcl2shim.UnknownVariableValue, - }, - }, - }, - }, - }, - - ExpectedDiff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "outer.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "outer.~1.outer_str": &terraform.ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "outer.~1.inner.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "outer.~1.inner.0.inner_str": &terraform.ResourceAttrDiff{ - Old: "", - New: hcl2shim.UnknownVariableValue, - NewComputed: true, - }, - }, - }, 
- - Err: false, - }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - c := terraform.NewResourceConfigRaw(tc.Config) - - d, err := schemaMap(tc.Schema).Diff(tc.State, c, nil, nil, true) - if err != nil != tc.Err { - t.Fatalf("#%q err: %s", tn, err) - } - - if !reflect.DeepEqual(tc.ExpectedDiff, d) { - t.Fatalf("#%q:\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.ExpectedDiff, d) - } - }) - } -} - -func TestSchemaMap_Validate(t *testing.T) { - cases := map[string]struct { - Schema map[string]*Schema - Config map[string]interface{} - Err bool - Errors []error - Warnings []string - }{ - "Good": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "foo", - }, - }, - - "Good, because the var is not set and that error will come elsewhere": { - Schema: map[string]*Schema{ - "size": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - - Config: map[string]interface{}{ - "size": hcl2shim.UnknownVariableValue, - }, - }, - - "Required field not set": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Required: true, - }, - }, - - Config: map[string]interface{}{}, - - Err: true, - }, - - "Invalid basic type": { - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - - Config: map[string]interface{}{ - "port": "I am invalid", - }, - - Err: true, - }, - - "Invalid complex type": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeString, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "user_data": []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - }, - }, - - Err: true, - }, - - "Bad type": { - Schema: map[string]*Schema{ - "size": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - - Config: map[string]interface{}{ - "size": "nope", - }, - - Err: true, - }, - - "Required but has 
DefaultFunc": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Required: true, - DefaultFunc: func() (interface{}, error) { - return "foo", nil - }, - }, - }, - - Config: nil, - }, - - "Required but has DefaultFunc return nil": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Required: true, - DefaultFunc: func() (interface{}, error) { - return nil, nil - }, - }, - }, - - Config: nil, - - Err: true, - }, - - "List with promotion": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - PromoteSingle: true, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "ingress": "5", - }, - - Err: false, - }, - - "List with promotion set as list": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Elem: &Schema{Type: TypeInt}, - PromoteSingle: true, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{"5"}, - }, - - Err: false, - }, - - "Optional sub-resource": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{}, - - Err: false, - }, - - "Sub-resource is the wrong type": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Required: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{"foo"}, - }, - - Err: true, - }, - - "Not a list nested block": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": "foo", - }, - - Err: true, - 
Errors: []error{ - fmt.Errorf(`ingress: should be a list`), - }, - }, - - "Not a list primitive": { - Schema: map[string]*Schema{ - "strings": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{ - Type: TypeString, - }, - }, - }, - - Config: map[string]interface{}{ - "strings": "foo", - }, - - Err: true, - Errors: []error{ - fmt.Errorf(`strings: should be a list`), - }, - }, - - "Unknown list": { - Schema: map[string]*Schema{ - "strings": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{ - Type: TypeString, - }, - }, - }, - - Config: map[string]interface{}{ - "strings": hcl2shim.UnknownVariableValue, - }, - - Err: false, - }, - - "Unknown + Deprecation": { - Schema: map[string]*Schema{ - "old_news": &Schema{ - Type: TypeString, - Optional: true, - Deprecated: "please use 'new_news' instead", - }, - }, - - Config: map[string]interface{}{ - "old_news": hcl2shim.UnknownVariableValue, - }, - - Warnings: []string{ - "\"old_news\": [DEPRECATED] please use 'new_news' instead", - }, - }, - - "Required sub-resource field": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{}, - }, - }, - - Err: true, - }, - - "Good sub-resource": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{ - "from": 80, - }, - }, - }, - - Err: false, - }, - - "Good sub-resource, computed value": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "from": &Schema{ - Type: TypeInt, - Optional: true, - }, - 
}, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{ - "from": hcl2shim.UnknownVariableValue, - }, - }, - }, - - Err: false, - }, - - "Invalid/unknown field": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - Config: map[string]interface{}{ - "foo": "bar", - }, - - Err: true, - }, - - "Invalid/unknown field with computed value": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - Config: map[string]interface{}{ - "foo": hcl2shim.UnknownVariableValue, - }, - - Err: true, - }, - - "Computed field set": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Computed: true, - }, - }, - - Config: map[string]interface{}{ - "availability_zone": "bar", - }, - - Err: true, - }, - - "Not a set": { - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - Config: map[string]interface{}{ - "ports": "foo", - }, - - Err: true, - }, - - "Maps": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "user_data": "foo", - }, - - Err: true, - }, - - "Good map: data surrounded by extra slice": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "user_data": []interface{}{ - map[string]interface{}{ - "foo": "bar", - }, - }, - }, - }, - - "Good map": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "user_data": map[string]interface{}{ - "foo": "bar", - }, - }, - }, - - "Map with type specified as value type": { - Schema: 
map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - Elem: TypeBool, - }, - }, - - Config: map[string]interface{}{ - "user_data": map[string]interface{}{ - "foo": "not_a_bool", - }, - }, - - Err: true, - }, - - "Map with type specified as nested Schema": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - Elem: &Schema{Type: TypeBool}, - }, - }, - - Config: map[string]interface{}{ - "user_data": map[string]interface{}{ - "foo": "not_a_bool", - }, - }, - - Err: true, - }, - - "Bad map: just a slice": { - Schema: map[string]*Schema{ - "user_data": &Schema{ - Type: TypeMap, - Optional: true, - }, - }, - - Config: map[string]interface{}{ - "user_data": []interface{}{ - "foo", - }, - }, - - Err: true, - }, - - "Good set: config has slice with single interpolated value": { - Schema: map[string]*Schema{ - "security_groups": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &Schema{Type: TypeString}, - Set: func(v interface{}) int { - return len(v.(string)) - }, - }, - }, - - Config: map[string]interface{}{ - "security_groups": []interface{}{"${var.foo}"}, - }, - - Err: false, - }, - - "Bad set: config has single interpolated value": { - Schema: map[string]*Schema{ - "security_groups": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &Schema{Type: TypeString}, - }, - }, - - Config: map[string]interface{}{ - "security_groups": "${var.foo}", - }, - - Err: true, - }, - - "Bad, subresource should not allow unknown elements": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{ - "port": 80, - "other": "yes", - }, - }, - }, - - Err: true, - }, - - "Bad, subresource should not allow 
invalid types": { - Schema: map[string]*Schema{ - "ingress": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "port": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "ingress": []interface{}{ - map[string]interface{}{ - "port": "bad", - }, - }, - }, - - Err: true, - }, - - "Bad, should not allow lists to be assigned to string attributes": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Required: true, - }, - }, - - Config: map[string]interface{}{ - "availability_zone": []interface{}{"foo", "bar", "baz"}, - }, - - Err: true, - }, - - "Bad, should not allow maps to be assigned to string attributes": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Required: true, - }, - }, - - Config: map[string]interface{}{ - "availability_zone": map[string]interface{}{"foo": "bar", "baz": "thing"}, - }, - - Err: true, - }, - - "Deprecated attribute usage generates warning, but not error": { - Schema: map[string]*Schema{ - "old_news": &Schema{ - Type: TypeString, - Optional: true, - Deprecated: "please use 'new_news' instead", - }, - }, - - Config: map[string]interface{}{ - "old_news": "extra extra!", - }, - - Err: false, - - Warnings: []string{ - "\"old_news\": [DEPRECATED] please use 'new_news' instead", - }, - }, - - "Deprecated generates no warnings if attr not used": { - Schema: map[string]*Schema{ - "old_news": &Schema{ - Type: TypeString, - Optional: true, - Deprecated: "please use 'new_news' instead", - }, - }, - - Err: false, - - Warnings: nil, - }, - - "Removed attribute usage generates error": { - Schema: map[string]*Schema{ - "long_gone": &Schema{ - Type: TypeString, - Optional: true, - Removed: "no longer supported by Cloud API", - }, - }, - - Config: map[string]interface{}{ - "long_gone": "still here!", - }, - - Err: true, - Errors: []error{ - fmt.Errorf("\"long_gone\": [REMOVED] no longer 
supported by Cloud API"), - }, - }, - - "Removed generates no errors if attr not used": { - Schema: map[string]*Schema{ - "long_gone": &Schema{ - Type: TypeString, - Optional: true, - Removed: "no longer supported by Cloud API", - }, - }, - - Err: false, - }, - - "Conflicting attributes generate error": { - Schema: map[string]*Schema{ - "b": &Schema{ - Type: TypeString, - Optional: true, - }, - "a": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"b"}, - }, - }, - - Config: map[string]interface{}{ - "b": "b-val", - "a": "a-val", - }, - - Err: true, - Errors: []error{ - fmt.Errorf("\"a\": conflicts with b"), - }, - }, - - "Conflicting attributes okay when unknown 1": { - Schema: map[string]*Schema{ - "b": &Schema{ - Type: TypeString, - Optional: true, - }, - "a": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"b"}, - }, - }, - - Config: map[string]interface{}{ - "b": "b-val", - "a": hcl2shim.UnknownVariableValue, - }, - - Err: false, - }, - - "Conflicting attributes okay when unknown 2": { - Schema: map[string]*Schema{ - "b": &Schema{ - Type: TypeString, - Optional: true, - }, - "a": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"b"}, - }, - }, - - Config: map[string]interface{}{ - "b": hcl2shim.UnknownVariableValue, - "a": "a-val", - }, - - Err: false, - }, - - "Conflicting attributes generate error even if one is unknown": { - Schema: map[string]*Schema{ - "b": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"a", "c"}, - }, - "a": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"b", "c"}, - }, - "c": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"b", "a"}, - }, - }, - - Config: map[string]interface{}{ - "b": hcl2shim.UnknownVariableValue, - "a": "a-val", - "c": "c-val", - }, - - Err: true, - Errors: []error{ - fmt.Errorf("\"a\": conflicts with c"), - fmt.Errorf("\"c\": conflicts with a"), - }, - }, - - "Required 
attribute & undefined conflicting optional are good": { - Schema: map[string]*Schema{ - "required_att": &Schema{ - Type: TypeString, - Required: true, - }, - "optional_att": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"required_att"}, - }, - }, - - Config: map[string]interface{}{ - "required_att": "required-val", - }, - - Err: false, - }, - - "Required conflicting attribute & defined optional generate error": { - Schema: map[string]*Schema{ - "required_att": &Schema{ - Type: TypeString, - Required: true, - }, - "optional_att": &Schema{ - Type: TypeString, - Optional: true, - ConflictsWith: []string{"required_att"}, - }, - }, - - Config: map[string]interface{}{ - "required_att": "required-val", - "optional_att": "optional-val", - }, - - Err: true, - Errors: []error{ - fmt.Errorf(`"optional_att": conflicts with required_att`), - }, - }, - - "Computed + Optional fields conflicting with each other": { - Schema: map[string]*Schema{ - "foo_att": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"bar_att"}, - }, - "bar_att": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"foo_att"}, - }, - }, - - Config: map[string]interface{}{ - "foo_att": "foo-val", - "bar_att": "bar-val", - }, - - Err: true, - Errors: []error{ - fmt.Errorf(`"foo_att": conflicts with bar_att`), - fmt.Errorf(`"bar_att": conflicts with foo_att`), - }, - }, - - "Computed + Optional fields NOT conflicting with each other": { - Schema: map[string]*Schema{ - "foo_att": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"bar_att"}, - }, - "bar_att": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"foo_att"}, - }, - }, - - Config: map[string]interface{}{ - "foo_att": "foo-val", - }, - - Err: false, - }, - - "Computed + Optional fields that conflict with none set": { - Schema: map[string]*Schema{ - "foo_att": &Schema{ 
- Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"bar_att"}, - }, - "bar_att": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"foo_att"}, - }, - }, - - Config: map[string]interface{}{}, - - Err: false, - }, - - "Good with ValidateFunc": { - Schema: map[string]*Schema{ - "validate_me": &Schema{ - Type: TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - return - }, - }, - }, - Config: map[string]interface{}{ - "validate_me": "valid", - }, - Err: false, - }, - - "Bad with ValidateFunc": { - Schema: map[string]*Schema{ - "validate_me": &Schema{ - Type: TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - es = append(es, fmt.Errorf("something is not right here")) - return - }, - }, - }, - Config: map[string]interface{}{ - "validate_me": "invalid", - }, - Err: true, - Errors: []error{ - fmt.Errorf(`something is not right here`), - }, - }, - - "ValidateFunc not called when type does not match": { - Schema: map[string]*Schema{ - "number": &Schema{ - Type: TypeInt, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - t.Fatalf("Should not have gotten validate call") - return - }, - }, - }, - Config: map[string]interface{}{ - "number": "NaN", - }, - Err: true, - }, - - "ValidateFunc gets decoded type": { - Schema: map[string]*Schema{ - "maybe": &Schema{ - Type: TypeBool, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - if _, ok := v.(bool); !ok { - t.Fatalf("Expected bool, got: %#v", v) - } - return - }, - }, - }, - Config: map[string]interface{}{ - "maybe": "true", - }, - }, - - "ValidateFunc is not called with a computed value": { - Schema: map[string]*Schema{ - "validate_me": &Schema{ - Type: TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - es = 
append(es, fmt.Errorf("something is not right here")) - return - }, - }, - }, - Config: map[string]interface{}{ - "validate_me": hcl2shim.UnknownVariableValue, - }, - - Err: false, - }, - - "special timeouts field": { - Schema: map[string]*Schema{ - "availability_zone": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - - Config: map[string]interface{}{ - TimeoutsConfigKey: "bar", - }, - - Err: false, - }, - - "invalid bool field": { - Schema: map[string]*Schema{ - "bool_field": { - Type: TypeBool, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "bool_field": "abcdef", - }, - Err: true, - }, - "invalid integer field": { - Schema: map[string]*Schema{ - "integer_field": { - Type: TypeInt, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "integer_field": "abcdef", - }, - Err: true, - }, - "invalid float field": { - Schema: map[string]*Schema{ - "float_field": { - Type: TypeFloat, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "float_field": "abcdef", - }, - Err: true, - }, - - // Invalid map values - "invalid bool map value": { - Schema: map[string]*Schema{ - "boolMap": &Schema{ - Type: TypeMap, - Elem: TypeBool, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "boolMap": map[string]interface{}{ - "boolField": "notbool", - }, - }, - Err: true, - }, - "invalid int map value": { - Schema: map[string]*Schema{ - "intMap": &Schema{ - Type: TypeMap, - Elem: TypeInt, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "intMap": map[string]interface{}{ - "intField": "notInt", - }, - }, - Err: true, - }, - "invalid float map value": { - Schema: map[string]*Schema{ - "floatMap": &Schema{ - Type: TypeMap, - Elem: TypeFloat, - Optional: true, - }, - }, - Config: map[string]interface{}{ - "floatMap": map[string]interface{}{ - "floatField": "notFloat", - }, - }, - Err: true, - }, - - "map with positive validate function": { - Schema: map[string]*Schema{ - 
"floatInt": &Schema{ - Type: TypeMap, - Elem: TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - return - }, - }, - }, - Config: map[string]interface{}{ - "floatInt": map[string]interface{}{ - "rightAnswer": "42", - "tooMuch": "43", - }, - }, - Err: false, - }, - "map with negative validate function": { - Schema: map[string]*Schema{ - "floatInt": &Schema{ - Type: TypeMap, - Elem: TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - es = append(es, fmt.Errorf("this is not fine")) - return - }, - }, - }, - Config: map[string]interface{}{ - "floatInt": map[string]interface{}{ - "rightAnswer": "42", - "tooMuch": "43", - }, - }, - Err: true, - }, - - // The Validation function should not see interpolation strings from - // non-computed values. - "set with partially computed list and map": { - Schema: map[string]*Schema{ - "outer": &Schema{ - Type: TypeSet, - Optional: true, - Computed: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "list": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{ - Type: TypeString, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - if strings.HasPrefix(v.(string), "${") { - es = append(es, fmt.Errorf("should not have interpolations")) - } - return - }, - }, - }, - }, - }, - }, - }, - Config: map[string]interface{}{ - "outer": []interface{}{ - map[string]interface{}{ - "list": []interface{}{"A", hcl2shim.UnknownVariableValue, "c"}, - }, - }, - }, - Err: false, - }, - "unexpected nils values": { - Schema: map[string]*Schema{ - "strings": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Schema{ - Type: TypeString, - }, - }, - "block": &Schema{ - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "int": &Schema{ - Type: TypeInt, - Required: true, - }, - }, - }, - }, - }, - - Config: map[string]interface{}{ - "strings": []interface{}{"1", nil}, - "block": 
[]interface{}{map[string]interface{}{ - "int": nil, - }, - nil, - }, - }, - Err: true, - }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - c := terraform.NewResourceConfigRaw(tc.Config) - - ws, es := schemaMap(tc.Schema).Validate(c) - if len(es) > 0 != tc.Err { - if len(es) == 0 { - t.Errorf("%q: no errors", tn) - } - - for _, e := range es { - t.Errorf("%q: err: %s", tn, e) - } - - t.FailNow() - } - - if !reflect.DeepEqual(ws, tc.Warnings) { - t.Fatalf("%q: warnings:\n\nexpected: %#v\ngot:%#v", tn, tc.Warnings, ws) - } - - if tc.Errors != nil { - sort.Sort(errorSort(es)) - sort.Sort(errorSort(tc.Errors)) - - if !reflect.DeepEqual(es, tc.Errors) { - t.Fatalf("%q: errors:\n\nexpected: %q\ngot: %q", tn, tc.Errors, es) - } - } - }) - - } -} - -func TestSchemaSet_ValidateMaxItems(t *testing.T) { - cases := map[string]struct { - Schema map[string]*Schema - State *terraform.InstanceState - Config map[string]interface{} - ConfigVariables map[string]string - Diff *terraform.InstanceDiff - Err bool - Errors []error - }{ - "#0": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - MaxItems: 1, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo", "bar"}, - }, - Diff: nil, - Err: true, - Errors: []error{ - fmt.Errorf("aliases: attribute supports 1 item maximum, config has 2 declared"), - }, - }, - "#1": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo", "bar"}, - }, - Diff: nil, - Err: false, - Errors: nil, - }, - "#2": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - MaxItems: 1, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo"}, - }, - Diff: nil, - Err: false, - 
Errors: nil, - }, - } - - for tn, tc := range cases { - c := terraform.NewResourceConfigRaw(tc.Config) - _, es := schemaMap(tc.Schema).Validate(c) - - if len(es) > 0 != tc.Err { - if len(es) == 0 { - t.Errorf("%q: no errors", tn) - } - - for _, e := range es { - t.Errorf("%q: err: %s", tn, e) - } - - t.FailNow() - } - - if tc.Errors != nil { - if !reflect.DeepEqual(es, tc.Errors) { - t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) - } - } - } -} - -func TestSchemaSet_ValidateMinItems(t *testing.T) { - cases := map[string]struct { - Schema map[string]*Schema - State *terraform.InstanceState - Config map[string]interface{} - ConfigVariables map[string]string - Diff *terraform.InstanceDiff - Err bool - Errors []error - }{ - "#0": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - MinItems: 2, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo", "bar"}, - }, - Diff: nil, - Err: false, - Errors: nil, - }, - "#1": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo", "bar"}, - }, - Diff: nil, - Err: false, - Errors: nil, - }, - "#2": { - Schema: map[string]*Schema{ - "aliases": &Schema{ - Type: TypeSet, - Optional: true, - MinItems: 2, - Elem: &Schema{Type: TypeString}, - }, - }, - State: nil, - Config: map[string]interface{}{ - "aliases": []interface{}{"foo"}, - }, - Diff: nil, - Err: true, - Errors: []error{ - fmt.Errorf("aliases: attribute supports 2 item as a minimum, config has 1 declared"), - }, - }, - } - - for tn, tc := range cases { - c := terraform.NewResourceConfigRaw(tc.Config) - _, es := schemaMap(tc.Schema).Validate(c) - - if len(es) > 0 != tc.Err { - if len(es) == 0 { - t.Errorf("%q: no errors", tn) - } - - for _, e := range es { - t.Errorf("%q: err: %s", tn, e) - } - - 
t.FailNow() - } - - if tc.Errors != nil { - if !reflect.DeepEqual(es, tc.Errors) { - t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) - } - } - } -} - -// errorSort implements sort.Interface to sort errors by their error message -type errorSort []error - -func (e errorSort) Len() int { return len(e) } -func (e errorSort) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e errorSort) Less(i, j int) bool { - return e[i].Error() < e[j].Error() -} - -func TestSchemaMapDeepCopy(t *testing.T) { - schema := map[string]*Schema{ - "foo": &Schema{ - Type: TypeString, - }, - } - source := schemaMap(schema) - dest := source.DeepCopy() - dest["foo"].ForceNew = true - if reflect.DeepEqual(source, dest) { - t.Fatalf("source and dest should not match") - } -} diff --git a/internal/legacy/helper/schema/set.go b/internal/legacy/helper/schema/set.go deleted file mode 100644 index b44035c7c58f..000000000000 --- a/internal/legacy/helper/schema/set.go +++ /dev/null @@ -1,250 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "sync" - - "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" -) - -// HashString hashes strings. If you want a Set of strings, this is the -// SchemaSetFunc you want. -func HashString(v interface{}) int { - return hashcode.String(v.(string)) -} - -// HashInt hashes integers. If you want a Set of integers, this is the -// SchemaSetFunc you want. -func HashInt(v interface{}) int { - return hashcode.String(strconv.Itoa(v.(int))) -} - -// HashResource hashes complex structures that are described using -// a *Resource. This is the default set implementation used when a set's -// element type is a full resource. -func HashResource(resource *Resource) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeResourceForHash(&buf, v, resource) - return hashcode.String(buf.String()) - } -} - -// HashSchema hashes values that are described using a *Schema. 
This is the -// default set implementation used when a set's element type is a single -// schema. -func HashSchema(schema *Schema) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeValueForHash(&buf, v, schema) - return hashcode.String(buf.String()) - } -} - -// Set is a set data structure that is returned for elements of type -// TypeSet. -type Set struct { - F SchemaSetFunc - - m map[string]interface{} - once sync.Once -} - -// NewSet is a convenience method for creating a new set with the given -// items. -func NewSet(f SchemaSetFunc, items []interface{}) *Set { - s := &Set{F: f} - for _, i := range items { - s.Add(i) - } - - return s -} - -// CopySet returns a copy of another set. -func CopySet(otherSet *Set) *Set { - return NewSet(otherSet.F, otherSet.List()) -} - -// Add adds an item to the set if it isn't already in the set. -func (s *Set) Add(item interface{}) { - s.add(item, false) -} - -// Remove removes an item if it's already in the set. Idempotent. -func (s *Set) Remove(item interface{}) { - s.remove(item) -} - -// Contains checks if the set has the given item. -func (s *Set) Contains(item interface{}) bool { - _, ok := s.m[s.hash(item)] - return ok -} - -// Len returns the amount of items in the set. -func (s *Set) Len() int { - return len(s.m) -} - -// List returns the elements of this set in slice format. -// -// The order of the returned elements is deterministic. Given the same -// set, the order of this will always be the same. -func (s *Set) List() []interface{} { - result := make([]interface{}, len(s.m)) - for i, k := range s.listCode() { - result[i] = s.m[k] - } - - return result -} - -// Difference performs a set difference of the two sets, returning -// a new third set that has only the elements unique to this set. 
-func (s *Set) Difference(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; !ok { - result.m[k] = v - } - } - - return result -} - -// Intersection performs the set intersection of the two sets -// and returns a new third set. -func (s *Set) Intersection(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; ok { - result.m[k] = v - } - } - - return result -} - -// Union performs the set union of the two sets and returns a new third -// set. -func (s *Set) Union(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - result.m[k] = v - } - for k, v := range other.m { - result.m[k] = v - } - - return result -} - -func (s *Set) Equal(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - return reflect.DeepEqual(s.m, other.m) -} - -// HashEqual simply checks to the keys the top-level map to the keys in the -// other set's top-level map to see if they are equal. This obviously assumes -// you have a properly working hash function - use HashResource if in doubt. 
-func (s *Set) HashEqual(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - ks1 := make([]string, 0) - ks2 := make([]string, 0) - - for k := range s.m { - ks1 = append(ks1, k) - } - for k := range other.m { - ks2 = append(ks2, k) - } - - sort.Strings(ks1) - sort.Strings(ks2) - - return reflect.DeepEqual(ks1, ks2) -} - -func (s *Set) GoString() string { - return fmt.Sprintf("*Set(%#v)", s.m) -} - -func (s *Set) init() { - s.m = make(map[string]interface{}) -} - -func (s *Set) add(item interface{}, computed bool) string { - s.once.Do(s.init) - - code := s.hash(item) - if computed { - code = "~" + code - - if isProto5() { - tmpCode := code - count := 0 - for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { - count++ - tmpCode = fmt.Sprintf("%s%d", code, count) - } - code = tmpCode - } - } - - if _, ok := s.m[code]; !ok { - s.m[code] = item - } - - return code -} - -func (s *Set) hash(item interface{}) string { - code := s.F(item) - // Always return a nonnegative hashcode. 
- if code < 0 { - code = -code - } - return strconv.Itoa(code) -} - -func (s *Set) remove(item interface{}) string { - s.once.Do(s.init) - - code := s.hash(item) - delete(s.m, code) - - return code -} - -func (s *Set) index(item interface{}) int { - return sort.SearchStrings(s.listCode(), s.hash(item)) -} - -func (s *Set) listCode() []string { - // Sort the hash codes so the order of the list is deterministic - keys := make([]string, 0, len(s.m)) - for k := range s.m { - keys = append(keys, k) - } - sort.Sort(sort.StringSlice(keys)) - return keys -} diff --git a/internal/legacy/helper/schema/testing.go b/internal/legacy/helper/schema/testing.go deleted file mode 100644 index 3b328a87c451..000000000000 --- a/internal/legacy/helper/schema/testing.go +++ /dev/null @@ -1,28 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/legacy/terraform" -) - -// TestResourceDataRaw creates a ResourceData from a raw configuration map. -func TestResourceDataRaw( - t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { - t.Helper() - - c := terraform.NewResourceConfigRaw(raw) - - sm := schemaMap(schema) - diff, err := sm.Diff(nil, c, nil, nil, true) - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := sm.Data(nil, diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - return result -} diff --git a/internal/legacy/terraform/diff.go b/internal/legacy/terraform/diff.go deleted file mode 100644 index 77bea3258906..000000000000 --- a/internal/legacy/terraform/diff.go +++ /dev/null @@ -1,1451 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" - - "github.com/mitchellh/copystructure" -) - -// 
DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte - -const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate - - // DiffRefresh is only used in the UI for displaying diffs. - // Managed resource reads never appear in plan, and when data source - // reads appear they are represented as DiffCreate in core before - // transforming to DiffRefresh in the UI layer. - DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion -) - -// multiVal matches the index key to a flatmapped set, list or map -var multiVal = regexp.MustCompile(`\.(#|%)$`) - -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. -type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. -// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. -func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. 
- legacyPath := make([]string, len(path)) - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("diff cannot represent modules with count or for_each keys") - } - - legacyPath[i] = step.Name - } - - m := &ModuleDiff{Path: legacyPath} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to lookup the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// RootModule returns the ModuleState for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. 
TODO: delete this when we get rid of the - // destroy flag on modules. - dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. -func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - addr := normalizeModulePath(m.Path) - key := addr.String() - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -func (d *Diff) init() { - if d.Modules == nil { - rootDiff := &ModuleDiff{Path: rootModulePath} - d.Modules = []*ModuleDiff{rootDiff} - } - for _, m := range d.Modules { - m.init() - } -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. 
-type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. -// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffCreate for a module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff. 
-func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name, _ := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key, _ := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - -// InstanceDiff is the diff of a resource from some state to another. -type InstanceDiff struct { - mu sync.Mutex - Attributes map[string]*ResourceAttrDiff - Destroy bool - DestroyDeposed bool - DestroyTainted bool - - // Meta is a simple K/V map that is stored in a diff and persisted to - // plans but otherwise is completely ignored by Terraform core. It is - // meant to be used for additional data a resource may want to pass through. 
- // The value here must only contain Go primitives and collections. - Meta map[string]interface{} -} - -func (d *InstanceDiff) Lock() { d.mu.Lock() } -func (d *InstanceDiff) Unlock() { d.mu.Unlock() } - -// ApplyToValue merges the receiver into the given base value, returning a -// new value that incorporates the planned changes. The given value must -// conform to the given schema, or this method will panic. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { - // Create an InstanceState attributes from our existing state. - // We can use this to more easily apply the diff changes. - attrs := hcl2shim.FlatmapValueFromHCL2(base) - applied, err := d.Apply(attrs, schema) - if err != nil { - return base, err - } - - val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) - if err != nil { - return base, err - } - - return schema.CoerceValue(val) -} - -// Apply applies the diff to the provided flatmapped attributes, -// returning the new instance attributes. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - // We always build a new value here, even if the given diff is "empty", - // because we might be planning to create a new instance that happens - // to have no attributes set, and so we want to produce an empty object - // rather than just echoing back the null old value. - if attrs == nil { - attrs = map[string]string{} - } - - // Rather applying the diff to mutate the attrs, we'll copy new values into - // here to avoid the possibility of leaving stale values. 
- result := map[string]string{} - - if d.Destroy || d.DestroyDeposed || d.DestroyTainted { - return result, nil - } - - return d.applyBlockDiff(nil, attrs, schema) -} - -func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - result := map[string]string{} - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - // localPrefix is used to build the local result map - localPrefix := "" - if name != "" { - localPrefix = name + "." - } - - // iterate over the schema rather than the attributes, so we can handle - // different block types separately from plain attributes - for n, attrSchema := range schema.Attributes { - var err error - newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) - - if err != nil { - return result, err - } - - for k, v := range newAttrs { - result[localPrefix+k] = v - } - } - - blockPrefix := strings.Join(path, ".") - if blockPrefix != "" { - blockPrefix += "." - } - for n, block := range schema.BlockTypes { - // we need to find the set of all keys that traverse this block - candidateKeys := map[string]bool{} - blockKey := blockPrefix + n + "." - localBlockPrefix := localPrefix + n + "." - - // we can only trust the diff for sets, since the path changes, so don't - // count existing values as candidate keys. If it turns out we're - // keeping the attributes, we will catch it down below with "keepBlock" - // after we check the set count. - if block.Nesting != configschema.NestingSet { - for k := range attrs { - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - } - - for k, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. 
- if diff == nil { - continue - } - - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - - if diff.NewRemoved { - continue - } - - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - - // check each set candidate to see if it was removed. - // we need to do this, because when entire sets are removed, they may - // have the wrong key, and ony show diffs going to "" - if block.Nesting == configschema.NestingSet { - for k := range candidateKeys { - indexPrefix := strings.Join(append(path, n, k), ".") + "." - keep := false - // now check each set element to see if it's a new diff, or one - // that we're dropping. Since we're only applying the "New" - // portion of the set, we can ignore diffs that only contain "Old" - for attr, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. - if diff == nil { - continue - } - - if !strings.HasPrefix(attr, indexPrefix) { - continue - } - - // check for empty "count" keys - if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { - continue - } - - // removed items don't count either - if diff.NewRemoved { - continue - } - - // this must be a diff to keep - keep = true - break - } - if !keep { - delete(candidateKeys, k) - } - } - } - - for k := range candidateKeys { - newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) - if err != nil { - return result, err - } - - for attr, v := range newAttrs { - result[localBlockPrefix+attr] = v - } - } - - keepBlock := true - // check this block's count diff directly first, since we may not - // have candidates because it was removed and only set to "0" - if diff, ok := d.Attributes[blockKey+"#"]; ok { - if diff.New == "0" || diff.NewRemoved { - keepBlock = false - } - } - - // if there was no diff at all, then we need to keep the block attributes - if len(candidateKeys) == 0 
&& keepBlock { - for k, v := range attrs { - if strings.HasPrefix(k, blockKey) { - // we need the key relative to this block, so remove the - // entire prefix, then re-insert the block name. - localKey := localBlockPrefix + k[len(blockKey):] - result[localKey] = v - } - } - } - - countAddr := strings.Join(append(path, n, "#"), ".") - if countDiff, ok := d.Attributes[countAddr]; ok { - if countDiff.NewComputed { - result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue - } else { - result[localBlockPrefix+"#"] = countDiff.New - - // While sets are complete, list are not, and we may not have all the - // information to track removals. If the list was truncated, we need to - // remove the extra items from the result. - if block.Nesting == configschema.NestingList && - countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { - length, _ := strconv.Atoi(countDiff.New) - for k := range result { - if !strings.HasPrefix(k, localBlockPrefix) { - continue - } - - index := k[len(localBlockPrefix):] - nextDot := strings.Index(index, ".") - if nextDot < 1 { - continue - } - index = index[:nextDot] - i, err := strconv.Atoi(index) - if err != nil { - // this shouldn't happen since we added these - // ourself, but make note of it just in case. 
- log.Printf("[ERROR] bad list index in %q: %s", k, err) - continue - } - if i >= length { - delete(result, k) - } - } - } - } - } else if origCount, ok := attrs[countAddr]; ok && keepBlock { - result[localBlockPrefix+"#"] = origCount - } else { - result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) - } - } - - return result, nil -} - -func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - ty := attrSchema.Type - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): - return d.applyCollectionDiff(path, attrs, attrSchema) - case ty.IsSetType(): - return d.applySetDiff(path, attrs, attrSchema) - default: - return d.applySingleAttrDiff(path, attrs, attrSchema) - } -} - -func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - currentKey := strings.Join(path, ".") - - attr := path[len(path)-1] - - result := map[string]string{} - diff := d.Attributes[currentKey] - old, exists := attrs[currentKey] - - if diff != nil && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - // "id" must exist and not be an empty string, or it must be unknown. - // This only applied to top-level "id" fields. - if attr == "id" && len(path) == 1 { - if old == "" { - result[attr] = hcl2shim.UnknownVariableValue - } else { - result[attr] = old - } - return result, nil - } - - // attribute diffs are sometimes missed, so assume no diff means keep the - // old value - if diff == nil { - if exists { - result[attr] = old - } else { - // We need required values, so set those with an empty value. It - // must be set in the config, since if it were missing it would have - // failed validation. 
- if attrSchema.Required { - // we only set a missing string here, since bool or number types - // would have distinct zero value which shouldn't have been - // lost. - if attrSchema.Type == cty.String { - result[attr] = "" - } - } - } - return result, nil - } - - // check for missmatched diff values - if exists && - old != diff.Old && - old != hcl2shim.UnknownVariableValue && - diff.Old != hcl2shim.UnknownVariableValue { - return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) - } - - if diff.NewRemoved { - // don't set anything in the new value - return map[string]string{}, nil - } - - if diff.Old == diff.New && diff.New == "" { - // this can only be a valid empty string - if attrSchema.Type == cty.String { - result[attr] = "" - } - return result, nil - } - - if attrSchema.Computed && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - result[attr] = diff.New - - return result, nil -} - -func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - result := map[string]string{} - - prefix := "" - if len(path) > 1 { - prefix = strings.Join(path[:len(path)-1], ".") + "." 
- } - - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - currentKey := prefix + name - - // check the index first for special handling - for k, diff := range d.Attributes { - // check the index value, which can be set, and 0 - if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { - if diff.NewRemoved { - return result, nil - } - - if diff.NewComputed { - result[k[len(prefix):]] = hcl2shim.UnknownVariableValue - return result, nil - } - - // do what the diff tells us to here, so that it's consistent with applies - if diff.New == "0" { - result[k[len(prefix):]] = "0" - return result, nil - } - } - } - - // collect all the keys from the diff and the old state - noDiff := true - keys := map[string]bool{} - for k := range d.Attributes { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noDiff = false - keys[k] = true - } - - noAttrs := true - for k := range attrs { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noAttrs = false - keys[k] = true - } - - // If there's no diff and no attrs, then there's no value at all. - // This prevents an unexpected zero-count attribute in the attributes. - if noDiff && noAttrs { - return result, nil - } - - idx := "#" - if attrSchema.Type.IsMapType() { - idx = "%" - } - - for k := range keys { - // generate an schema placeholder for the values - elSchema := &configschema.Attribute{ - Type: attrSchema.Type.ElementType(), - } - - res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) - if err != nil { - return result, err - } - - for k, v := range res { - result[name+"."+k] = v - } - } - - // Just like in nested list blocks, for simple lists we may need to fill in - // missing empty strings. - countKey := name + "." 
+ idx - count := result[countKey] - length, _ := strconv.Atoi(count) - - if count != "" && count != hcl2shim.UnknownVariableValue && - attrSchema.Type.Equals(cty.List(cty.String)) { - // insert empty strings into missing indexes - for i := 0; i < length; i++ { - key := fmt.Sprintf("%s.%d", name, i) - if _, ok := result[key]; !ok { - result[key] = "" - } - } - } - - // now check for truncation in any type of list - if attrSchema.Type.IsListType() { - for key := range result { - if key == countKey { - continue - } - - if len(key) <= len(name)+1 { - // not sure what this is, but don't panic - continue - } - - index := key[len(name)+1:] - - // It is possible to have nested sets or maps, so look for another dot - dot := strings.Index(index, ".") - if dot > 0 { - index = index[:dot] - } - - // This shouldn't have any more dots, since the element type is only string. - num, err := strconv.Atoi(index) - if err != nil { - log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) - continue - } - - if num >= length { - delete(result, key) - } - } - } - - // Fill in the count value if it wasn't present in the diff for some reason, - // or if there is no count at all. - _, countDiff := d.Attributes[countKey] - if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { - result[countKey] = countFlatmapContainerValues(countKey, result) - } - - return result, nil -} - -func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - // We only need this special behavior for sets of object. - if !attrSchema.Type.ElementType().IsObjectType() { - // The normal collection apply behavior will work okay for this one, then. 
- return d.applyCollectionDiff(path, attrs, attrSchema) - } - - // When we're dealing with a set of an object type we actually want to - // use our normal _block type_ apply behaviors, so we'll construct ourselves - // a synthetic schema that treats the object type as a block type and - // then delegate to our block apply method. - synthSchema := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - } - - for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { - // We can safely make everything into an attribute here because in the - // event that there are nested set attributes we'll end up back in - // here again recursively and can then deal with the next level of - // expansion. - synthSchema.Attributes[name] = &configschema.Attribute{ - Type: ty, - Optional: true, - } - } - - parentPath := path[:len(path)-1] - childName := path[len(path)-1] - containerSchema := &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - childName: { - Nesting: configschema.NestingSet, - Block: *synthSchema, - }, - }, - } - - return d.applyBlockDiff(parentPath, attrs, containerSchema) -} - -// countFlatmapContainerValues returns the number of values in the flatmapped container -// (set, map, list) indexed by key. The key argument is expected to include the -// trailing ".#", or ".%". -func countFlatmapContainerValues(key string, attrs map[string]string) string { - if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - panic(fmt.Sprintf("invalid index value %q", key)) - } - - prefix := key[:len(key)-1] - items := map[string]int{} - - for k := range attrs { - if k == key { - continue - } - if !strings.HasPrefix(k, prefix) { - continue - } - - suffix := k[len(prefix):] - dot := strings.Index(suffix, ".") - if dot > 0 { - suffix = suffix[:dot] - } - - items[suffix]++ - } - return strconv.Itoa(len(items)) -} - -// ResourceAttrDiff is the diff of a single attribute of a resource. 
-type ResourceAttrDiff struct { - Old string // Old Value - New string // New Value - NewComputed bool // True if new value is computed (unknown currently) - NewRemoved bool // True if this attribute is being removed - NewExtra interface{} // Extra information for the provider - RequiresNew bool // True if change requires new resource - Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved -} - -func (d *ResourceAttrDiff) GoString() string { - return fmt.Sprintf("*%#v", *d) -} - -// DiffAttrType is an enum type that says whether a resource attribute -// diff is an input attribute (comes from the configuration) or an -// output attribute (comes as a result of applying the configuration). An -// example input would be "ami" for AWS and an example output would be -// "private_ip". -type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} - -func NewInstanceDiff() *InstanceDiff { - return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} -} - -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff -// for this single instance. 
-func (d *InstanceDiff) ChangeType() DiffChangeType { - if d.Empty() { - return DiffNone - } - - if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate - } - - if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy - } - - if d.RequiresNew() { - return DiffCreate - } - - return DiffUpdate -} - -// Empty returns true if this diff encapsulates no changes. -func (d *InstanceDiff) Empty() bool { - if d == nil { - return true - } - - d.mu.Lock() - defer d.mu.Unlock() - return !d.Destroy && - !d.DestroyTainted && - !d.DestroyDeposed && - len(d.Attributes) == 0 -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Use DeepEqual - return reflect.DeepEqual(d, d2) -} - -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - -func (d *InstanceDiff) GoString() string { - return fmt.Sprintf("*%#v", InstanceDiff{ - Attributes: d.Attributes, - Destroy: d.Destroy, - DestroyTainted: d.DestroyTainted, - DestroyDeposed: d.DestroyDeposed, - }) -} - -// RequiresNew returns true if the diff requires the creation of a new -// resource (implying the destruction of the old). 
-func (d *InstanceDiff) RequiresNew() bool { - if d == nil { - return false - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.requiresNew() -} - -func (d *InstanceDiff) requiresNew() bool { - if d == nil { - return false - } - - if d.DestroyTainted { - return true - } - - for _, rd := range d.Attributes { - if rd != nil && rd.RequiresNew { - return true - } - } - - return false -} - -func (d *InstanceDiff) GetDestroyDeposed() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyDeposed -} - -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. -// TODO refactor the locking scheme -func (d *InstanceDiff) SetTainted(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyTainted = b -} - -func (d *InstanceDiff) GetDestroyTainted() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyTainted -} - -func (d *InstanceDiff) SetDestroy(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Destroy = b -} - -func (d *InstanceDiff) GetDestroy() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.Destroy -} - -func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Attributes[key] = attr -} - -func (d *InstanceDiff) DelAttribute(key string) { - d.mu.Lock() - defer d.mu.Unlock() - - delete(d.Attributes, key) -} - -func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { - d.mu.Lock() - defer d.mu.Unlock() - - attr, ok := d.Attributes[key] - return attr, ok -} -func (d *InstanceDiff) GetAttributesLen() int { - d.mu.Lock() - defer d.mu.Unlock() - - return len(d.Attributes) -} - -// Safely copies the Attributes map -func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { - d.mu.Lock() - defer d.mu.Unlock() - - attrs := make(map[string]*ResourceAttrDiff) - for k, 
v := range d.Attributes { - attrs[k] = v - } - - return attrs -} - -// Same checks whether or not two InstanceDiff's are the "same". When -// we say "same", it is not necessarily exactly equal. Instead, it is -// just checking that the same attributes are changing, a destroy -// isn't suddenly happening, etc. -func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { - // we can safely compare the pointers without a lock - switch { - case d == nil && d2 == nil: - return true, "" - case d == nil || d2 == nil: - return false, "one nil" - case d == d2: - return true, "" - } - - d.mu.Lock() - defer d.mu.Unlock() - - // If we're going from requiring new to NOT requiring new, then we have - // to see if all required news were computed. If so, it is allowed since - // computed may also mean "same value and therefore not new". - oldNew := d.requiresNew() - newNew := d2.RequiresNew() - if oldNew && !newNew { - oldNew = false - - // This section builds a list of ignorable attributes for requiresNew - // by removing off any elements of collections going to zero elements. - // For collections going to zero, they may not exist at all in the - // new diff (and hence RequiresNew == false). - ignoreAttrs := make(map[string]struct{}) - for k, diffOld := range d.Attributes { - if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { - continue - } - - // This case is in here as a protection measure. The bug that this - // code originally fixed (GH-11349) didn't have to deal with computed - // so I'm not 100% sure what the correct behavior is. Best to leave - // the old behavior. - if diffOld.NewComputed { - continue - } - - // We're looking for the case a map goes to exactly 0. - if diffOld.New != "0" { - continue - } - - // Found it! Ignore all of these. The prefix here is stripping - // off the "%" so it is just "k." 
- prefix := k[:len(k)-1] - for k2, _ := range d.Attributes { - if strings.HasPrefix(k2, prefix) { - ignoreAttrs[k2] = struct{}{} - } - } - } - - for k, rd := range d.Attributes { - if _, ok := ignoreAttrs[k]; ok { - continue - } - - // If the field is requires new and NOT computed, then what - // we have is a diff mismatch for sure. We set that the old - // diff does REQUIRE a ForceNew. - if rd != nil && rd.RequiresNew && !rd.NewComputed { - oldNew = true - break - } - } - } - - if oldNew != newNew { - return false, fmt.Sprintf( - "diff RequiresNew; old: %t, new: %t", oldNew, newNew) - } - - // Verify that destroy matches. The second boolean here allows us to - // have mismatching Destroy if we're moving from RequiresNew true - // to false above. Therefore, the second boolean will only pass if - // we're moving from Destroy: true to false as well. - if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { - return false, fmt.Sprintf( - "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) - } - - // Go through the old diff and make sure the new diff has all the - // same attributes. To start, build up the check map to be all the keys. - checkOld := make(map[string]struct{}) - checkNew := make(map[string]struct{}) - for k, _ := range d.Attributes { - checkOld[k] = struct{}{} - } - for k, _ := range d2.CopyAttributes() { - checkNew[k] = struct{}{} - } - - // Make an ordered list so we are sure the approximated hashes are left - // to process at the end of the loop - keys := make([]string, 0, len(d.Attributes)) - for k, _ := range d.Attributes { - keys = append(keys, k) - } - sort.StringSlice(keys).Sort() - - for _, k := range keys { - diffOld := d.Attributes[k] - - if _, ok := checkOld[k]; !ok { - // We're not checking this key for whatever reason (see where - // check is modified). 
- continue - } - - // Remove this key since we'll never hit it again - delete(checkOld, k) - delete(checkNew, k) - - _, ok := d2.GetAttribute(k) - if !ok { - // If there's no new attribute, and the old diff expected the attribute - // to be removed, that's just fine. - if diffOld.NewRemoved { - continue - } - - // If the last diff was a computed value then the absense of - // that value is allowed since it may mean the value ended up - // being the same. - if diffOld.NewComputed { - ok = true - } - - // No exact match, but maybe this is a set containing computed - // values. So check if there is an approximate hash in the key - // and if so, try to match the key. - if strings.Contains(k, "~") { - parts := strings.Split(k, ".") - parts2 := append([]string(nil), parts...) - - re := regexp.MustCompile(`^~\d+$`) - for i, part := range parts { - if re.MatchString(part) { - // we're going to consider this the base of a - // computed hash, and remove all longer matching fields - ok = true - - parts2[i] = `\d+` - parts2 = parts2[:i+1] - break - } - } - - re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) - if err != nil { - return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) - } - - for k2, _ := range checkNew { - if re.MatchString(k2) { - delete(checkNew, k2) - } - } - } - - // This is a little tricky, but when a diff contains a computed - // list, set, or map that can only be interpolated after the apply - // command has created the dependent resources, it could turn out - // that the result is actually the same as the existing state which - // would remove the key from the diff. - if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - // Similarly, in a RequiresNew scenario, a list that shows up in the plan - // diff can disappear from the apply diff, which is calculated from an - // empty state. 
- if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - if !ok { - return false, fmt.Sprintf("attribute mismatch: %s", k) - } - } - - // search for the suffix of the base of a [computed] map, list or set. - match := multiVal.FindStringSubmatch(k) - - if diffOld.NewComputed && len(match) == 2 { - matchLen := len(match[1]) - - // This is a computed list, set, or map, so remove any keys with - // this prefix from the check list. - kprefix := k[:len(k)-matchLen] - for k2, _ := range checkOld { - if strings.HasPrefix(k2, kprefix) { - delete(checkOld, k2) - } - } - for k2, _ := range checkNew { - if strings.HasPrefix(k2, kprefix) { - delete(checkNew, k2) - } - } - } - - // We don't compare the values because we can't currently actually - // guarantee to generate the same value two two diffs created from - // the same state+config: we have some pesky interpolation functions - // that do not behave as pure functions (uuid, timestamp) and so they - // can be different each time a diff is produced. - // FIXME: Re-organize our config handling so that we don't re-evaluate - // expressions when we produce a second comparison diff during - // apply (for EvalCompareDiff). - } - - // Check for leftover attributes - if len(checkNew) > 0 { - extras := make([]string, 0, len(checkNew)) - for attr, _ := range checkNew { - extras = append(extras, attr) - } - return false, - fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) - } - - return true, "" -} - -// moduleDiffSort implements sort.Interface to sort module diffs by path. 
-type moduleDiffSort []*ModuleDiff - -func (s moduleDiffSort) Len() int { return len(s) } -func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s moduleDiffSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} diff --git a/internal/legacy/terraform/diff_test.go b/internal/legacy/terraform/diff_test.go deleted file mode 100644 index 5388eb44eb6b..000000000000 --- a/internal/legacy/terraform/diff_test.go +++ /dev/null @@ -1,1252 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestDiffEmpty(t *testing.T) { - var diff *Diff - if !diff.Empty() { - t.Fatal("should be empty") - } - - diff = new(Diff) - if !diff.Empty() { - t.Fatal("should be empty") - } - - mod := diff.AddModule(addrs.RootModuleInstance) - mod.Resources["nodeA"] = &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "foo", - New: "bar", - }, - }, - } - - if diff.Empty() { - t.Fatal("should not be empty") - } -} - -func TestDiffEmpty_taintedIsNotEmpty(t *testing.T) { - diff := new(Diff) - - mod := diff.AddModule(addrs.RootModuleInstance) - mod.Resources["nodeA"] = &InstanceDiff{ - DestroyTainted: true, - } - - if diff.Empty() { - t.Fatal("should not be empty, since DestroyTainted was set") - } -} - -func TestDiffEqual(t *testing.T) { - cases := map[string]struct { - D1, D2 *Diff - Equal bool - }{ - "nil": { - nil, - new(Diff), - false, - }, - - "empty": { - new(Diff), - new(Diff), - true, - }, - - "different module order": { - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}}, - &ModuleDiff{Path: []string{"root", "bar"}}, - }, - }, - &Diff{ - Modules: []*ModuleDiff{ - 
&ModuleDiff{Path: []string{"root", "bar"}}, - &ModuleDiff{Path: []string{"root", "foo"}}, - }, - }, - true, - }, - - "different module diff destroys": { - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, - }, - }, - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}, Destroy: false}, - }, - }, - true, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - actual := tc.D1.Equal(tc.D2) - if actual != tc.Equal { - t.Fatalf("expected: %v\n\n%#v\n\n%#v", tc.Equal, tc.D1, tc.D2) - } - }) - } -} - -func TestDiffPrune(t *testing.T) { - cases := map[string]struct { - D1, D2 *Diff - }{ - "nil": { - nil, - nil, - }, - - "empty": { - new(Diff), - new(Diff), - }, - - "empty module": { - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}}, - }, - }, - &Diff{}, - }, - - "destroy module": { - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, - }, - }, - &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - tc.D1.Prune() - if !tc.D1.Equal(tc.D2) { - t.Fatalf("bad:\n\n%#v\n\n%#v", tc.D1, tc.D2) - } - }) - } -} - -func TestModuleDiff_ChangeType(t *testing.T) { - cases := []struct { - Diff *ModuleDiff - Result DiffChangeType - }{ - { - &ModuleDiff{}, - DiffNone, - }, - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": &InstanceDiff{Destroy: true}, - }, - }, - DiffDestroy, - }, - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - }, - }, - DiffUpdate, - }, - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - 
RequiresNew: true, - }, - }, - }, - }, - }, - DiffCreate, - }, - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": &InstanceDiff{ - Destroy: true, - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - }, - }, - DiffUpdate, - }, - } - - for i, tc := range cases { - actual := tc.Diff.ChangeType() - if actual != tc.Result { - t.Fatalf("%d: %#v", i, actual) - } - } -} - -func TestDiff_DeepCopy(t *testing.T) { - cases := map[string]*Diff{ - "empty": &Diff{}, - - "basic diff": &Diff{ - Modules: []*ModuleDiff{ - &ModuleDiff{ - Path: []string{"root"}, - Resources: map[string]*InstanceDiff{ - "aws_instance.foo": &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "num": &ResourceAttrDiff{ - Old: "0", - New: "2", - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - dup := tc.DeepCopy() - if !reflect.DeepEqual(dup, tc) { - t.Fatalf("\n%#v\n\n%#v", dup, tc) - } - }) - } -} - -func TestModuleDiff_Empty(t *testing.T) { - diff := new(ModuleDiff) - if !diff.Empty() { - t.Fatal("should be empty") - } - - diff.Resources = map[string]*InstanceDiff{ - "nodeA": &InstanceDiff{}, - } - - if !diff.Empty() { - t.Fatal("should be empty") - } - - diff.Resources["nodeA"].Attributes = map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "foo", - New: "bar", - }, - } - - if diff.Empty() { - t.Fatal("should not be empty") - } - - diff.Resources["nodeA"].Attributes = nil - diff.Resources["nodeA"].Destroy = true - - if diff.Empty() { - t.Fatal("should not be empty") - } -} - -func TestModuleDiff_String(t *testing.T) { - diff := &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "nodeA": &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "foo", - New: "bar", - }, - "bar": &ResourceAttrDiff{ - Old: "foo", - NewComputed: true, - }, - "longfoo": &ResourceAttrDiff{ - Old: "foo", - New: 
"bar", - RequiresNew: true, - }, - "secretfoo": &ResourceAttrDiff{ - Old: "foo", - New: "bar", - Sensitive: true, - }, - }, - }, - }, - } - - actual := strings.TrimSpace(diff.String()) - expected := strings.TrimSpace(moduleDiffStrBasic) - if actual != expected { - t.Fatalf("bad:\n%s", actual) - } -} - -func TestInstanceDiff_ChangeType(t *testing.T) { - cases := []struct { - Diff *InstanceDiff - Result DiffChangeType - }{ - { - &InstanceDiff{}, - DiffNone, - }, - { - &InstanceDiff{Destroy: true}, - DiffDestroy, - }, - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - DiffUpdate, - }, - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - DiffCreate, - }, - { - &InstanceDiff{ - Destroy: true, - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - DiffDestroyCreate, - }, - { - &InstanceDiff{ - DestroyTainted: true, - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "bar", - RequiresNew: true, - }, - }, - }, - DiffDestroyCreate, - }, - } - - for i, tc := range cases { - actual := tc.Diff.ChangeType() - if actual != tc.Result { - t.Fatalf("%d: %#v", i, actual) - } - } -} - -func TestInstanceDiff_Empty(t *testing.T) { - var rd *InstanceDiff - - if !rd.Empty() { - t.Fatal("should be empty") - } - - rd = new(InstanceDiff) - - if !rd.Empty() { - t.Fatal("should be empty") - } - - rd = &InstanceDiff{Destroy: true} - - if rd.Empty() { - t.Fatal("should not be empty") - } - - rd = &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - New: "bar", - }, - }, - } - - if rd.Empty() { - t.Fatal("should not be empty") - } -} - -func TestModuleDiff_Instances(t *testing.T) { - yesDiff := &InstanceDiff{Destroy: true} - noDiff := &InstanceDiff{Destroy: 
true, DestroyTainted: true} - - cases := []struct { - Diff *ModuleDiff - Id string - Result []*InstanceDiff - }{ - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": yesDiff, - "bar": noDiff, - }, - }, - "foo", - []*InstanceDiff{ - yesDiff, - }, - }, - - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": yesDiff, - "foo.0": yesDiff, - "bar": noDiff, - }, - }, - "foo", - []*InstanceDiff{ - yesDiff, - yesDiff, - }, - }, - - { - &ModuleDiff{ - Resources: map[string]*InstanceDiff{ - "foo": yesDiff, - "foo.0": yesDiff, - "foo_bar": noDiff, - "bar": noDiff, - }, - }, - "foo", - []*InstanceDiff{ - yesDiff, - yesDiff, - }, - }, - } - - for i, tc := range cases { - actual := tc.Diff.Instances(tc.Id) - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("%d: %#v", i, actual) - } - } -} - -func TestInstanceDiff_RequiresNew(t *testing.T) { - rd := &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - }, - } - - if rd.RequiresNew() { - t.Fatal("should not require new") - } - - rd.Attributes["foo"].RequiresNew = true - - if !rd.RequiresNew() { - t.Fatal("should require new") - } -} - -func TestInstanceDiff_RequiresNew_nil(t *testing.T) { - var rd *InstanceDiff - - if rd.RequiresNew() { - t.Fatal("should not require new") - } -} - -func TestInstanceDiffSame(t *testing.T) { - cases := []struct { - One, Two *InstanceDiff - Same bool - Reason string - }{ - { - &InstanceDiff{}, - &InstanceDiff{}, - true, - "", - }, - - { - nil, - nil, - true, - "", - }, - - { - &InstanceDiff{Destroy: false}, - &InstanceDiff{Destroy: true}, - false, - "diff: Destroy; old: false, new: true", - }, - - { - &InstanceDiff{Destroy: true}, - &InstanceDiff{Destroy: true}, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - }, - }, - true, - "", - }, - - { - &InstanceDiff{ - 
Attributes: map[string]*ResourceAttrDiff{ - "bar": &ResourceAttrDiff{}, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - }, - }, - false, - "attribute mismatch: bar", - }, - - // Extra attributes - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{}, - "bar": &ResourceAttrDiff{}, - }, - }, - false, - "extra attributes: bar", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{RequiresNew: true}, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{RequiresNew: false}, - }, - }, - false, - "diff RequiresNew; old: true, new: false", - }, - - // NewComputed on primitive - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "${var.foo}", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - }, - }, - true, - "", - }, - - // NewComputed on primitive, removed - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "${var.foo}", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{}, - }, - true, - "", - }, - - // NewComputed on set, removed - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.1": &ResourceAttrDiff{ - Old: "foo", - New: "", - NewRemoved: true, - }, - "foo.2": &ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - }, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{NewComputed: true}, - }, - 
}, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.0": &ResourceAttrDiff{ - Old: "", - New: "12", - }, - }, - }, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~35964334.bar": &ResourceAttrDiff{ - Old: "", - New: "${var.foo}", - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.87654323.bar": &ResourceAttrDiff{ - Old: "", - New: "12", - }, - }, - }, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{}, - }, - true, - "", - }, - - // Computed can change RequiresNew by removal, and that's okay - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - NewComputed: true, - RequiresNew: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{}, - }, - true, - "", - }, - - // Computed can change Destroy by removal, and that's okay - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - NewComputed: true, - RequiresNew: true, - }, - }, - - Destroy: true, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{}, - }, - true, - "", - }, - - // Computed can change Destroy by elements - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - NewComputed: true, - RequiresNew: true, - }, - }, - - Destroy: true, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "1", - New: "1", - }, - "foo.12": &ResourceAttrDiff{ - Old: "4", - New: "12", - RequiresNew: true, - }, - }, - - Destroy: true, - }, - true, - 
"", - }, - - // Computed sets may not contain all fields in the original diff, and - // because multiple entries for the same set can compute to the same - // hash before the values are computed or interpolated, the overall - // count can change as well. - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~35964334.bar": &ResourceAttrDiff{ - Old: "", - New: "${var.foo}", - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "2", - }, - "foo.87654323.bar": &ResourceAttrDiff{ - Old: "", - New: "12", - }, - "foo.87654325.bar": &ResourceAttrDiff{ - Old: "", - New: "12", - }, - "foo.87654325.baz": &ResourceAttrDiff{ - Old: "", - New: "12", - }, - }, - }, - true, - "", - }, - - // Computed values in maps will fail the "Same" check as well - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.%": &ResourceAttrDiff{ - Old: "", - New: "", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.%": &ResourceAttrDiff{ - Old: "0", - New: "1", - NewComputed: false, - }, - "foo.val": &ResourceAttrDiff{ - Old: "", - New: "something", - }, - }, - }, - true, - "", - }, - - // In a DESTROY/CREATE scenario, the plan diff will be run against the - // state of the old instance, while the apply diff will be run against an - // empty state (because the state is cleared when the destroy runs.) - // For complex attributes, this can result in keys that seem to disappear - // between the two diffs, when in reality everything is working just fine. - // - // Same() needs to take into account this scenario by analyzing NewRemoved - // and treating as "Same" a diff that does indeed have that key removed. 
- { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "somemap.oldkey": &ResourceAttrDiff{ - Old: "long ago", - New: "", - NewRemoved: true, - }, - "somemap.newkey": &ResourceAttrDiff{ - Old: "", - New: "brave new world", - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "somemap.newkey": &ResourceAttrDiff{ - Old: "", - New: "brave new world", - }, - }, - }, - true, - "", - }, - - // Another thing that can occur in DESTROY/CREATE scenarios is that list - // values that are going to zero have diffs that show up at plan time but - // are gone at apply time. The NewRemoved handling catches the fields and - // treats them as OK, but it also needs to treat the .# field itself as - // okay to be present in the old diff but not in the new one. - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "reqnew": &ResourceAttrDiff{ - Old: "old", - New: "new", - RequiresNew: true, - }, - "somemap.#": &ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "somemap.oldkey": &ResourceAttrDiff{ - Old: "long ago", - New: "", - NewRemoved: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "reqnew": &ResourceAttrDiff{ - Old: "", - New: "new", - RequiresNew: true, - }, - }, - }, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "reqnew": &ResourceAttrDiff{ - Old: "old", - New: "new", - RequiresNew: true, - }, - "somemap.%": &ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "somemap.oldkey": &ResourceAttrDiff{ - Old: "long ago", - New: "", - NewRemoved: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "reqnew": &ResourceAttrDiff{ - Old: "", - New: "new", - RequiresNew: true, - }, - }, - }, - true, - "", - }, - - // Innner computed set should allow outer change in key - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~1.outer_val": &ResourceAttrDiff{ - Old: 
"", - New: "foo", - }, - "foo.~1.inner.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~1.inner.~2.value": &ResourceAttrDiff{ - Old: "", - New: "${var.bar}", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.12.outer_val": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "foo.12.inner.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.12.inner.42.value": &ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - true, - "", - }, - - // Innner computed list should allow outer change in key - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~1.outer_val": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "foo.~1.inner.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.~1.inner.0.value": &ResourceAttrDiff{ - Old: "", - New: "${var.bar}", - NewComputed: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.12.outer_val": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "foo.12.inner.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "foo.12.inner.0.value": &ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - }, - true, - "", - }, - - // When removing all collection items, the diff is allowed to contain - // nothing when re-creating the resource. This should be the "Same" - // since we said we were going from 1 to 0. 
- { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.%": &ResourceAttrDiff{ - Old: "1", - New: "0", - RequiresNew: true, - }, - "foo.bar": &ResourceAttrDiff{ - Old: "baz", - New: "", - NewRemoved: true, - RequiresNew: true, - }, - }, - }, - &InstanceDiff{}, - true, - "", - }, - - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo.#": &ResourceAttrDiff{ - Old: "1", - New: "0", - RequiresNew: true, - }, - "foo.0": &ResourceAttrDiff{ - Old: "baz", - New: "", - NewRemoved: true, - RequiresNew: true, - }, - }, - }, - &InstanceDiff{}, - true, - "", - }, - - // Make sure that DestroyTainted diffs pass as well, especially when diff - // two works off of no state. - { - &InstanceDiff{ - DestroyTainted: true, - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "foo", - New: "foo", - }, - }, - }, - &InstanceDiff{ - DestroyTainted: true, - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - }, - }, - true, - "", - }, - // RequiresNew in different attribute - { - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "foo", - New: "foo", - }, - "bar": &ResourceAttrDiff{ - Old: "bar", - New: "baz", - RequiresNew: true, - }, - }, - }, - &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "bar": &ResourceAttrDiff{ - Old: "", - New: "baz", - RequiresNew: true, - }, - }, - }, - true, - "", - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - same, reason := tc.One.Same(tc.Two) - if same != tc.Same { - t.Fatalf("%d: expected same: %t, got %t (%s)\n\n one: %#v\n\ntwo: %#v", - i, tc.Same, same, reason, tc.One, tc.Two) - } - if reason != tc.Reason { - t.Fatalf( - "%d: bad reason\n\nexpected: %#v\n\ngot: %#v", i, tc.Reason, reason) - } - }) - } -} - -const moduleDiffStrBasic = ` -CREATE: nodeA - bar: "foo" => "" - foo: "foo" => 
"bar" - longfoo: "foo" => "bar" (forces new resource) - secretfoo: "" => "" (attribute changed) -` - -func TestCountFlatmapContainerValues(t *testing.T) { - for i, tc := range []struct { - attrs map[string]string - key string - count string - }{ - { - attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, - key: "set.2.list.#", - count: "1", - }, - { - attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, - key: "set.#", - count: "1", - }, - { - attrs: map[string]string{"set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, - key: "set.#", - count: "1", - }, - { - attrs: map[string]string{"map.#": "3", "map.a": "b", "map.a.#": "0", "map.b": "4"}, - key: "map.#", - count: "2", - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - count := countFlatmapContainerValues(tc.key, tc.attrs) - if count != tc.count { - t.Fatalf("expected %q, got %q", tc.count, count) - } - }) - } -} diff --git a/internal/legacy/terraform/provider_mock.go b/internal/legacy/terraform/provider_mock.go deleted file mode 100644 index abdba432460e..000000000000 --- a/internal/legacy/terraform/provider_mock.go +++ /dev/null @@ -1,363 +0,0 @@ -package terraform - -import ( - "encoding/json" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/providers" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetProviderSchemaResponse for compatibility with old tests - - ValidateProviderConfigCalled bool - ValidateProviderConfigResponse providers.ValidateProviderConfigResponse - ValidateProviderConfigRequest providers.ValidateProviderConfigRequest - ValidateProviderConfigFn func(providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse - - ValidateResourceConfigCalled bool - ValidateResourceConfigTypeName string - ValidateResourceConfigResponse providers.ValidateResourceConfigResponse - ValidateResourceConfigRequest providers.ValidateResourceConfigRequest - ValidateResourceConfigFn func(providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse - - ValidateDataResourceConfigCalled bool - ValidateDataResourceConfigTypeName string - ValidateDataResourceConfigResponse providers.ValidateDataResourceConfigResponse - ValidateDataResourceConfigRequest providers.ValidateDataResourceConfigRequest - ValidateDataResourceConfigFn func(providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureProviderCalled bool - ConfigureProviderResponse providers.ConfigureProviderResponse - ConfigureProviderRequest providers.ConfigureProviderRequest - ConfigureProviderFn func(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - 
ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState - - ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error -} - -func (p *MockProvider) GetProviderSchema() providers.GetProviderSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetSchemaCalled = true - return p.getSchema() -} - -func (p *MockProvider) getSchema() providers.GetProviderSchemaResponse { - // This version of getSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. 
- - ret := providers.GetProviderSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret -} - -func (p *MockProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProviderConfigCalled = true - p.ValidateProviderConfigRequest = r - if p.ValidateProviderConfigFn != nil { - return p.ValidateProviderConfigFn(r) - } - return p.ValidateProviderConfigResponse -} - -func (p *MockProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateResourceConfigCalled = true - p.ValidateResourceConfigRequest = r - - if p.ValidateResourceConfigFn != nil { - return p.ValidateResourceConfigFn(r) - } - - return p.ValidateResourceConfigResponse -} - -func (p *MockProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateDataResourceConfigCalled = true - p.ValidateDataResourceConfigRequest = r - - if p.ValidateDataResourceConfigFn != nil { - return p.ValidateDataResourceConfigFn(r) - } - - return p.ValidateDataResourceConfigResponse -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - p.Lock() - defer p.Unlock() - - schemas := p.getSchema() - schema := 
schemas.ResourceTypes[r.TypeName] - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } - } - return resp -} - -func (p *MockProvider) ConfigureProvider(r providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { - p.Lock() - defer p.Unlock() - - p.ConfigureProviderCalled = true - p.ConfigureProviderRequest = r - - if p.ConfigureProviderFn != nil { - return p.ConfigureProviderFn(r) - } - - return p.ConfigureProviderResponse -} - -func (p *MockProvider) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provider itself is responsible for handling - // any concurrency concerns in this case. 
- - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadResourceCalled = true - p.ReadResourceRequest = r - - if p.ReadResourceFn != nil { - return p.ReadResourceFn(r) - } - - resp := p.ReadResourceResponse - if resp.NewState != cty.NilVal { - // make sure the NewState fits the schema - // This isn't always the case for the existing tests - newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState) - if err != nil { - panic(err) - } - resp.NewState = newState - return resp - } - - // just return the same state we received - resp.NewState = r.PriorState - return resp -} - -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - p.Lock() - defer p.Unlock() - - p.PlanResourceChangeCalled = true - p.PlanResourceChangeRequest = r - - if p.PlanResourceChangeFn != nil { - return p.PlanResourceChangeFn(r) - } - - return p.PlanResourceChangeResponse -} - -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - p.Lock() - p.ApplyResourceChangeCalled = true - p.ApplyResourceChangeRequest = r - p.Unlock() - - if p.ApplyResourceChangeFn != nil { - return p.ApplyResourceChangeFn(r) - } - - return p.ApplyResourceChangeResponse -} - -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - p.Lock() - defer p.Unlock() - - if p.ImportStateReturn != nil { - for _, is := range p.ImportStateReturn { - if is.Attributes == nil { - is.Attributes = make(map[string]string) - } - is.Attributes["id"] = is.ID - - typeName := is.Ephemeral.Type - // Use the requested type if the resource has no type of it's own. 
- // We still return the empty type, which will error, but this prevents a panic. - if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - return p.ImportResourceStateResponse -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - return p.ReadDataSourceResponse -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/internal/legacy/terraform/provisioner_mock.go b/internal/legacy/terraform/provisioner_mock.go deleted file mode 100644 index fe76157a2daf..000000000000 --- a/internal/legacy/terraform/provisioner_mock.go +++ /dev/null @@ -1,104 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/internal/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be called from other -// methods on MockProvisioner that may already be holding the lock. 
-func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ProvisionResourceFn != nil { - fn := p.ProvisionResourceFn - return fn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. 
- - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/internal/legacy/terraform/resource.go b/internal/legacy/terraform/resource.go deleted file mode 100644 index ddec8f828a31..000000000000 --- a/internal/legacy/terraform/resource.go +++ /dev/null @@ -1,516 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" -) - -// Resource is a legacy way to identify a particular resource instance. -// -// New code should use addrs.ResourceInstance instead. This is still here -// only for codepaths that haven't been updated yet. -type Resource struct { - // These are all used by the new EvalNode stuff. - Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Flags ResourceFlag -} - -// NewResource constructs a legacy Resource object from an -// addrs.ResourceInstance value. -// -// This is provided to shim to old codepaths that haven't been updated away -// from this type yet. Since this old type is not able to represent instances -// that have string keys, this function will panic if given a resource address -// that has a string key. 
-func NewResource(addr addrs.ResourceInstance) *Resource { - ret := &Resource{ - Name: addr.Resource.Name, - Type: addr.Resource.Type, - } - - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - ret.CountIndex = int(tk) - default: - panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) - } - } - - return ret -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - -// InstanceInfo is used to hold information about the instance and/or -// resource being modified. -type InstanceInfo struct { - // Id is a unique name to represent this instance. This is not related - // to InstanceState.ID in any way. - Id string - - // ModulePath is the complete path of the module containing this - // instance. - ModulePath []string - - // Type is the resource type of this instance - Type string - - // uniqueExtra is an internal field that can be populated to supply - // extra metadata that is used to identify a unique instance in - // the graph walk. This will be appended to HumanID when uniqueId - // is called. - uniqueExtra string -} - -// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. -// -// InstanceInfo is a legacy type, and uses of it should be gradually replaced -// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as -// appropriate. -// -// The legacy InstanceInfo type cannot represent module instances with instance -// keys, so this function will panic if given such a path. Uses of this type -// should all be removed or replaced before implementing "count" and "for_each" -// arguments on modules in order to avoid such panics. -// -// This legacy type also cannot represent resource instances with string -// instance keys. It will panic if the given key is not either NoKey or an -// IntKey. 
-func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { - // We need an old-style []string module path for InstanceInfo. - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - panic("NewInstanceInfo cannot convert module instance with key") - } - path[i] = step.Name - } - - // This is a funny old meaning of "id" that is no longer current. It should - // not be used for anything users might see. Note that it does not include - // a representation of the resource mode, and so it's impossible to - // determine from an InstanceInfo alone whether it is a managed or data - // resource that is being referred to. - id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) - if addr.Resource.Resource.Mode == addrs.DataResourceMode { - id = "data." + id - } - if addr.Resource.Key != addrs.NoKey { - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - id = id + fmt.Sprintf(".%d", int(k)) - default: - panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) - } - } - - return &InstanceInfo{ - Id: id, - ModulePath: path, - Type: addr.Resource.Resource.Type, - } -} - -// ResourceAddress returns the address of the resource that the receiver is describing. -func (i *InstanceInfo) ResourceAddress() *ResourceAddress { - // GROSS: for tainted and deposed instances, their status gets appended - // to i.Id to create a unique id for the graph node. Historically these - // ids were displayed to the user, so it's designed to be human-readable: - // "aws_instance.bar.0 (deposed #0)" - // - // So here we detect such suffixes and try to interpret them back to - // their original meaning so we can then produce a ResourceAddress - // with a suitable InstanceType. 
- id := i.Id - instanceType := TypeInvalid - if idx := strings.Index(id, " ("); idx != -1 { - remain := id[idx:] - id = id[:idx] - - switch { - case strings.Contains(remain, "tainted"): - instanceType = TypeTainted - case strings.Contains(remain, "deposed"): - instanceType = TypeDeposed - } - } - - addr, err := parseResourceAddressInternal(id) - if err != nil { - // should never happen, since that would indicate a bug in the - // code that constructed this InstanceInfo. - panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) - } - if len(i.ModulePath) > 1 { - addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied - } - if instanceType != TypeInvalid { - addr.InstanceTypeSet = true - addr.InstanceType = instanceType - } - return addr -} - -// ResourceConfig is a legacy type that was formerly used to represent -// interpolatable configuration blocks. It is now only used to shim to old -// APIs that still use this type, via NewResourceConfigShimmed. -type ResourceConfig struct { - ComputedKeys []string - Raw map[string]interface{} - Config map[string]interface{} -} - -// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly -// the given value. -// -// The given value may contain hcl2shim.UnknownVariableValue to signal that -// something is computed, but it must not contain unprocessed interpolation -// sequences as we might've seen in Terraform v0.11 and prior. 
-func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { - v := hcl2shim.HCL2ValueFromConfigValue(raw) - - // This is a little weird but we round-trip the value through the hcl2shim - // package here for two reasons: firstly, because that reduces the risk - // of it including something unlike what NewResourceConfigShimmed would - // produce, and secondly because it creates a copy of "raw" just in case - // something is relying on the fact that in the old world the raw and - // config maps were always distinct, and thus you could in principle mutate - // one without affecting the other. (I sure hope nobody was doing that, though!) - cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) - - return &ResourceConfig{ - Raw: raw, - Config: cfg, - - ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), - } -} - -// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy -// ResourceConfig object, so that it can be passed to older APIs that expect -// this wrapping. -// -// The returned ResourceConfig is already interpolated and cannot be -// re-interpolated. It is, therefore, useful only to functions that expect -// an already-populated ResourceConfig which they then treat as read-only. -// -// If the given value is not of an object type that conforms to the given -// schema then this function will panic. -func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { - if !val.Type().IsObjectType() { - panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) - } - ret := &ResourceConfig{} - - legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) - if legacyVal != nil { - ret.Config = legacyVal - - // Now we need to walk through our structure and find any unknown values, - // producing the separate list ComputedKeys to represent these. 
We use the - // schema here so that we can preserve the expected invariant - // that an attribute is always either wholly known or wholly unknown, while - // a child block can be partially unknown. - ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") - } else { - ret.Config = make(map[string]interface{}) - } - ret.Raw = ret.Config - - return ret -} - -// Record the any config values in ComputedKeys. This field had been unused in -// helper/schema, but in the new protocol we're using this so that the SDK can -// now handle having an unknown collection. The legacy diff code doesn't -// properly handle the unknown, because it can't be expressed in the same way -// between the config and diff. -func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { - var ret []string - ty := val.Type() - - if val.IsNull() { - return ret - } - - if !val.IsKnown() { - // we shouldn't have an entirely unknown resource, but prevent empty - // strings just in case - if len(path) > 0 { - ret = append(ret, path) - } - return ret - } - - if path != "" { - path += "." - } - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): - i := 0 - for it := val.ElementIterator(); it.Next(); i++ { - _, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) - ret = append(ret, keys...) - } - - case ty.IsMapType(), ty.IsObjectType(): - for it := val.ElementIterator(); it.Next(); { - subK, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) - ret = append(ret, keys...) - } - } - - return ret -} - -// DeepCopy performs a deep copy of the configuration. This makes it safe -// to modify any of the structures that are part of the resource config without -// affecting the original configuration. 
-func (c *ResourceConfig) DeepCopy() *ResourceConfig { - // DeepCopying a nil should return a nil to avoid panics - if c == nil { - return nil - } - - // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) - if err != nil { - panic(err) - } - - // Force the type - result := copy.(*ResourceConfig) - - return result -} - -// Equal checks the equality of two resource configs. -func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { - // If either are nil, then they're only equal if they're both nil - if c == nil || c2 == nil { - return c == c2 - } - - // Sort the computed keys so they're deterministic - sort.Strings(c.ComputedKeys) - sort.Strings(c2.ComputedKeys) - - // Two resource configs if their exported properties are equal. - // We don't compare "raw" because it is never used again after - // initialization and for all intents and purposes they are equal - // if the exported properties are equal. - check := [][2]interface{}{ - {c.ComputedKeys, c2.ComputedKeys}, - {c.Raw, c2.Raw}, - {c.Config, c2.Config}, - } - for _, pair := range check { - if !reflect.DeepEqual(pair[0], pair[1]) { - return false - } - } - - return true -} - -// CheckSet checks that the given list of configuration keys is -// properly set. If not, errors are returned for each unset key. -// -// This is useful to be called in the Validate method of a ResourceProvider. -func (c *ResourceConfig) CheckSet(keys []string) []error { - var errs []error - - for _, k := range keys { - if !c.IsSet(k) { - errs = append(errs, fmt.Errorf("%s must be set", k)) - } - } - - return errs -} - -// Get looks up a configuration value by key and returns the value. -// -// The second return value is true if the get was successful. Get will -// return the raw value if the key is computed, so you should pair this -// with IsComputed. -func (c *ResourceConfig) Get(k string) (interface{}, bool) { - // We aim to get a value from the configuration. 
If it is computed, - // then we return the pure raw value. - source := c.Config - if c.IsComputed(k) { - source = c.Raw - } - - return c.get(k, source) -} - -// GetRaw looks up a configuration value by key and returns the value, -// from the raw, uninterpolated config. -// -// The second return value is true if the get was successful. Get will -// not succeed if the value is being computed. -func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { - return c.get(k, c.Raw) -} - -// IsComputed returns whether the given key is computed or not. -func (c *ResourceConfig) IsComputed(k string) bool { - // The next thing we do is check the config if we get a computed - // value out of it. - v, ok := c.get(k, c.Config) - if !ok { - return false - } - - // If value is nil, then it isn't computed - if v == nil { - return false - } - - // Test if the value contains an unknown value - var w unknownCheckWalker - if err := reflectwalk.Walk(v, &w); err != nil { - panic(err) - } - - return w.Unknown -} - -// IsSet checks if the key in the configuration is set. A key is set if -// it has a value or the value is being computed (is unknown currently). -// -// This function should be used rather than checking the keys of the -// raw configuration itself, since a key may be omitted from the raw -// configuration if it is being computed. 
-func (c *ResourceConfig) IsSet(k string) bool { - if c == nil { - return false - } - - if c.IsComputed(k) { - return true - } - - if _, ok := c.Get(k); ok { - return true - } - - return false -} - -func (c *ResourceConfig) get( - k string, raw map[string]interface{}) (interface{}, bool) { - parts := strings.Split(k, ".") - if len(parts) == 1 && parts[0] == "" { - parts = nil - } - - var current interface{} = raw - var previous interface{} = nil - for i, part := range parts { - if current == nil { - return nil, false - } - - cv := reflect.ValueOf(current) - switch cv.Kind() { - case reflect.Map: - previous = current - v := cv.MapIndex(reflect.ValueOf(part)) - if !v.IsValid() { - if i > 0 && i != (len(parts)-1) { - tryKey := strings.Join(parts[i:], ".") - v := cv.MapIndex(reflect.ValueOf(tryKey)) - if !v.IsValid() { - return nil, false - } - - return v.Interface(), true - } - - return nil, false - } - - current = v.Interface() - case reflect.Slice: - previous = current - - if part == "#" { - // If any value in a list is computed, this whole thing - // is computed and we can't read any part of it. - for i := 0; i < cv.Len(); i++ { - if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { - return v, true - } - } - - current = cv.Len() - } else { - i, err := strconv.ParseInt(part, 0, 0) - if err != nil { - return nil, false - } - if int(i) < 0 || int(i) >= cv.Len() { - return nil, false - } - current = cv.Index(int(i)).Interface() - } - case reflect.String: - // This happens when map keys contain "." and have a common - // prefix so were split as path components above. 
- actualKey := strings.Join(parts[i-1:], ".") - if prevMap, ok := previous.(map[string]interface{}); ok { - v, ok := prevMap[actualKey] - return v, ok - } - - return nil, false - default: - panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) - } - } - - return current, true -} - -// unknownCheckWalker -type unknownCheckWalker struct { - Unknown bool -} - -func (w *unknownCheckWalker) Primitive(v reflect.Value) error { - if v.Interface() == hcl2shim.UnknownVariableValue { - w.Unknown = true - } - - return nil -} diff --git a/internal/legacy/terraform/resource_provisioner.go b/internal/legacy/terraform/resource_provisioner.go deleted file mode 100644 index 647693a9fa52..000000000000 --- a/internal/legacy/terraform/resource_provisioner.go +++ /dev/null @@ -1,69 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/provisioners" -) - -// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // GetConfigSchema returns the schema for the provisioner type's main - // configuration block. This is called prior to Validate to enable some - // basic structural validation to be performed automatically and to allow - // the configuration to be properly extracted from potentially-ambiguous - // configuration file formats. - GetConfigSchema() (*configschema.Block, error) - - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. 
- // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns an error. - // Instead of a diff, the ResourceConfig is provided since provisioners - // only run after a resource has been newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) - -// ProvisionerFactory is a function type that creates a new instance -// of a provisioners.Interface. 
-type ProvisionerFactory = provisioners.Factory diff --git a/internal/legacy/terraform/resource_test.go b/internal/legacy/terraform/resource_test.go deleted file mode 100644 index c91c70c1c6e1..000000000000 --- a/internal/legacy/terraform/resource_test.go +++ /dev/null @@ -1,674 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/mitchellh/reflectwalk" -) - -func TestResourceConfigGet(t *testing.T) { - fooStringSchema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - } - fooListSchema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.List(cty.Number), Optional: true}, - }, - } - - cases := []struct { - Config cty.Value - Schema *configschema.Block - Key string - Value interface{} - }{ - { - Config: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - Schema: fooStringSchema, - Key: "foo", - Value: "bar", - }, - - { - Config: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), - Schema: fooStringSchema, - Key: "foo", - Value: hcl2shim.UnknownVariableValue, - }, - - { - Config: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ListVal([]cty.Value{ - cty.NumberIntVal(1), - cty.NumberIntVal(2), - cty.NumberIntVal(5), - }), - }), - Schema: fooListSchema, - Key: "foo.0", - Value: 1, - }, - - { - Config: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ListVal([]cty.Value{ - cty.NumberIntVal(1), - cty.NumberIntVal(2), - cty.NumberIntVal(5), - }), - }), - Schema: fooListSchema, - Key: "foo.5", - Value: nil, - }, - - { - Config: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ListVal([]cty.Value{ - cty.NumberIntVal(1), - cty.NumberIntVal(2), - cty.NumberIntVal(5), - }), - }), - Schema: fooListSchema, - Key: 
"foo.-1", - Value: nil, - }, - - // get from map - { - Config: cty.ObjectVal(map[string]cty.Value{ - "mapname": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.NumberIntVal(1), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, - }, - }, - Key: "mapname.0.key", - Value: 1, - }, - - // get from map with dot in key - { - Config: cty.ObjectVal(map[string]cty.Value{ - "mapname": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key.name": cty.NumberIntVal(1), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, - }, - }, - Key: "mapname.0.key.name", - Value: 1, - }, - - // get from map with overlapping key names - { - Config: cty.ObjectVal(map[string]cty.Value{ - "mapname": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key.name": cty.NumberIntVal(1), - "key.name.2": cty.NumberIntVal(2), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, - }, - }, - Key: "mapname.0.key.name.2", - Value: 2, - }, - { - Config: cty.ObjectVal(map[string]cty.Value{ - "mapname": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key.name": cty.NumberIntVal(1), - "key.name.foo": cty.NumberIntVal(2), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, - }, - }, - Key: "mapname.0.key.name", - Value: 1, - }, - { - Config: cty.ObjectVal(map[string]cty.Value{ - "mapname": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "listkey": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.NumberIntVal(3), - }), - }), - }), - }), - }), - Schema: 
&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "mapname": {Type: cty.List(cty.Map(cty.List(cty.Map(cty.Number)))), Optional: true}, - }, - }, - Key: "mapname.0.listkey.0.key", - Value: 3, - }, - } - - for i, tc := range cases { - rc := NewResourceConfigShimmed(tc.Config, tc.Schema) - - // Test getting a key - t.Run(fmt.Sprintf("get-%d", i), func(t *testing.T) { - v, ok := rc.Get(tc.Key) - if ok && v == nil { - t.Fatal("(nil, true) returned from Get") - } - - if !reflect.DeepEqual(v, tc.Value) { - t.Fatalf("%d bad: %#v", i, v) - } - }) - - // Test copying and equality - t.Run(fmt.Sprintf("copy-and-equal-%d", i), func(t *testing.T) { - copy := rc.DeepCopy() - if !reflect.DeepEqual(copy, rc) { - t.Fatalf("bad:\n\n%#v\n\n%#v", copy, rc) - } - - if !copy.Equal(rc) { - t.Fatalf("copy != rc:\n\n%#v\n\n%#v", copy, rc) - } - if !rc.Equal(copy) { - t.Fatalf("rc != copy:\n\n%#v\n\n%#v", copy, rc) - } - }) - } -} - -func TestResourceConfigDeepCopy_nil(t *testing.T) { - var nilRc *ResourceConfig - actual := nilRc.DeepCopy() - if actual != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceConfigDeepCopy_nilComputed(t *testing.T) { - rc := &ResourceConfig{} - actual := rc.DeepCopy() - if actual.ComputedKeys != nil { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceConfigEqual_nil(t *testing.T) { - var nilRc *ResourceConfig - notNil := NewResourceConfigShimmed(cty.EmptyObjectVal, &configschema.Block{}) - - if nilRc.Equal(notNil) { - t.Fatal("should not be equal") - } - - if notNil.Equal(nilRc) { - t.Fatal("should not be equal") - } -} - -func TestResourceConfigEqual_computedKeyOrder(t *testing.T) { - v := cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }) - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - } - rc := NewResourceConfigShimmed(v, schema) - rc2 := NewResourceConfigShimmed(v, schema) - - // Set the computed keys 
manually to force ordering to differ - rc.ComputedKeys = []string{"foo", "bar"} - rc2.ComputedKeys = []string{"bar", "foo"} - - if !rc.Equal(rc2) { - t.Fatal("should be equal") - } -} - -func TestUnknownCheckWalker(t *testing.T) { - cases := []struct { - Name string - Input interface{} - Result bool - }{ - { - "primitive", - 42, - false, - }, - - { - "primitive computed", - hcl2shim.UnknownVariableValue, - true, - }, - - { - "list", - []interface{}{"foo", hcl2shim.UnknownVariableValue}, - true, - }, - - { - "nested list", - []interface{}{ - "foo", - []interface{}{hcl2shim.UnknownVariableValue}, - }, - true, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - var w unknownCheckWalker - if err := reflectwalk.Walk(tc.Input, &w); err != nil { - t.Fatalf("err: %s", err) - } - - if w.Unknown != tc.Result { - t.Fatalf("bad: %v", w.Unknown) - } - }) - } -} - -func TestNewResourceConfigShimmed(t *testing.T) { - for _, tc := range []struct { - Name string - Val cty.Value - Schema *configschema.Block - Expected *ResourceConfig - }{ - { - Name: "empty object", - Val: cty.NullVal(cty.EmptyObject), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - Raw: map[string]interface{}{}, - Config: map[string]interface{}{}, - }, - }, - { - Name: "basic", - Val: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - Raw: map[string]interface{}{ - "foo": "bar", - }, - Config: map[string]interface{}{ - "foo": "bar", - }, - }, - }, - { - Name: "null string", - Val: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": 
{ - Type: cty.String, - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - Raw: map[string]interface{}{}, - Config: map[string]interface{}{}, - }, - }, - { - Name: "unknown string", - Val: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"foo"}, - Raw: map[string]interface{}{ - "foo": hcl2shim.UnknownVariableValue, - }, - Config: map[string]interface{}{ - "foo": hcl2shim.UnknownVariableValue, - }, - }, - }, - { - Name: "unknown collections", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.UnknownVal(cty.Map(cty.String)), - "baz": cty.UnknownVal(cty.List(cty.String)), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.Map(cty.String), - Required: true, - }, - "baz": { - Type: cty.List(cty.String), - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"bar", "baz"}, - Raw: map[string]interface{}{ - "bar": hcl2shim.UnknownVariableValue, - "baz": hcl2shim.UnknownVariableValue, - }, - Config: map[string]interface{}{ - "bar": hcl2shim.UnknownVariableValue, - "baz": hcl2shim.UnknownVariableValue, - }, - }, - }, - { - Name: "null collections", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.NullVal(cty.Map(cty.String)), - "baz": cty.NullVal(cty.List(cty.String)), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.Map(cty.String), - Required: true, - }, - "baz": { - Type: cty.List(cty.String), - Optional: true, - }, - }, - }, - Expected: &ResourceConfig{ - Raw: map[string]interface{}{}, - Config: map[string]interface{}{}, - }, - }, - { - Name: "unknown blocks", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.UnknownVal(cty.Map(cty.String)), - "baz": 
cty.UnknownVal(cty.List(cty.String)), - }), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "bar": { - Block: configschema.Block{}, - Nesting: configschema.NestingList, - }, - "baz": { - Block: configschema.Block{}, - Nesting: configschema.NestingSet, - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"bar", "baz"}, - Raw: map[string]interface{}{ - "bar": hcl2shim.UnknownVariableValue, - "baz": hcl2shim.UnknownVariableValue, - }, - Config: map[string]interface{}{ - "bar": hcl2shim.UnknownVariableValue, - "baz": hcl2shim.UnknownVariableValue, - }, - }, - }, - { - Name: "unknown in nested blocks", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "baz": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "list": cty.UnknownVal(cty.List(cty.String)), - }), - }), - }), - }), - }), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "bar": { - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "baz": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "list": {Type: cty.List(cty.String), - Optional: true, - }, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"bar.0.baz.0.list"}, - Raw: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "baz": []interface{}{map[string]interface{}{ - "list": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - }}, - }, - Config: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "baz": []interface{}{map[string]interface{}{ - "list": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - }}, - }, - }, - }, - { - Name: "unknown in set", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "val": 
cty.UnknownVal(cty.String), - }), - }), - }), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "bar": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "val": { - Type: cty.String, - Optional: true, - }, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"bar.0.val"}, - Raw: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "val": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - }, - Config: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "val": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - }, - }, - }, - { - Name: "unknown in attribute sets", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "val": cty.UnknownVal(cty.String), - }), - }), - "baz": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "obj": cty.UnknownVal(cty.Object(map[string]cty.Type{ - "attr": cty.List(cty.String), - })), - }), - cty.ObjectVal(map[string]cty.Value{ - "obj": cty.ObjectVal(map[string]cty.Value{ - "attr": cty.UnknownVal(cty.List(cty.String)), - }), - }), - }), - }), - Schema: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": &configschema.Attribute{ - Type: cty.Set(cty.Object(map[string]cty.Type{ - "val": cty.String, - })), - }, - "baz": &configschema.Attribute{ - Type: cty.Set(cty.Object(map[string]cty.Type{ - "obj": cty.Object(map[string]cty.Type{ - "attr": cty.List(cty.String), - }), - })), - }, - }, - }, - Expected: &ResourceConfig{ - ComputedKeys: []string{"bar.0.val", "baz.0.obj.attr", "baz.1.obj"}, - Raw: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "val": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - "baz": []interface{}{ - map[string]interface{}{ - "obj": map[string]interface{}{ - "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", - }, - }, - 
map[string]interface{}{ - "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", - }, - }, - }, - Config: map[string]interface{}{ - "bar": []interface{}{map[string]interface{}{ - "val": "74D93920-ED26-11E3-AC10-0800200C9A66", - }}, - "baz": []interface{}{ - map[string]interface{}{ - "obj": map[string]interface{}{ - "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", - }, - }, - map[string]interface{}{ - "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", - }, - }, - }, - }, - }, - { - Name: "null blocks", - Val: cty.ObjectVal(map[string]cty.Value{ - "bar": cty.NullVal(cty.Map(cty.String)), - "baz": cty.NullVal(cty.List(cty.String)), - }), - Schema: &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "bar": { - Block: configschema.Block{}, - Nesting: configschema.NestingMap, - }, - "baz": { - Block: configschema.Block{}, - Nesting: configschema.NestingSingle, - }, - }, - }, - Expected: &ResourceConfig{ - Raw: map[string]interface{}{}, - Config: map[string]interface{}{}, - }, - }, - } { - t.Run(tc.Name, func(*testing.T) { - cfg := NewResourceConfigShimmed(tc.Val, tc.Schema) - if !tc.Expected.Equal(cfg) { - t.Fatalf("expected:\n%#v\ngot:\n%#v", tc.Expected, cfg) - } - }) - } -} diff --git a/internal/legacy/terraform/schemas.go b/internal/legacy/terraform/schemas.go deleted file mode 100644 index 20b77ea9734a..000000000000 --- a/internal/legacy/terraform/schemas.go +++ /dev/null @@ -1,285 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Schemas is a container for various kinds of schema that Terraform needs -// during processing. 
-type Schemas struct { - Providers map[addrs.Provider]*ProviderSchema - Provisioners map[string]*configschema.Block -} - -// ProviderSchema returns the entire ProviderSchema object that was produced -// by the plugin for the given provider, or nil if no such schema is available. -// -// It's usually better to go use the more precise methods offered by type -// Schemas to handle this detail automatically. -func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema { - if ss.Providers == nil { - return nil - } - return ss.Providers[provider] -} - -// ProviderConfig returns the schema for the provider configuration of the -// given provider type, or nil if no such schema is available. -func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { - ps := ss.ProviderSchema(provider) - if ps == nil { - return nil - } - return ps.Provider -} - -// ResourceTypeConfig returns the schema for the configuration of a given -// resource type belonging to a given provider type, or nil of no such -// schema is available. -// -// In many cases the provider type is inferrable from the resource type name, -// but this is not always true because users can override the provider for -// a resource using the "provider" meta-argument. Therefore it's important to -// always pass the correct provider name, even though it many cases it feels -// redundant. -func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { - ps := ss.ProviderSchema(provider) - if ps == nil || ps.ResourceTypes == nil { - return nil, 0 - } - return ps.SchemaForResourceType(resourceMode, resourceType) -} - -// ProvisionerConfig returns the schema for the configuration of a given -// provisioner, or nil of no such schema is available. 
-func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { - return ss.Provisioners[name] -} - -// LoadSchemas searches the given configuration, state and plan (any of which -// may be nil) for constructs that have an associated schema, requests the -// necessary schemas from the given component factory (which must _not_ be nil), -// and returns a single object representing all of the necessary schemas. -// -// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing -// errors across multiple separate objects. Errors here will usually indicate -// either misbehavior on the part of one of the providers or of the provider -// protocol itself. When returned with errors, the returned schemas object is -// still valid but may be incomplete. -func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { - schemas := &Schemas{ - Providers: map[addrs.Provider]*ProviderSchema{}, - Provisioners: map[string]*configschema.Block{}, - } - var diags tfdiags.Diagnostics - - newDiags := loadProviderSchemas(schemas.Providers, config, state, components) - diags = diags.Append(newDiags) - newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) - diags = diags.Append(newDiags) - - return schemas, diags.Err() -} - -func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(fqn addrs.Provider) { - name := fqn.String() - - if _, exists := schemas[fqn]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) - provider, err := components.ResourceProvider(fqn) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. 
- schemas[fqn] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - provider.Close() - }() - - resp := provider.GetProviderSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[fqn] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), - ) - } - - for t, r := range resp.ResourceTypes { - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - diags = diags.Append( - fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), - ) - } - } - - for t, d := range resp.DataSources { - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. 
- diags = diags.Append( - fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), - ) - } - } - - schemas[fqn] = s - - if resp.ProviderMeta.Block != nil { - s.ProviderMeta = resp.ProviderMeta.Block - } - } - - if config != nil { - for _, fqn := range config.ProviderTypes() { - ensure(fqn) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeAddr := range needed { - ensure(typeAddr) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - provisioner, err := components.ResourceProvisioner(name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } - }() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - schemas[name] = resp.Provisioner - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. 
- for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, components) - diags = diags.Append(childDiags) - } - } - - return diags -} - -// ProviderSchema represents the schema for a provider's own configuration -// and the configuration for some or all of its resources and data sources. -// -// The completeness of this structure depends on how it was constructed. -// When constructed for a configuration, it will generally include only -// resource types and data sources used by that configuration. -type ProviderSchema struct { - Provider *configschema.Block - ProviderMeta *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ps.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ps.SchemaForResourceType(addr.Mode, addr.Type) -} - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. 
-type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/internal/legacy/terraform/state.go b/internal/legacy/terraform/state.go deleted file mode 100644 index 4d68ac956e87..000000000000 --- a/internal/legacy/terraform/state.go +++ /dev/null @@ -1,2254 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/copystructure" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -const ( - // StateVersion is the current version for our state file - StateVersion = 3 -) - -// rootModulePath is the path of the root module -var rootModulePath = []string{"root"} - -// normalizeModulePath transforms a legacy module path (which may or may not -// have a redundant "root" label at the start of it) into an -// addrs.ModuleInstance representing the same module. -// -// For legacy reasons, different parts of Terraform disagree about whether the -// root module has the path []string{} or []string{"root"}, and so this -// function accepts both and trims off the "root". An implication of this is -// that it's not possible to actually have a module call in the root module -// that is itself named "root", since that would be ambiguous. 
-// -// normalizeModulePath takes a raw module path and returns a path that -// has the rootModulePath prepended to it. If I could go back in time I -// would've never had a rootModulePath (empty path would be root). We can -// still fix this but thats a big refactor that my branch doesn't make sense -// for. Instead, this function normalizes paths. -func normalizeModulePath(p []string) addrs.ModuleInstance { - // FIXME: Remove this once everyone is using addrs.ModuleInstance. - - if len(p) > 0 && p[0] == "root" { - p = p[1:] - } - - ret := make(addrs.ModuleInstance, len(p)) - for i, name := range p { - // For now we don't actually support modules with multiple instances - // identified by keys, so we just treat every path element as a - // step with no key. - ret[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - return ret -} - -// State keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -type State struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. 
- Remote *RemoteState `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *BackendState `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*ModuleState `json:"modules"` - - mu sync.Mutex -} - -func (s *State) Lock() { s.mu.Lock() } -func (s *State) Unlock() { s.mu.Unlock() } - -// NewState is used to initialize a blank state -func NewState() *State { - s := &State{} - s.init() - return s -} - -// Children returns the ModuleStates that are direct children of -// the given path. If the path is "root", for example, then children -// returned might be "root.child", but not "root.child.grandchild". -func (s *State) Children(path []string) []*ModuleState { - s.Lock() - defer s.Unlock() - // TODO: test - - return s.children(path) -} - -func (s *State) children(path []string) []*ModuleState { - result := make([]*ModuleState, 0) - for _, m := range s.Modules { - if m == nil { - continue - } - - if len(m.Path) != len(path)+1 { - continue - } - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - result = append(result, m) - } - - return result -} - -// AddModule adds the module with the given path to the state. -// -// This should be the preferred method to add module states since it -// allows us to optimize lookups later as well as control sorting. -func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { - s.Lock() - defer s.Unlock() - - return s.addModule(path) -} - -func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { - // check if the module exists first - m := s.moduleByPath(path) - if m != nil { - return m - } - - // Lower the new-style address into a legacy-style address. 
- // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - // For the purposes of state, the legacy address format also includes - // a redundant extra prefix element "root". It is important to include - // this because the "prune" method will remove any module that has a - // path length less than one, and other parts of the state code will - // trim off the first element indiscriminately. - legacyPath := make([]string, len(path)+1) - legacyPath[0] = "root" - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("state cannot represent modules with count or for_each keys") - } - - legacyPath[i+1] = step.Name - } - - m = &ModuleState{Path: legacyPath} - m.init() - s.Modules = append(s.Modules, m) - s.sort() - return m -} - -// ModuleByPath is used to lookup the module state for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { - if s == nil { - return nil - } - s.Lock() - defer s.Unlock() - - return s.moduleByPath(path) -} - -func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { - for _, mod := range s.Modules { - if mod == nil { - continue - } - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// Empty returns true if the state is empty. -func (s *State) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return len(s.Modules) == 0 -} - -// HasResources returns true if the state contains any resources. 
-// -// This is similar to !s.Empty, but returns true also in the case where the -// state has modules but all of them are devoid of resources. -func (s *State) HasResources() bool { - if s.Empty() { - return false - } - - for _, mod := range s.Modules { - if len(mod.Resources) > 0 { - return true - } - } - - return false -} - -// IsRemote returns true if State represents a state that exists and is -// remote. -func (s *State) IsRemote() bool { - if s == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Remote == nil { - return false - } - if s.Remote.Type == "" { - return false - } - - return true -} - -// Validate validates the integrity of this state file. -// -// Certain properties of the statefile are expected by Terraform in order -// to behave properly. The core of Terraform will assume that once it -// receives a State structure that it has been validated. This validation -// check should be called to ensure that. -// -// If this returns an error, then the user should be notified. The error -// response will include detailed information on the nature of the error. -func (s *State) Validate() error { - s.Lock() - defer s.Unlock() - - var result error - - // !!!! FOR DEVELOPERS !!!! - // - // Any errors returned from this Validate function will BLOCK TERRAFORM - // from loading a state file. Therefore, this should only contain checks - // that are only resolvable through manual intervention. - // - // !!!! FOR DEVELOPERS !!!! - - // Make sure there are no duplicate module states. We open a new - // block here so we can use basic variable names and future validations - // can do the same. 
- { - found := make(map[string]struct{}) - for _, ms := range s.Modules { - if ms == nil { - continue - } - - key := strings.Join(ms.Path, ".") - if _, ok := found[key]; ok { - result = multierror.Append(result, fmt.Errorf( - strings.TrimSpace(stateValidateErrMultiModule), key)) - continue - } - - found[key] = struct{}{} - } - } - - return result -} - -// Remove removes the item in the state at the given address, returning -// any errors that may have occurred. -// -// If the address references a module state or resource, it will delete -// all children as well. To check what will be deleted, use a StateFilter -// first. -func (s *State) Remove(addr ...string) error { - s.Lock() - defer s.Unlock() - - // Filter out what we need to delete - filter := &StateFilter{State: s} - results, err := filter.Filter(addr...) - if err != nil { - return err - } - - // If we have no results, just exit early, we're not going to do anything. - // While what happens below is fairly fast, this is an important early - // exit since the prune below might modify the state more and we don't - // want to modify the state if we don't have to. - if len(results) == 0 { - return nil - } - - // Go through each result and grab what we need - removed := make(map[interface{}]struct{}) - for _, r := range results { - // Convert the path to our own type - path := append([]string{"root"}, r.Path...) 
- - // If we removed this already, then ignore - if _, ok := removed[r.Value]; ok { - continue - } - - // If we removed the parent already, then ignore - if r.Parent != nil { - if _, ok := removed[r.Parent.Value]; ok { - continue - } - } - - // Add this to the removed list - removed[r.Value] = struct{}{} - - switch v := r.Value.(type) { - case *ModuleState: - s.removeModule(path, v) - case *ResourceState: - s.removeResource(path, v) - case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) - default: - return fmt.Errorf("unknown type to delete: %T", r.Value) - } - } - - // Prune since the removal functions often do the bare minimum to - // remove a thing and may leave around dangling empty modules, resources, - // etc. Prune will clean that all up. - s.prune() - - return nil -} - -func (s *State) removeModule(path []string, v *ModuleState) { - for i, m := range s.Modules { - if m == v { - s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil - return - } - } -} - -func (s *State) removeResource(path []string, v *ResourceState) { - // Get the module this resource lives in. If it doesn't exist, we're done. - mod := s.moduleByPath(normalizeModulePath(path)) - if mod == nil { - return - } - - // Find this resource. This is a O(N) lookup when if we had the key - // it could be O(1) but even with thousands of resources this shouldn't - // matter right now. We can easily up performance here when the time comes. - for k, r := range mod.Resources { - if r == v { - // Found it - delete(mod.Resources, k) - return - } - } -} - -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { - // Go through the resource and find the instance that matches this - // (if any) and remove it. 
- - // Check primary - if r.Primary == v { - r.Primary = nil - return - } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } -} - -// RootModule returns the ModuleState for the root module -func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Equal tests if one state is equal to another. -func (s *State) Equal(other *State) bool { - // If one is nil, we do a direct check - if s == nil || other == nil { - return s == other - } - - s.Lock() - defer s.Unlock() - return s.equal(other) -} - -func (s *State) equal(other *State) bool { - if s == nil || other == nil { - return s == other - } - - // If the versions are different, they're certainly not equal - if s.Version != other.Version { - return false - } - - // If any of the modules are not equal, then this state isn't equal - if len(s.Modules) != len(other.Modules) { - return false - } - for _, m := range s.Modules { - // This isn't very optimal currently but works. - otherM := other.moduleByPath(normalizeModulePath(m.Path)) - if otherM == nil { - return false - } - - // If they're not equal, then we're not equal! - if !m.Equal(otherM) { - return false - } - } - - return true -} - -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. 
-// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. -func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - -type StateAgeComparison int - -const ( - StateAgeEqual StateAgeComparison = 0 - StateAgeReceiverNewer StateAgeComparison = 1 - StateAgeReceiverOlder StateAgeComparison = -1 -) - -// CompareAges compares one state with another for which is "older". -// -// This is a simple check using the state's serial, and is thus only as -// reliable as the serial itself. In the normal case, only one state -// exists for a given combination of lineage/serial, but Terraform -// does not guarantee this and so the result of this method should be -// used with care. -// -// Returns an integer that is negative if the receiver is older than -// the argument, positive if the converse, and zero if they are equal. -// An error is returned if the two states are not of the same lineage, -// in which case the integer returned has no meaning. 
-func (s *State) CompareAges(other *State) (StateAgeComparison, error) { - // nil states are "older" than actual states - switch { - case s != nil && other == nil: - return StateAgeReceiverNewer, nil - case s == nil && other != nil: - return StateAgeReceiverOlder, nil - case s == nil && other == nil: - return StateAgeEqual, nil - } - - if !s.SameLineage(other) { - return StateAgeEqual, fmt.Errorf( - "can't compare two states of differing lineage", - ) - } - - s.Lock() - defer s.Unlock() - - switch { - case s.Serial < other.Serial: - return StateAgeReceiverOlder, nil - case s.Serial > other.Serial: - return StateAgeReceiverNewer, nil - default: - return StateAgeEqual, nil - } -} - -// SameLineage returns true only if the state given in argument belongs -// to the same "lineage" of states as the receiver. -func (s *State) SameLineage(other *State) bool { - s.Lock() - defer s.Unlock() - - // If one of the states has no lineage then it is assumed to predate - // this concept, and so we'll accept it as belonging to any lineage - // so that a lineage string can be assigned to newer versions - // without breaking compatibility with older versions. - if s.Lineage == "" || other.Lineage == "" { - return true - } - - return s.Lineage == other.Lineage -} - -// DeepCopy performs a deep copy of the state structure and returns -// a new structure. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*State) -} - -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. 
-func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return tfversion.SemVer.LessThan(v) -} - -func (s *State) Init() { - s.Lock() - defer s.Unlock() - s.init() -} - -func (s *State) init() { - if s.Version == 0 { - s.Version = StateVersion - } - - if s.moduleByPath(addrs.RootModuleInstance) == nil { - s.addModule(addrs.RootModuleInstance) - } - s.ensureHasLineage() - - for _, mod := range s.Modules { - if mod != nil { - mod.init() - } - } - - if s.Remote != nil { - s.Remote.init() - } - -} - -func (s *State) EnsureHasLineage() { - s.Lock() - defer s.Unlock() - - s.ensureHasLineage() -} - -func (s *State) ensureHasLineage() { - if s.Lineage == "" { - lineage, err := uuid.GenerateUUID() - if err != nil { - panic(fmt.Errorf("Failed to generate lineage: %v", err)) - } - s.Lineage = lineage - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) - } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) - } -} - -// AddModuleState insert this module state and override any existing ModuleState -func (s *State) AddModuleState(mod *ModuleState) { - mod.init() - s.Lock() - defer s.Unlock() - - s.addModuleState(mod) -} - -func (s *State) addModuleState(mod *ModuleState) { - for i, m := range s.Modules { - if reflect.DeepEqual(m.Path, mod.Path) { - s.Modules[i] = mod - return - } - } - - s.Modules = append(s.Modules, mod) - s.sort() -} - -// prune is used to remove any resources that are no longer required -func (s *State) prune() { - if s == nil { - return - } - - // Filter out empty modules. - // A module is always assumed to have a path, and it's length isn't always - // bounds checked later on. Modules may be "emptied" during destroy, but we - // never want to store those in the state. 
- for i := 0; i < len(s.Modules); i++ { - if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { - s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) - i-- - } - } - - for _, mod := range s.Modules { - mod.prune() - } - if s.Remote != nil && s.Remote.Empty() { - s.Remote = nil - } -} - -// sort sorts the modules -func (s *State) sort() { - sort.Sort(moduleStateSort(s.Modules)) - - // Allow modules to be sorted - for _, m := range s.Modules { - if m != nil { - m.sort() - } - } -} - -func (s *State) String() string { - if s == nil { - return "" - } - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - for _, m := range s.Modules { - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// BackendState stores the configuration to connect to a remote backend. -type BackendState struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} - -// Empty returns true if BackendState has no state. -func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Config decodes the type-specific configuration object using the provided -// schema and returns the result as a cty.Value. -// -// An error is returned if the stored configuration does not conform to the -// given schema. 
-func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { - ty := schema.ImpliedType() - if s == nil { - return cty.NullVal(ty), nil - } - return ctyjson.Unmarshal(s.ConfigRaw, ty) -} - -// SetConfig replaces (in-place) the type-specific configuration object using -// the provided value and associated schema. -// -// An error is returned if the given value does not conform to the implied -// type of the schema. -func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { - ty := schema.ImpliedType() - buf, err := ctyjson.Marshal(val, ty) - if err != nil { - return err - } - s.ConfigRaw = buf - return nil -} - -// ForPlan produces an alternative representation of the reciever that is -// suitable for storing in a plan. The current workspace must additionally -// be provided, to be stored alongside the backend configuration. -// -// The backend configuration schema is required in order to properly -// encode the backend-specific configuration settings. -func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { - if s == nil { - return nil, nil - } - - configVal, err := s.Config(schema) - if err != nil { - return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) - } - return plans.NewBackend(s.Type, configVal, schema, workspaceName) -} - -// RemoteState is used to track the information about a remote -// state store that we push/pull state to. 
-type RemoteState struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` - - mu sync.Mutex -} - -func (s *RemoteState) Lock() { s.mu.Lock() } -func (s *RemoteState) Unlock() { s.mu.Unlock() } - -func (r *RemoteState) init() { - r.Lock() - defer r.Unlock() - - if r.Config == nil { - r.Config = make(map[string]string) - } -} - -func (r *RemoteState) deepcopy() *RemoteState { - r.Lock() - defer r.Unlock() - - confCopy := make(map[string]string, len(r.Config)) - for k, v := range r.Config { - confCopy[k] = v - } - return &RemoteState{ - Type: r.Type, - Config: confCopy, - } -} - -func (r *RemoteState) Empty() bool { - if r == nil { - return true - } - r.Lock() - defer r.Unlock() - - return r.Type == "" -} - -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - -// OutputState is used to track the state relevant to a single output. -type OutputState struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` - - mu sync.Mutex -} - -func (s *OutputState) Lock() { s.mu.Lock() } -func (s *OutputState) Unlock() { s.mu.Unlock() } - -func (s *OutputState) String() string { - return fmt.Sprintf("%#v", s.Value) -} - -// Equal compares two OutputState structures for equality. nil values are -// considered equal. 
-func (s *OutputState) Equal(other *OutputState) bool { - if s == nil && other == nil { - return true - } - - if s == nil || other == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Sensitive != other.Sensitive { - return false - } - - if !reflect.DeepEqual(s.Value, other.Value) { - return false - } - - return true -} - -func (s *OutputState) deepcopy() *OutputState { - if s == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(fmt.Errorf("Error copying output value: %s", err)) - } - - return stateCopy.(*OutputState) -} - -// ModuleState is used to track all the state relevant to a single -// module. Previous to Terraform 0.3, all state belonged to the "root" -// module. -type ModuleState struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*OutputState `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*ResourceState `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. 
- // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - mu sync.Mutex -} - -func (s *ModuleState) Lock() { s.mu.Lock() } -func (s *ModuleState) Unlock() { s.mu.Unlock() } - -// Equal tests whether one module state is equal to another. -func (m *ModuleState) Equal(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - // Paths must be equal - if !reflect.DeepEqual(m.Path, other.Path) { - return false - } - - // Outputs must be equal - if len(m.Outputs) != len(other.Outputs) { - return false - } - for k, v := range m.Outputs { - if !other.Outputs[k].Equal(v) { - return false - } - } - - // Dependencies must be equal. This sorts these in place but - // this shouldn't cause any problems. - sort.Strings(m.Dependencies) - sort.Strings(other.Dependencies) - if len(m.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range m.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // Resources must be equal - if len(m.Resources) != len(other.Resources) { - return false - } - for k, r := range m.Resources { - otherR, ok := other.Resources[k] - if !ok { - return false - } - - if !r.Equal(otherR) { - return false - } - } - - return true -} - -// IsRoot says whether or not this module diff is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendent of this module. 
-func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { - m.Lock() - defer m.Unlock() - - inConfig := make(map[string]struct{}) - if c != nil { - for _, r := range c.ManagedResources { - inConfig[r.Addr().String()] = struct{}{} - } - for _, r := range c.DataResources { - inConfig[r.Addr().String()] = struct{}{} - } - } - - var result []addrs.ResourceInstance - for k := range m.Resources { - // Since we've not yet updated state to use our new address format, - // we need to do some shimming here. - legacyAddr, err := parseResourceAddressInternal(k) - if err != nil { - // Suggests that the user tampered with the state, since we always - // generate valid internal addresses. - log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) - continue - } - - addr := legacyAddr.AbsResourceInstanceAddr().Resource - compareKey := addr.Resource.String() // compare by resource address, ignoring instance key - if _, exists := inConfig[compareKey]; !exists { - result = append(result, addr) - } - } - return result -} - -// RemovedOutputs returns a list of outputs that are in the State but aren't -// present in the configuration itself. -func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { - if outputs == nil { - // If we got no output map at all then we'll just treat our set of - // configured outputs as empty, since that suggests that they've all - // been removed by removing their containing module. 
- outputs = make(map[string]*configs.Output) - } - - s.Lock() - defer s.Unlock() - - var ret []addrs.OutputValue - for n := range s.Outputs { - if _, declared := outputs[n]; !declared { - ret = append(ret, addrs.OutputValue{ - Name: n, - }) - } - } - - return ret -} - -// View returns a view with the given resource prefix. -func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k, _ := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - -func (m *ModuleState) init() { - m.Lock() - defer m.Unlock() - - if m.Path == nil { - m.Path = []string{} - } - if m.Outputs == nil { - m.Outputs = make(map[string]*OutputState) - } - if m.Resources == nil { - m.Resources = make(map[string]*ResourceState) - } - - if m.Dependencies == nil { - m.Dependencies = make([]string, 0) - } - - for _, rs := range m.Resources { - rs.init() - } -} - -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - -// prune is used to remove any resources that are no longer required -func (m *ModuleState) prune() { - m.Lock() - defer m.Unlock() - - for k, v := range m.Resources { - if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { - delete(m.Resources, k) - continue - } - - v.prune() - } - - for k, v := range m.Outputs { - if v.Value == hcl2shim.UnknownVariableValue { - delete(m.Outputs, k) - } - } - - m.Dependencies = uniqueStrings(m.Dependencies) -} - -func (m *ModuleState) sort() { - for _, v := range m.Resources { - v.sort() - } -} - -func (m *ModuleState) String() string { - m.Lock() - defer m.Unlock() - - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { - 
names = append(names, name) - } - - sort.Sort(resourceNameSort(names)) - - for _, k := range names { - rs := m.Resources[k] - var id string - if rs.Primary != nil { - id = rs.Primary.ID - } - if id == "" { - id = "" - } - - taintStr := "" - if rs.Primary.Tainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(rs.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - if rs.Provider != "" { - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) - } - - var attributes map[string]string - if rs.Primary != nil { - attributes = rs.Primary.Attributes - } - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - for idx, t := range rs.Deposed { - taintStr := "" - if t.Tainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) - } - - if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range rs.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep)) - } - } - } - - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.Outputs)) - for k, _ := range m.Outputs { - ks = append(ks, k) - } - - sort.Strings(ks) - - for _, k := range ks { - v := m.Outputs[k] - switch vTyped := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key, _ := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - 
mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - } - } - } - - return buf.String() -} - -func (m *ModuleState) Empty() bool { - return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 -} - -// ResourceStateKey is a structured representation of the key used for the -// ModuleState.Resources mapping -type ResourceStateKey struct { - Name string - Type string - Mode ResourceMode - Index int -} - -// Equal determines whether two ResourceStateKeys are the same -func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { - if rsk == nil || other == nil { - return false - } - if rsk.Mode != other.Mode { - return false - } - if rsk.Type != other.Type { - return false - } - if rsk.Name != other.Name { - return false - } - if rsk.Index != other.Index { - return false - } - return true -} - -func (rsk *ResourceStateKey) String() string { - if rsk == nil { - return "" - } - var prefix string - switch rsk.Mode { - case ManagedResourceMode: - prefix = "" - case DataResourceMode: - prefix = "data." - default: - panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) - } - if rsk.Index == -1 { - return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) - } - return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) -} - -// ParseResourceStateKey accepts a key in the format used by -// ModuleState.Resources and returns a resource name and resource index. In the -// state, a resource has the format "type.name.index" or "type.name". In the -// latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { - parts := strings.Split(k, ".") - mode := ManagedResourceMode - if len(parts) > 0 && parts[0] == "data" { - mode = DataResourceMode - // Don't need the constant "data" prefix for parsing - // now that we've figured out the mode. 
- parts = parts[1:] - } - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("Malformed resource state key: %s", k) - } - rsk := &ResourceStateKey{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Index: -1, - } - if len(parts) == 3 { - index, err := strconv.Atoi(parts[2]) - if err != nil { - return nil, fmt.Errorf("Malformed resource state key index: %s", k) - } - rsk.Index = index - } - return rsk, nil -} - -// ResourceState holds the state of a resource that is used so that -// a provider can find and manage an existing resource as well as for -// storing attributes that are used to populate variables of child -// resources. -// -// Attributes has attributes about the created resource that are -// queryable in interpolation: "${type.id.attr}" -// -// Extra is just extra data that a provider can return that we store -// for later, but is not exposed in any way to the user. -type ResourceState struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. 
- Primary *InstanceState `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*InstanceState `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -func (s *ResourceState) Lock() { s.mu.Lock() } -func (s *ResourceState) Unlock() { s.mu.Unlock() } - -// Equal tests whether two ResourceStates are equal. -func (s *ResourceState) Equal(other *ResourceState) bool { - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Provider != other.Provider { - return false - } - - // Dependencies must be equal - sort.Strings(s.Dependencies) - sort.Strings(other.Dependencies) - if len(s.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range s.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true -} - -// Taint marks a resource as tainted. -func (s *ResourceState) Taint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = true - } -} - -// Untaint unmarks a resource as tainted. 
-func (s *ResourceState) Untaint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = false - } -} - -// ProviderAddr returns the provider address for the receiver, by parsing the -// string representation saved in state. An error can be returned if the -// value in state is corrupt. -func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { - var diags tfdiags.Diagnostics - - str := s.Provider - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(travDiags) - if travDiags.HasErrors() { - return addrs.AbsProviderConfig{}, diags.Err() - } - - addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags.Err() -} - -func (s *ResourceState) init() { - s.Lock() - defer s.Unlock() - - if s.Primary == nil { - s.Primary = &InstanceState{} - } - s.Primary.init() - - if s.Dependencies == nil { - s.Dependencies = []string{} - } - - if s.Deposed == nil { - s.Deposed = make([]*InstanceState, 0) - } -} - -func (s *ResourceState) deepcopy() *ResourceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*ResourceState) -} - -// prune is used to remove any instances that are no longer required -func (s *ResourceState) prune() { - s.Lock() - defer s.Unlock() - - n := len(s.Deposed) - for i := 0; i < n; i++ { - inst := s.Deposed[i] - if inst == nil || inst.ID == "" { - copy(s.Deposed[i:], s.Deposed[i+1:]) - s.Deposed[n-1] = nil - n-- - i-- - } - } - s.Deposed = s.Deposed[:n] - - s.Dependencies = uniqueStrings(s.Dependencies) -} - -func (s *ResourceState) sort() { - s.Lock() - defer s.Unlock() - - sort.Strings(s.Dependencies) -} - -func (s *ResourceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) - return buf.String() -} - -// InstanceState is used to track the unique state 
information belonging -// to a given instance. -type InstanceState struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral EphemeralState `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - ProviderMeta cty.Value - - // Tainted is used to mark a resource for recreation. - Tainted bool `json:"tainted"` - - mu sync.Mutex -} - -func (s *InstanceState) Lock() { s.mu.Lock() } -func (s *InstanceState) Unlock() { s.mu.Unlock() } - -func (s *InstanceState) init() { - s.Lock() - defer s.Unlock() - - if s.Attributes == nil { - s.Attributes = make(map[string]string) - } - if s.Meta == nil { - s.Meta = make(map[string]interface{}) - } - s.Ephemeral.init() -} - -// NewInstanceStateShimmedFromValue is a shim method to lower a new-style -// object value representing the attributes of an instance object into the -// legacy InstanceState representation. -// -// This is for shimming to old components only and should not be used in new code. 
-func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { - attrs := hcl2shim.FlatmapValueFromHCL2(state) - return &InstanceState{ - ID: attrs["id"], - Attributes: attrs, - Meta: map[string]interface{}{ - "schema_version": schemaVersion, - }, - } -} - -// AttrsAsObjectValue shims from the legacy InstanceState representation to -// a new-style cty object value representation of the state attributes, using -// the given type for guidance. -// -// The given type must be the implied type of the schema of the resource type -// of the object whose state is being converted, or the result is undefined. -// -// This is for shimming from old components only and should not be used in -// new code. -func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { - if s == nil { - // if the state is nil, we need to construct a complete cty.Value with - // null attributes, rather than a single cty.NullVal(ty) - s = &InstanceState{} - } - - if s.Attributes == nil { - s.Attributes = map[string]string{} - } - - // make sure ID is included in the attributes. The InstanceState.ID value - // takes precedence. 
- if s.ID != "" { - s.Attributes["id"] = s.ID - } - - return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) -} - -// Copy all the Fields from another InstanceState -func (s *InstanceState) Set(from *InstanceState) { - s.Lock() - defer s.Unlock() - - from.Lock() - defer from.Unlock() - - s.ID = from.ID - s.Attributes = from.Attributes - s.Ephemeral = from.Ephemeral - s.Meta = from.Meta - s.Tainted = from.Tainted -} - -func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*InstanceState) -} - -func (s *InstanceState) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return s.ID == "" -} - -func (s *InstanceState) Equal(other *InstanceState) bool { - // Short circuit some nil checks - if s == nil || other == nil { - return s == other - } - s.Lock() - defer s.Unlock() - - // IDs must be equal - if s.ID != other.ID { - return false - } - - // Attributes must be equal - if len(s.Attributes) != len(other.Attributes) { - return false - } - for k, v := range s.Attributes { - otherV, ok := other.Attributes[k] - if !ok { - return false - } - - if v != otherV { - return false - } - } - - // Meta must be equal - if len(s.Meta) != len(other.Meta) { - return false - } - if s.Meta != nil && other.Meta != nil { - // We only do the deep check if both are non-nil. If one is nil - // we treat it as equal since their lengths are both zero (check - // above). - // - // Since this can contain numeric values that may change types during - // serialization, let's compare the serialized values. 
- sMeta, err := json.Marshal(s.Meta) - if err != nil { - // marshaling primitives shouldn't ever error out - panic(err) - } - otherMeta, err := json.Marshal(other.Meta) - if err != nil { - panic(err) - } - - if !bytes.Equal(sMeta, otherMeta) { - return false - } - } - - if s.Tainted != other.Tainted { - return false - } - - return true -} - -// MergeDiff takes a ResourceDiff and merges the attributes into -// this resource state in order to generate a new state. This new -// state can be used to provide updated attribute lookups for -// variable interpolation. -// -// If the diff attribute requires computing the value, and hence -// won't be available until apply, the value is replaced with the -// computeID. -func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { - result := s.DeepCopy() - if result == nil { - result = new(InstanceState) - } - result.init() - - if s != nil { - s.Lock() - defer s.Unlock() - for k, v := range s.Attributes { - result.Attributes[k] = v - } - } - if d != nil { - for k, diff := range d.CopyAttributes() { - if diff.NewRemoved { - delete(result.Attributes, k) - continue - } - if diff.NewComputed { - result.Attributes[k] = hcl2shim.UnknownVariableValue - continue - } - - result.Attributes[k] = diff.New - } - } - - return result -} - -func (s *InstanceState) String() string { - notCreated := "" - - if s == nil { - return notCreated - } - - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - - if s.ID == "" { - return notCreated - } - - buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) - - attributes := s.Attributes - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) - } - - buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) - - return buf.String() -} - -// EphemeralState is used for 
transient state that is only kept in-memory -type EphemeralState struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` - - // Type is used to specify the resource type for this instance. This is only - // required for import operations (as documented). If the documentation - // doesn't state that you need to set this, then don't worry about - // setting it. - Type string `json:"-"` -} - -func (e *EphemeralState) init() { - if e.ConnInfo == nil { - e.ConnInfo = make(map[string]string) - } -} - -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. 
-func ReadState(src io.Reader) (*State, error) { - // check for a nil file specifically, since that produces a platform - // specific error if we try to use it in a bufio.Reader. - if f, ok := src.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - buf := bufio.NewReader(src) - - if _, err := buf.Peek(1); err != nil { - if err == io.EOF { - return nil, ErrNoState - } - return nil, err - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. - // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - 
panic("resulting state in load not set, assertion failed") - } - - // Prune the state when read it. Its possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). - result.prune() - - // Validate the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. 
Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - return state, nil -} - -func ReadStateV3(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - // Now we write the state back out to detect any changes in normaliztion. - // If our state is now written out differently, bump the serial number to - // prevent conflicts. - var buf bytes.Buffer - err := WriteState(state, &buf) - if err != nil { - return nil, err - } - - if !bytes.Equal(jsonBytes, buf.Bytes()) { - log.Println("[INFO] state modified during read or write. incrementing serial number") - state.Serial++ - } - - return state, nil -} - -// WriteState writes a state somewhere in a binary format. -func WriteState(d *State, dst io.Writer) error { - // writing a nil state is a noop. 
- if d == nil { - return nil - } - - // make sure we have no uninitialized fields - d.init() - - // Make sure it is sorted - d.sort() - - // Ensure the version is set - d.Version = StateVersion - - // If the TFVersion is set, verify it. We used to just set the version - // here, but this isn't safe since it changes the MD5 sum on some remote - // state storage backends such as Atlas. We now leave it be if needed. - if d.TFVersion != "" { - if _, err := version.NewVersion(d.TFVersion); err != nil { - return fmt.Errorf( - "Error writing state, invalid version: %s\n\n"+ - "The Terraform version when writing the state must be a semantic\n"+ - "version.", - d.TFVersion) - } - } - - // Encode the data in a human-friendly way - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return fmt.Errorf("Failed to encode state: %s", err) - } - - // We append a newline to the data because MarshalIndent doesn't - data = append(data, '\n') - - // Write the data out to the dst - if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { - return fmt.Errorf("Failed to write state: %v", err) - } - - return nil -} - -// resourceNameSort implements the sort.Interface to sort name parts lexically for -// strings and numerically for integer indexes. 
-type resourceNameSort []string - -func (r resourceNameSort) Len() int { return len(r) } -func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r resourceNameSort) Less(i, j int) bool { - iParts := strings.Split(r[i], ".") - jParts := strings.Split(r[j], ".") - - end := len(iParts) - if len(jParts) < end { - end = len(jParts) - } - - for idx := 0; idx < end; idx++ { - if iParts[idx] == jParts[idx] { - continue - } - - // sort on the first non-matching part - iInt, iIntErr := strconv.Atoi(iParts[idx]) - jInt, jIntErr := strconv.Atoi(jParts[idx]) - - switch { - case iIntErr == nil && jIntErr == nil: - // sort numerically if both parts are integers - return iInt < jInt - case iIntErr == nil: - // numbers sort before strings - return true - case jIntErr == nil: - return false - default: - return iParts[idx] < jParts[idx] - } - } - - return r[i] < r[j] -} - -// moduleStateSort implements sort.Interface to sort module states -type moduleStateSort []*ModuleState - -func (s moduleStateSort) Len() int { - return len(s) -} - -func (s moduleStateSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If either is nil, then the nil one is "less" than - if a == nil || b == nil { - return a == nil - } - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} - -func (s moduleStateSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -const stateValidateErrMultiModule = ` -Multiple modules with the same path: %s - -This means that there are multiple entries in the "modules" field -in your state file that point to the same module. This will cause Terraform -to behave in unexpected and error prone ways and is invalid. Please back up -and modify your state file manually to resolve this. 
-` diff --git a/internal/legacy/terraform/state_test.go b/internal/legacy/terraform/state_test.go deleted file mode 100644 index 1edbfb6912fd..000000000000 --- a/internal/legacy/terraform/state_test.go +++ /dev/null @@ -1,1894 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "reflect" - "sort" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" -) - -func TestStateValidate(t *testing.T) { - cases := map[string]struct { - In *State - Err bool - }{ - "empty state": { - &State{}, - false, - }, - - "multiple modules": { - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - }, - &ModuleState{ - Path: []string{"root", "foo"}, - }, - }, - }, - true, - }, - } - - for name, tc := range cases { - // Init the state - tc.In.init() - - err := tc.In.Validate() - if (err != nil) != tc.Err { - t.Fatalf("%s: err: %s", name, err) - } - } -} - -func TestStateAddModule(t *testing.T) { - cases := []struct { - In []addrs.ModuleInstance - Out [][]string - }{ - { - []addrs.ModuleInstance{ - addrs.RootModuleInstance, - addrs.RootModuleInstance.Child("child", addrs.NoKey), - }, - [][]string{ - []string{"root"}, - []string{"root", "child"}, - }, - }, - - { - []addrs.ModuleInstance{ - addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), - addrs.RootModuleInstance.Child("foo", addrs.NoKey), - addrs.RootModuleInstance, - addrs.RootModuleInstance.Child("bar", addrs.NoKey), - }, - [][]string{ - []string{"root"}, - []string{"root", "bar"}, - []string{"root", "foo"}, - []string{"root", "foo", "bar"}, - }, - }, - // Same last element, different middle element - { - []addrs.ModuleInstance{ - addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), // This one should sort after... 
- addrs.RootModuleInstance.Child("foo", addrs.NoKey), - addrs.RootModuleInstance, - addrs.RootModuleInstance.Child("bar", addrs.NoKey).Child("bar", addrs.NoKey), // ...this one. - addrs.RootModuleInstance.Child("bar", addrs.NoKey), - }, - [][]string{ - []string{"root"}, - []string{"root", "bar"}, - []string{"root", "foo"}, - []string{"root", "bar", "bar"}, - []string{"root", "foo", "bar"}, - }, - }, - } - - for _, tc := range cases { - s := new(State) - for _, p := range tc.In { - s.AddModule(p) - } - - actual := make([][]string, 0, len(tc.In)) - for _, m := range s.Modules { - actual = append(actual, m.Path) - } - - if !reflect.DeepEqual(actual, tc.Out) { - t.Fatalf("wrong result\ninput: %sgot: %#v\nwant: %#v", spew.Sdump(tc.In), actual, tc.Out) - } - } -} - -func TestStateOutputTypeRoundTrip(t *testing.T) { - state := &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Outputs: map[string]*OutputState{ - "string_output": &OutputState{ - Value: "String Value", - Type: "string", - }, - }, - }, - }, - } - state.init() - - buf := new(bytes.Buffer) - if err := WriteState(state, buf); err != nil { - t.Fatalf("err: %s", err) - } - - roundTripped, err := ReadState(buf) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !reflect.DeepEqual(state, roundTripped) { - t.Logf("expected:\n%#v", state) - t.Fatalf("got:\n%#v", roundTripped) - } -} - -func TestStateDeepCopy(t *testing.T) { - cases := []struct { - State *State - }{ - // Nil - {nil}, - - // Version - { - &State{Version: 5}, - }, - // TFVersion - { - &State{TFVersion: "5"}, - }, - // Modules - { - &State{ - Version: 6, - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{}, - }, - }, - }, - }, - }, - }, - }, - // Deposed - // The nil values shouldn't be there if the State was properly init'ed, - // but the Copy should still work anyway. 
- { - &State{ - Version: 6, - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{}, - }, - Deposed: []*InstanceState{ - {ID: "test"}, - nil, - }, - }, - }, - }, - }, - }, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("copy-%d", i), func(t *testing.T) { - actual := tc.State.DeepCopy() - expected := tc.State - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("Expected: %#v\nRecevied: %#v\n", expected, actual) - } - }) - } -} - -func TestStateEqual(t *testing.T) { - cases := []struct { - Name string - Result bool - One, Two *State - }{ - // Nils - { - "one nil", - false, - nil, - &State{Version: 2}, - }, - - { - "both nil", - true, - nil, - nil, - }, - - // Different versions - { - "different state versions", - false, - &State{Version: 5}, - &State{Version: 2}, - }, - - // Different modules - { - "different module states", - false, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - }, - }, - }, - &State{}, - }, - - { - "same module states", - true, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - }, - }, - }, - }, - - // Meta differs - { - "differing meta values with primitives", - false, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "schema_version": "1", - }, - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "schema_version": "2", - }, - }, - }, - }, - }, - }, - }, - }, - - // Meta with 
complex types - { - "same meta with complex types", - true, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "timeouts": map[string]interface{}{ - "create": 42, - "read": "27", - }, - }, - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "timeouts": map[string]interface{}{ - "create": 42, - "read": "27", - }, - }, - }, - }, - }, - }, - }, - }, - }, - - // Meta with complex types that have been altered during serialization - { - "same meta with complex types that have been json-ified", - true, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "timeouts": map[string]interface{}{ - "create": int(42), - "read": "27", - }, - }, - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Primary: &InstanceState{ - Meta: map[string]interface{}{ - "timeouts": map[string]interface{}{ - "create": float64(42), - "read": "27", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - if tc.One.Equal(tc.Two) != tc.Result { - t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) - } - if tc.Two.Equal(tc.One) != tc.Result { - t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) - } - }) - } -} - -func TestStateCompareAges(t *testing.T) { - cases := []struct { - Result StateAgeComparison - Err bool - One, Two *State - 
}{ - { - StateAgeEqual, false, - &State{ - Lineage: "1", - Serial: 2, - }, - &State{ - Lineage: "1", - Serial: 2, - }, - }, - { - StateAgeReceiverOlder, false, - &State{ - Lineage: "1", - Serial: 2, - }, - &State{ - Lineage: "1", - Serial: 3, - }, - }, - { - StateAgeReceiverNewer, false, - &State{ - Lineage: "1", - Serial: 3, - }, - &State{ - Lineage: "1", - Serial: 2, - }, - }, - { - StateAgeEqual, true, - &State{ - Lineage: "1", - Serial: 2, - }, - &State{ - Lineage: "2", - Serial: 2, - }, - }, - { - StateAgeEqual, true, - &State{ - Lineage: "1", - Serial: 3, - }, - &State{ - Lineage: "2", - Serial: 2, - }, - }, - } - - for i, tc := range cases { - result, err := tc.One.CompareAges(tc.Two) - - if err != nil && !tc.Err { - t.Errorf( - "%d: got error, but want success\n\n%s\n\n%s", - i, tc.One, tc.Two, - ) - continue - } - - if err == nil && tc.Err { - t.Errorf( - "%d: got success, but want error\n\n%s\n\n%s", - i, tc.One, tc.Two, - ) - continue - } - - if result != tc.Result { - t.Errorf( - "%d: got result %d, but want %d\n\n%s\n\n%s", - i, result, tc.Result, tc.One, tc.Two, - ) - continue - } - } -} - -func TestStateSameLineage(t *testing.T) { - cases := []struct { - Result bool - One, Two *State - }{ - { - true, - &State{ - Lineage: "1", - }, - &State{ - Lineage: "1", - }, - }, - { - // Empty lineage is compatible with all - true, - &State{ - Lineage: "", - }, - &State{ - Lineage: "1", - }, - }, - { - // Empty lineage is compatible with all - true, - &State{ - Lineage: "1", - }, - &State{ - Lineage: "", - }, - }, - { - false, - &State{ - Lineage: "1", - }, - &State{ - Lineage: "2", - }, - }, - } - - for i, tc := range cases { - result := tc.One.SameLineage(tc.Two) - - if result != tc.Result { - t.Errorf( - "%d: got %v, but want %v\n\n%s\n\n%s", - i, result, tc.Result, tc.One, tc.Two, - ) - continue - } - } -} - -func TestStateMarshalEqual(t *testing.T) { - tests := map[string]struct { - S1, S2 *State - Want bool - }{ - "both nil": { - nil, - nil, - true, - }, - 
"first zero, second nil": { - &State{}, - nil, - false, - }, - "first nil, second zero": { - nil, - &State{}, - false, - }, - "both zero": { - // These are not equal because they both implicitly init with - // different lineage. - &State{}, - &State{}, - false, - }, - "both set, same lineage": { - &State{ - Lineage: "abc123", - }, - &State{ - Lineage: "abc123", - }, - true, - }, - "both set, same lineage, different serial": { - &State{ - Lineage: "abc123", - Serial: 1, - }, - &State{ - Lineage: "abc123", - Serial: 2, - }, - false, - }, - "both set, same lineage, same serial, same resources": { - &State{ - Lineage: "abc123", - Serial: 1, - Modules: []*ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "foo_bar.baz": {}, - }, - }, - }, - }, - &State{ - Lineage: "abc123", - Serial: 1, - Modules: []*ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "foo_bar.baz": {}, - }, - }, - }, - }, - true, - }, - "both set, same lineage, same serial, different resources": { - &State{ - Lineage: "abc123", - Serial: 1, - Modules: []*ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "foo_bar.baz": {}, - }, - }, - }, - }, - &State{ - Lineage: "abc123", - Serial: 1, - Modules: []*ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "pizza_crust.tasty": {}, - }, - }, - }, - }, - false, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - got := test.S1.MarshalEqual(test.S2) - if got != test.Want { - t.Errorf("wrong result %#v; want %#v", got, test.Want) - s1Buf := &bytes.Buffer{} - s2Buf := &bytes.Buffer{} - _ = WriteState(test.S1, s1Buf) - _ = WriteState(test.S2, s2Buf) - t.Logf("\nState 1: %s\nState 2: %s", s1Buf.Bytes(), s2Buf.Bytes()) - } - }) - } -} - -func TestStateRemove(t *testing.T) { - cases := map[string]struct { - Address string - One, Two *State - }{ - "simple resource": { - "test_instance.foo", - &State{ - 
Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - "single instance": { - "test_instance.foo.primary", - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{}, - }, - }, - }, - }, - - "single instance in multi-count": { - "test_instance.foo[0]", - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo.0": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.foo.1": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo.1": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - "single resource, multi-count": { - "test_instance.foo", - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo.0": &ResourceState{ - Type: "test_instance", - 
Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.foo.1": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{}, - }, - }, - }, - }, - - "full module": { - "module.foo", - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - "module and children": { - "module.foo", - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &ModuleState{ - Path: []string{"root", "foo", "bar"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: 
&InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - } - - for k, tc := range cases { - if err := tc.One.Remove(tc.Address); err != nil { - t.Fatalf("bad: %s\n\n%s", k, err) - } - - if !tc.One.Equal(tc.Two) { - t.Fatalf("Bad: %s\n\n%s\n\n%s", k, tc.One.String(), tc.Two.String()) - } - } -} - -func TestResourceStateEqual(t *testing.T) { - cases := []struct { - Result bool - One, Two *ResourceState - }{ - // Different types - { - false, - &ResourceState{Type: "foo"}, - &ResourceState{Type: "bar"}, - }, - - // Different dependencies - { - false, - &ResourceState{Dependencies: []string{"foo"}}, - &ResourceState{Dependencies: []string{"bar"}}, - }, - - { - false, - &ResourceState{Dependencies: []string{"foo", "bar"}}, - &ResourceState{Dependencies: []string{"foo"}}, - }, - - { - true, - &ResourceState{Dependencies: []string{"bar", "foo"}}, - &ResourceState{Dependencies: []string{"foo", "bar"}}, - }, - - // Different primaries - { - false, - &ResourceState{Primary: nil}, - &ResourceState{Primary: &InstanceState{ID: "foo"}}, - }, - - { - true, - &ResourceState{Primary: &InstanceState{ID: "foo"}}, - &ResourceState{Primary: &InstanceState{ID: "foo"}}, - }, - - // Different tainted - { - false, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - }, - }, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - }, - - { - true, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - }, - } - - for i, tc := range cases { - if tc.One.Equal(tc.Two) != 
tc.Result { - t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) - } - if tc.Two.Equal(tc.One) != tc.Result { - t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) - } - } -} - -func TestResourceStateTaint(t *testing.T) { - cases := map[string]struct { - Input *ResourceState - Output *ResourceState - }{ - "no primary": { - &ResourceState{}, - &ResourceState{}, - }, - - "primary, not tainted": { - &ResourceState{ - Primary: &InstanceState{ID: "foo"}, - }, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - }, - - "primary, tainted": { - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - }, - } - - for k, tc := range cases { - tc.Input.Taint() - if !reflect.DeepEqual(tc.Input, tc.Output) { - t.Fatalf( - "Failure: %s\n\nExpected: %#v\n\nGot: %#v", - k, tc.Output, tc.Input) - } - } -} - -func TestResourceStateUntaint(t *testing.T) { - cases := map[string]struct { - Input *ResourceState - ExpectedOutput *ResourceState - }{ - "no primary, err": { - Input: &ResourceState{}, - ExpectedOutput: &ResourceState{}, - }, - - "primary, not tainted": { - Input: &ResourceState{ - Primary: &InstanceState{ID: "foo"}, - }, - ExpectedOutput: &ResourceState{ - Primary: &InstanceState{ID: "foo"}, - }, - }, - "primary, tainted": { - Input: &ResourceState{ - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - ExpectedOutput: &ResourceState{ - Primary: &InstanceState{ID: "foo"}, - }, - }, - } - - for k, tc := range cases { - tc.Input.Untaint() - if !reflect.DeepEqual(tc.Input, tc.ExpectedOutput) { - t.Fatalf( - "Failure: %s\n\nExpected: %#v\n\nGot: %#v", - k, tc.ExpectedOutput, tc.Input) - } - } -} - -func TestInstanceStateEmpty(t *testing.T) { - cases := map[string]struct { - In *InstanceState - Result bool - }{ - "nil is empty": { - nil, - true, - }, - "non-nil but without ID is empty": { 
- &InstanceState{}, - true, - }, - "with ID is not empty": { - &InstanceState{ - ID: "i-abc123", - }, - false, - }, - } - - for tn, tc := range cases { - if tc.In.Empty() != tc.Result { - t.Fatalf("%q expected %#v to be empty: %#v", tn, tc.In, tc.Result) - } - } -} - -func TestInstanceStateEqual(t *testing.T) { - cases := []struct { - Result bool - One, Two *InstanceState - }{ - // Nils - { - false, - nil, - &InstanceState{}, - }, - - { - false, - &InstanceState{}, - nil, - }, - - // Different IDs - { - false, - &InstanceState{ID: "foo"}, - &InstanceState{ID: "bar"}, - }, - - // Different Attributes - { - false, - &InstanceState{Attributes: map[string]string{"foo": "bar"}}, - &InstanceState{Attributes: map[string]string{"foo": "baz"}}, - }, - - // Different Attribute keys - { - false, - &InstanceState{Attributes: map[string]string{"foo": "bar"}}, - &InstanceState{Attributes: map[string]string{"bar": "baz"}}, - }, - - { - false, - &InstanceState{Attributes: map[string]string{"bar": "baz"}}, - &InstanceState{Attributes: map[string]string{"foo": "bar"}}, - }, - } - - for i, tc := range cases { - if tc.One.Equal(tc.Two) != tc.Result { - t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) - } - } -} - -func TestStateEmpty(t *testing.T) { - cases := []struct { - In *State - Result bool - }{ - { - nil, - true, - }, - { - &State{}, - true, - }, - { - &State{ - Remote: &RemoteState{Type: "foo"}, - }, - true, - }, - { - &State{ - Modules: []*ModuleState{ - &ModuleState{}, - }, - }, - false, - }, - } - - for i, tc := range cases { - if tc.In.Empty() != tc.Result { - t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) - } - } -} - -func TestStateHasResources(t *testing.T) { - cases := []struct { - In *State - Result bool - }{ - { - nil, - false, - }, - { - &State{}, - false, - }, - { - &State{ - Remote: &RemoteState{Type: "foo"}, - }, - false, - }, - { - &State{ - Modules: []*ModuleState{ - &ModuleState{}, - }, - }, - false, - }, - { - &State{ - Modules: 
[]*ModuleState{ - &ModuleState{}, - &ModuleState{}, - }, - }, - false, - }, - { - &State{ - Modules: []*ModuleState{ - &ModuleState{}, - &ModuleState{ - Resources: map[string]*ResourceState{ - "foo.foo": &ResourceState{}, - }, - }, - }, - }, - true, - }, - } - - for i, tc := range cases { - if tc.In.HasResources() != tc.Result { - t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) - } - } -} - -func TestStateFromFutureTerraform(t *testing.T) { - cases := []struct { - In string - Result bool - }{ - { - "", - false, - }, - { - "0.1", - false, - }, - { - "999.15.1", - true, - }, - } - - for _, tc := range cases { - state := &State{TFVersion: tc.In} - actual := state.FromFutureTerraform() - if actual != tc.Result { - t.Fatalf("%s: bad: %v", tc.In, actual) - } - } -} - -func TestStateIsRemote(t *testing.T) { - cases := []struct { - In *State - Result bool - }{ - { - nil, - false, - }, - { - &State{}, - false, - }, - { - &State{ - Remote: &RemoteState{Type: "foo"}, - }, - true, - }, - } - - for i, tc := range cases { - if tc.In.IsRemote() != tc.Result { - t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) - } - } -} - -func TestInstanceState_MergeDiff(t *testing.T) { - is := InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "bar", - "port": "8000", - }, - } - - diff := &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "bar", - New: "baz", - }, - "bar": &ResourceAttrDiff{ - Old: "", - New: "foo", - }, - "baz": &ResourceAttrDiff{ - Old: "", - New: "foo", - NewComputed: true, - }, - "port": &ResourceAttrDiff{ - NewRemoved: true, - }, - }, - } - - is2 := is.MergeDiff(diff) - - expected := map[string]string{ - "foo": "baz", - "bar": "foo", - "baz": hcl2shim.UnknownVariableValue, - } - - if !reflect.DeepEqual(expected, is2.Attributes) { - t.Fatalf("bad: %#v", is2.Attributes) - } -} - -// GH-12183. This tests that a list with a computed set generates the -// right partial state. 
This never failed but is put here for completion -// of the test case for GH-12183. -func TestInstanceState_MergeDiff_computedSet(t *testing.T) { - is := InstanceState{} - - diff := &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "config.#": &ResourceAttrDiff{ - Old: "0", - New: "1", - RequiresNew: true, - }, - - "config.0.name": &ResourceAttrDiff{ - Old: "", - New: "hello", - }, - - "config.0.rules.#": &ResourceAttrDiff{ - Old: "", - NewComputed: true, - }, - }, - } - - is2 := is.MergeDiff(diff) - - expected := map[string]string{ - "config.#": "1", - "config.0.name": "hello", - "config.0.rules.#": hcl2shim.UnknownVariableValue, - } - - if !reflect.DeepEqual(expected, is2.Attributes) { - t.Fatalf("bad: %#v", is2.Attributes) - } -} - -func TestInstanceState_MergeDiff_nil(t *testing.T) { - var is *InstanceState - - diff := &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "foo": &ResourceAttrDiff{ - Old: "", - New: "baz", - }, - }, - } - - is2 := is.MergeDiff(diff) - - expected := map[string]string{ - "foo": "baz", - } - - if !reflect.DeepEqual(expected, is2.Attributes) { - t.Fatalf("bad: %#v", is2.Attributes) - } -} - -func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { - is := InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "bar", - }, - } - - is2 := is.MergeDiff(nil) - - expected := map[string]string{ - "foo": "bar", - } - - if !reflect.DeepEqual(expected, is2.Attributes) { - t.Fatalf("bad: %#v", is2.Attributes) - } -} - -func TestReadWriteState(t *testing.T) { - state := &State{ - Serial: 9, - Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", - Remote: &RemoteState{ - Type: "http", - Config: map[string]string{ - "url": "http://my-cool-server.com/", - }, - }, - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Dependencies: []string{ - "aws_instance.bar", - }, - Resources: map[string]*ResourceState{ - "foo": &ResourceState{ - Primary: &InstanceState{ - ID: "bar", - Ephemeral: EphemeralState{ - 
ConnInfo: map[string]string{ - "type": "ssh", - "user": "root", - "password": "supersecret", - }, - }, - }, - }, - }, - }, - }, - } - state.init() - - buf := new(bytes.Buffer) - if err := WriteState(state, buf); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify that the version and serial are set - if state.Version != StateVersion { - t.Fatalf("bad version number: %d", state.Version) - } - - actual, err := ReadState(buf) - if err != nil { - t.Fatalf("err: %s", err) - } - - // ReadState should not restore sensitive information! - mod := state.RootModule() - mod.Resources["foo"].Primary.Ephemeral = EphemeralState{} - mod.Resources["foo"].Primary.Ephemeral.init() - - if !reflect.DeepEqual(actual, state) { - t.Logf("expected:\n%#v", state) - t.Fatalf("got:\n%#v", actual) - } -} - -func TestReadStateNewVersion(t *testing.T) { - type out struct { - Version int - } - - buf, err := json.Marshal(&out{StateVersion + 1}) - if err != nil { - t.Fatalf("err: %v", err) - } - - s, err := ReadState(bytes.NewReader(buf)) - if s != nil { - t.Fatalf("unexpected: %#v", s) - } - if !strings.Contains(err.Error(), "does not support state version") { - t.Fatalf("err: %v", err) - } -} - -func TestReadStateEmptyOrNilFile(t *testing.T) { - var emptyState bytes.Buffer - _, err := ReadState(&emptyState) - if err != ErrNoState { - t.Fatal("expected ErrNostate, got", err) - } - - var nilFile *os.File - _, err = ReadState(nilFile) - if err != ErrNoState { - t.Fatal("expected ErrNostate, got", err) - } -} - -func TestReadStateTFVersion(t *testing.T) { - type tfVersion struct { - Version int `json:"version"` - TFVersion string `json:"terraform_version"` - } - - cases := []struct { - Written string - Read string - Err bool - }{ - { - "0.0.0", - "0.0.0", - false, - }, - { - "", - "", - false, - }, - { - "bad", - "", - true, - }, - } - - for _, tc := range cases { - buf, err := json.Marshal(&tfVersion{ - Version: 2, - TFVersion: tc.Written, - }) - if err != nil { - t.Fatalf("err: %v", err) - } 
- - s, err := ReadState(bytes.NewReader(buf)) - if (err != nil) != tc.Err { - t.Fatalf("%s: err: %s", tc.Written, err) - } - if err != nil { - continue - } - - if s.TFVersion != tc.Read { - t.Fatalf("%s: bad: %s", tc.Written, s.TFVersion) - } - } -} - -func TestWriteStateTFVersion(t *testing.T) { - cases := []struct { - Write string - Read string - Err bool - }{ - { - "0.0.0", - "0.0.0", - false, - }, - { - "", - "", - false, - }, - { - "bad", - "", - true, - }, - } - - for _, tc := range cases { - var buf bytes.Buffer - err := WriteState(&State{TFVersion: tc.Write}, &buf) - if (err != nil) != tc.Err { - t.Fatalf("%s: err: %s", tc.Write, err) - } - if err != nil { - continue - } - - s, err := ReadState(&buf) - if err != nil { - t.Fatalf("%s: err: %s", tc.Write, err) - } - - if s.TFVersion != tc.Read { - t.Fatalf("%s: bad: %s", tc.Write, s.TFVersion) - } - } -} - -func TestParseResourceStateKey(t *testing.T) { - cases := []struct { - Input string - Expected *ResourceStateKey - ExpectedErr bool - }{ - { - Input: "aws_instance.foo.3", - Expected: &ResourceStateKey{ - Mode: ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - Index: 3, - }, - }, - { - Input: "aws_instance.foo.0", - Expected: &ResourceStateKey{ - Mode: ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - Index: 0, - }, - }, - { - Input: "aws_instance.foo", - Expected: &ResourceStateKey{ - Mode: ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - Index: -1, - }, - }, - { - Input: "data.aws_ami.foo", - Expected: &ResourceStateKey{ - Mode: DataResourceMode, - Type: "aws_ami", - Name: "foo", - Index: -1, - }, - }, - { - Input: "aws_instance.foo.malformed", - ExpectedErr: true, - }, - { - Input: "aws_instance.foo.malformedwithnumber.123", - ExpectedErr: true, - }, - { - Input: "malformed", - ExpectedErr: true, - }, - } - for _, tc := range cases { - rsk, err := ParseResourceStateKey(tc.Input) - if rsk != nil && tc.Expected != nil && !rsk.Equal(tc.Expected) { - t.Fatalf("%s: 
expected %s, got %s", tc.Input, tc.Expected, rsk) - } - if (err != nil) != tc.ExpectedErr { - t.Fatalf("%s: expected err: %t, got %s", tc.Input, tc.ExpectedErr, err) - } - } -} - -func TestReadState_prune(t *testing.T) { - state := &State{ - Modules: []*ModuleState{ - &ModuleState{Path: rootModulePath}, - nil, - }, - } - state.init() - - buf := new(bytes.Buffer) - if err := WriteState(state, buf); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err := ReadState(buf) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &State{ - Version: state.Version, - Lineage: state.Lineage, - } - expected.init() - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("got:\n%#v", actual) - } -} - -func TestReadState_pruneDependencies(t *testing.T) { - state := &State{ - Serial: 9, - Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", - Remote: &RemoteState{ - Type: "http", - Config: map[string]string{ - "url": "http://my-cool-server.com/", - }, - }, - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Dependencies: []string{ - "aws_instance.bar", - "aws_instance.bar", - }, - Resources: map[string]*ResourceState{ - "foo": &ResourceState{ - Dependencies: []string{ - "aws_instance.baz", - "aws_instance.baz", - }, - Primary: &InstanceState{ - ID: "bar", - }, - }, - }, - }, - }, - } - state.init() - - buf := new(bytes.Buffer) - if err := WriteState(state, buf); err != nil { - t.Fatalf("err: %s", err) - } - - actual, err := ReadState(buf) - if err != nil { - t.Fatalf("err: %s", err) - } - - // make sure the duplicate Dependencies are filtered - modDeps := actual.Modules[0].Dependencies - resourceDeps := actual.Modules[0].Resources["foo"].Dependencies - - if len(modDeps) > 1 || modDeps[0] != "aws_instance.bar" { - t.Fatalf("expected 1 module depends_on entry, got %q", modDeps) - } - - if len(resourceDeps) > 1 || resourceDeps[0] != "aws_instance.baz" { - t.Fatalf("expected 1 resource depends_on entry, got %q", resourceDeps) - } -} - -func 
TestReadState_bigHash(t *testing.T) { - expected := uint64(14885267135666261723) - s := strings.NewReader(`{"version": 3, "backend":{"hash":14885267135666261723}}`) - - actual, err := ReadState(s) - if err != nil { - t.Fatal(err) - } - - if actual.Backend.Hash != expected { - t.Fatalf("expected backend hash %d, got %d", expected, actual.Backend.Hash) - } -} - -func TestResourceNameSort(t *testing.T) { - names := []string{ - "a", - "b", - "a.0", - "a.c", - "a.d", - "c", - "a.b.0", - "a.b.1", - "a.b.10", - "a.b.2", - } - - sort.Sort(resourceNameSort(names)) - - expected := []string{ - "a", - "a.0", - "a.b.0", - "a.b.1", - "a.b.2", - "a.b.10", - "a.c", - "a.d", - "b", - "c", - } - - if !reflect.DeepEqual(names, expected) { - t.Fatalf("got: %q\nexpected: %q\n", names, expected) - } -} diff --git a/internal/legacy/terraform/version_required.go b/internal/legacy/terraform/version_required.go deleted file mode 100644 index f14d93f681d3..000000000000 --- a/internal/legacy/terraform/version_required.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/hashicorp/terraform/internal/configs" - - tfversion "github.com/hashicorp/terraform/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. 
-func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: constraint.DeclRange.Ptr(), - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. 
Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: constraint.DeclRange.Ptr(), - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/internal/moduledeps/module.go b/internal/moduledeps/module.go deleted file mode 100644 index e428a4ec4152..000000000000 --- a/internal/moduledeps/module.go +++ /dev/null @@ -1,199 +0,0 @@ -package moduledeps - -import ( - "sort" - "strings" - - "github.com/hashicorp/terraform/internal/plugin/discovery" -) - -// Module represents the dependencies of a single module, as well being -// a node in a tree of such structures representing the dependencies of -// an entire configuration. -type Module struct { - Name string - Providers Providers - Children []*Module -} - -// WalkFunc is a callback type for use with Module.WalkTree -type WalkFunc func(path []string, parent *Module, current *Module) error - -// WalkTree calls the given callback once for the receiver and then -// once for each descendent, in an order such that parents are called -// before their children and siblings are called in the order they -// appear in the Children slice. -// -// When calling the callback, parent will be nil for the first call -// for the receiving module, and then set to the direct parent of -// each module for the subsequent calls. -// -// The path given to the callback is valid only until the callback -// returns, after which it will be mutated and reused. Callbacks must -// therefore copy the path slice if they wish to retain it. -// -// If the given callback returns an error, the walk will be aborted at -// that point and that error returned to the caller. 
-// -// This function is not thread-safe for concurrent modifications of the -// data structure, so it's the caller's responsibility to arrange for that -// should it be needed. -// -// It is safe for a callback to modify the descendents of the "current" -// module, including the ordering of the Children slice itself, but the -// callback MUST NOT modify the parent module. -func (m *Module) WalkTree(cb WalkFunc) error { - return walkModuleTree(make([]string, 0, 1), nil, m, cb) -} - -func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { - path = append(path, current.Name) - err := cb(path, parent, current) - if err != nil { - return err - } - - for _, child := range current.Children { - err := walkModuleTree(path, current, child, cb) - if err != nil { - return err - } - } - return nil -} - -// SortChildren sorts the Children slice into lexicographic order by -// name, in-place. -// -// This is primarily useful prior to calling WalkTree so that the walk -// will proceed in a consistent order. -func (m *Module) SortChildren() { - sort.Sort(sortModules{m.Children}) -} - -// SortDescendents is a convenience wrapper for calling SortChildren on -// the receiver and all of its descendent modules. -func (m *Module) SortDescendents() { - m.WalkTree(func(path []string, parent *Module, current *Module) error { - current.SortChildren() - return nil - }) -} - -type sortModules struct { - modules []*Module -} - -func (s sortModules) Len() int { - return len(s.modules) -} - -func (s sortModules) Less(i, j int) bool { - cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) - return cmp < 0 -} - -func (s sortModules) Swap(i, j int) { - s.modules[i], s.modules[j] = s.modules[j], s.modules[i] -} - -// ProviderRequirements produces a PluginRequirements structure that can -// be used with discovery.PluginMetaSet.ConstrainVersions to identify -// suitable plugins to satisfy the module's provider dependencies. 
-// -// This method only considers the direct requirements of the receiver. -// Use AllPluginRequirements to flatten the dependencies for the -// entire tree of modules. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) ProviderRequirements() discovery.PluginRequirements { - ret := make(discovery.PluginRequirements) - for pFqn, dep := range m.Providers { - providerType := pFqn.Type - if existing, exists := ret[providerType]; exists { - ret[providerType].Versions = existing.Versions.Append(dep.Constraints) - } else { - ret[providerType] = &discovery.PluginConstraints{ - Versions: dep.Constraints, - } - } - } - return ret -} - -// AllProviderRequirements calls ProviderRequirements for the receiver and all -// of its descendents, and merges the result into a single PluginRequirements -// structure that would satisfy all of the modules together. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) AllProviderRequirements() discovery.PluginRequirements { - var ret discovery.PluginRequirements - m.WalkTree(func(path []string, parent *Module, current *Module) error { - ret = ret.Merge(current.ProviderRequirements()) - return nil - }) - return ret -} - -// Equal returns true if the receiver is the root of an identical tree -// to the other given Module. This is a deep comparison that considers -// the equality of all downstream modules too. -// -// The children are considered to be ordered, so callers may wish to use -// SortDescendents first to normalize the order of the slices of child nodes. -// -// The implementation of this function is not optimized since it is provided -// primarily for use in tests. 
-func (m *Module) Equal(other *Module) bool { - // take care of nils first - if m == nil && other == nil { - return true - } else if (m == nil && other != nil) || (m != nil && other == nil) { - return false - } - - if m.Name != other.Name { - return false - } - - if len(m.Providers) != len(other.Providers) { - return false - } - if len(m.Children) != len(other.Children) { - return false - } - - // Can't use reflect.DeepEqual on this provider structure because - // the nested Constraints objects contain function pointers that - // never compare as equal. So we'll need to walk it the long way. - for inst, dep := range m.Providers { - if _, exists := other.Providers[inst]; !exists { - return false - } - - if dep.Reason != other.Providers[inst].Reason { - return false - } - - // Constraints are not too easy to compare robustly, so - // we'll just use their string representations as a proxy - // for now. - if dep.Constraints.String() != other.Providers[inst].Constraints.String() { - return false - } - } - - // Above we already checked that we have the same number of children - // in each module, so now we just need to check that they are - // recursively equal. 
- for i := range m.Children { - if !m.Children[i].Equal(other.Children[i]) { - return false - } - } - - // If we fall out here then they are equal - return true -} diff --git a/internal/moduledeps/module_test.go b/internal/moduledeps/module_test.go deleted file mode 100644 index dfbc99d219d5..000000000000 --- a/internal/moduledeps/module_test.go +++ /dev/null @@ -1,214 +0,0 @@ -package moduledeps - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plugin/discovery" -) - -func TestModuleWalkTree(t *testing.T) { - type walkStep struct { - Path []string - ParentName string - } - - tests := []struct { - Root *Module - WalkOrder []walkStep - }{ - { - &Module{ - Name: "root", - Children: nil, - }, - []walkStep{ - { - Path: []string{"root"}, - ParentName: "", - }, - }, - }, - { - &Module{ - Name: "root", - Children: []*Module{ - { - Name: "child", - }, - }, - }, - []walkStep{ - { - Path: []string{"root"}, - ParentName: "", - }, - { - Path: []string{"root", "child"}, - ParentName: "root", - }, - }, - }, - { - &Module{ - Name: "root", - Children: []*Module{ - { - Name: "child", - Children: []*Module{ - { - Name: "grandchild", - }, - }, - }, - }, - }, - []walkStep{ - { - Path: []string{"root"}, - ParentName: "", - }, - { - Path: []string{"root", "child"}, - ParentName: "root", - }, - { - Path: []string{"root", "child", "grandchild"}, - ParentName: "child", - }, - }, - }, - { - &Module{ - Name: "root", - Children: []*Module{ - { - Name: "child1", - Children: []*Module{ - { - Name: "grandchild1", - }, - }, - }, - { - Name: "child2", - Children: []*Module{ - { - Name: "grandchild2", - }, - }, - }, - }, - }, - []walkStep{ - { - Path: []string{"root"}, - ParentName: "", - }, - { - Path: []string{"root", "child1"}, - ParentName: "root", - }, - { - Path: []string{"root", "child1", "grandchild1"}, - ParentName: "child1", - }, - { - Path: []string{"root", "child2"}, - ParentName: "root", - }, - { - 
Path: []string{"root", "child2", "grandchild2"}, - ParentName: "child2", - }, - }, - }, - } - - for i, test := range tests { - t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { - wo := test.WalkOrder - test.Root.WalkTree(func(path []string, parent *Module, current *Module) error { - if len(wo) == 0 { - t.Fatalf("ran out of walk steps while expecting one for %#v", path) - } - step := wo[0] - wo = wo[1:] - if got, want := path, step.Path; !reflect.DeepEqual(got, want) { - t.Errorf("wrong path %#v; want %#v", got, want) - } - parentName := "" - if parent != nil { - parentName = parent.Name - } - if got, want := parentName, step.ParentName; got != want { - t.Errorf("wrong parent name %q; want %q", got, want) - } - - if got, want := current.Name, path[len(path)-1]; got != want { - t.Errorf("mismatching current.Name %q and final path element %q", got, want) - } - return nil - }) - }) - } -} - -func TestModuleSortChildren(t *testing.T) { - m := &Module{ - Name: "root", - Children: []*Module{ - { - Name: "apple", - }, - { - Name: "zebra", - }, - { - Name: "xylophone", - }, - { - Name: "pig", - }, - }, - } - - m.SortChildren() - - want := []string{"apple", "pig", "xylophone", "zebra"} - var got []string - for _, c := range m.Children { - got = append(got, c.Name) - } - - if !reflect.DeepEqual(want, got) { - t.Errorf("wrong order %#v; want %#v", want, got) - } -} - -func TestModuleProviderRequirements(t *testing.T) { - m := &Module{ - Name: "root", - Providers: Providers{ - addrs.NewDefaultProvider("foo"): ProviderDependency{ - Constraints: discovery.ConstraintStr(">=1.0.0").MustParse(), - }, - addrs.NewDefaultProvider("baz"): ProviderDependency{ - Constraints: discovery.ConstraintStr(">=3.0.0").MustParse(), - }, - }, - } - - reqd := m.ProviderRequirements() - if len(reqd) != 2 { - t.Errorf("wrong number of elements in %#v; want 2", reqd) - } - if got, want := reqd["foo"].Versions.String(), ">=1.0.0"; got != want { - t.Errorf("wrong combination of versions for 'foo' %q; 
want %q", got, want) - } - if got, want := reqd["baz"].Versions.String(), ">=3.0.0"; got != want { - t.Errorf("wrong combination of versions for 'baz' %q; want %q", got, want) - } -} diff --git a/internal/moduletest/provider.go b/internal/moduletest/provider.go deleted file mode 100644 index 52601d9d280f..000000000000 --- a/internal/moduletest/provider.go +++ /dev/null @@ -1,575 +0,0 @@ -package moduletest - -import ( - "fmt" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/repl" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Provider is an implementation of providers.Interface which we're -// using as a likely-only-temporary vehicle for research on an opinionated -// module testing workflow in Terraform. -// -// We expose this to configuration as "terraform.io/builtin/test", but -// any attempt to configure it will emit a warning that it is experimental -// and likely to change or be removed entirely in future Terraform CLI -// releases. -// -// The testing provider exists to gather up test results during a Terraform -// apply operation. Its "test_results" managed resource type doesn't have any -// user-visible effect on its own, but when used in conjunction with the -// "terraform test" experimental command it is the intermediary that holds -// the test results while the test runs, so that the test command can then -// report them. -// -// For correct behavior of the assertion tracking, the "terraform test" -// command must be sure to use the same instance of Provider for both the -// plan and apply steps, so that the assertions that were planned can still -// be tracked during apply. 
For other commands that don't explicitly support -// test assertions, the provider will still succeed but the assertions data -// may not be complete if the apply step fails. -type Provider struct { - // components tracks all of the "component" names that have been - // used in test assertions resources so far. Each resource must have - // a unique component name. - components map[string]*Component - - // Must lock mutex in order to interact with the components map, because - // test assertions can potentially run concurrently. - mutex sync.RWMutex -} - -var _ providers.Interface = (*Provider)(nil) - -// NewProvider returns a new instance of the test provider. -func NewProvider() *Provider { - return &Provider{ - components: make(map[string]*Component), - } -} - -// TestResults returns the current record of test results tracked inside the -// provider. -// -// The result is a direct reference to the internal state of the provider, -// so the caller mustn't modify it nor store it across calls to provider -// operations. -func (p *Provider) TestResults() map[string]*Component { - return p.components -} - -// Reset returns the recieving provider back to its original state, with no -// recorded test results. -// -// It additionally detaches the instance from any data structure previously -// returned by method TestResults, freeing the caller from the constraints -// in its documentation about mutability and storage. -// -// For convenience in the presumed common case of resetting as part of -// capturing the results for storage, this method also returns the result -// that method TestResults would've returned if called prior to the call -// to Reset. -func (p *Provider) Reset() map[string]*Component { - p.mutex.Lock() - log.Print("[TRACE] moduletest.Provider: Reset") - ret := p.components - p.components = make(map[string]*Component) - p.mutex.Unlock() - return ret -} - -// GetProviderSchema returns the complete schema for the provider. 
-func (p *Provider) GetProviderSchema() providers.GetProviderSchemaResponse { - return providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_assertions": testAssertionsSchema, - }, - } -} - -// ValidateProviderConfig validates the provider configuration. -func (p *Provider) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { - // This provider has no configurable settings, so nothing to validate. - var res providers.ValidateProviderConfigResponse - return res -} - -// ConfigureProvider configures and initializes the provider. -func (p *Provider) ConfigureProvider(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { - // This provider has no configurable settings, but we use the configure - // request as an opportunity to generate a warning about it being - // experimental. - var res providers.ConfigureProviderResponse - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Warning, - "The test provider is experimental", - "The Terraform team is using the test provider (terraform.io/builtin/test) as part of ongoing research about declarative testing of Terraform modules.\n\nThe availability and behavior of this provider is expected to change significantly even in patch releases, so we recommend using this provider only in test configurations and constraining your test configurations to an exact Terraform version.", - nil, - )) - return res -} - -// ValidateResourceConfig is used to validate configuration values for a resource. 
-func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - log.Print("[TRACE] moduletest.Provider: ValidateResourceConfig") - - var res providers.ValidateResourceConfigResponse - if req.TypeName != "test_assertions" { // we only have one resource type - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) - return res - } - - config := req.Config - if !config.GetAttr("component").IsKnown() { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid component expression", - "The component name must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", - cty.GetAttrPath("component"), - )) - } - if !hclsyntax.ValidIdentifier(config.GetAttr("component").AsString()) { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid component name", - "The component name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", - cty.GetAttrPath("component"), - )) - } - for it := config.GetAttr("equal").ElementIterator(); it.Next(); { - k, obj := it.Element() - if !hclsyntax.ValidIdentifier(k.AsString()) { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid assertion name", - "An assertion name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", - cty.GetAttrPath("equal").Index(k), - )) - } - if !obj.GetAttr("description").IsKnown() { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid description expression", - "The description must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", - 
cty.GetAttrPath("equal").Index(k).GetAttr("description"), - )) - } - } - for it := config.GetAttr("check").ElementIterator(); it.Next(); { - k, obj := it.Element() - if !hclsyntax.ValidIdentifier(k.AsString()) { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid assertion name", - "An assertion name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", - cty.GetAttrPath("check").Index(k), - )) - } - if !obj.GetAttr("description").IsKnown() { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid description expression", - "The description must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", - cty.GetAttrPath("equal").Index(k).GetAttr("description"), - )) - } - } - - return res -} - -// ReadResource refreshes a resource and returns its current state. -func (p *Provider) ReadResource(req providers.ReadResourceRequest) providers.ReadResourceResponse { - log.Print("[TRACE] moduletest.Provider: ReadResource") - - var res providers.ReadResourceResponse - if req.TypeName != "test_assertions" { // we only have one resource type - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) - return res - } - // Test assertions are not a real remote object, so there isn't actually - // anything to refresh here. - res.NewState = req.PriorState - return res -} - -// UpgradeResourceState is called to allow the provider to adapt the raw value -// stored in the state in case the schema has changed since it was originally -// written. 
-func (p *Provider) UpgradeResourceState(req providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - log.Print("[TRACE] moduletest.Provider: UpgradeResourceState") - - var res providers.UpgradeResourceStateResponse - if req.TypeName != "test_assertions" { // we only have one resource type - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) - return res - } - - // We assume here that there can never be a flatmap version of this - // resource type's data, because this provider was never included in a - // version of Terraform that used flatmap and this provider's schema - // contains attributes that are not flatmap-compatible anyway. - if len(req.RawStateFlatmap) != 0 { - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("can't upgrade a flatmap state for %q", req.TypeName)) - return res - } - if req.Version != 0 { - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("the state for this %s was created by a newer version of the provider", req.TypeName)) - return res - } - - v, err := ctyjson.Unmarshal(req.RawStateJSON, testAssertionsSchema.Block.ImpliedType()) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("failed to decode state for %s: %s", req.TypeName, err)) - return res - } - - res.UpgradedState = v - return res -} - -// PlanResourceChange takes the current state and proposed state of a -// resource, and returns the planned final state. 
-func (p *Provider) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - log.Print("[TRACE] moduletest.Provider: PlanResourceChange") - - // this is a destroy plan, - if req.ProposedNewState.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - var res providers.PlanResourceChangeResponse - if req.TypeName != "test_assertions" { // we only have one resource type - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) - return res - } - - // During planning, our job is to gather up all of the planned test - // assertions marked as pending, which will then allow us to include - // all of them in test results even if there's a failure during apply - // that prevents the full completion of the graph walk. - // - // In a sense our plan phase is similar to the compile step for a - // test program written in another language. Planning itself can fail, - // which means we won't be able to form a complete test plan at all, - // but if we succeed in planning then subsequent problems can be treated - // as test failures at "runtime", while still keeping a full manifest - // of all of the tests that ought to have run if the apply had run to - // completion. - - proposed := req.ProposedNewState - res.PlannedState = proposed - componentName := proposed.GetAttr("component").AsString() // proven known during validate - p.mutex.Lock() - defer p.mutex.Unlock() - // NOTE: Ideally we'd do something here to verify if two assertions - // resources in the configuration attempt to declare the same component, - // but we can't actually do that because Terraform calls PlanResourceChange - // during both plan and apply, and so the second one would always fail. 
- // Since this is just providing a temporary pseudo-syntax for writing tests - // anyway, we'll live with this for now and aim to solve it with a future - // iteration of testing that's better integrated into the Terraform - // language. - /* - if _, exists := p.components[componentName]; exists { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Duplicate test component", - fmt.Sprintf("Another test_assertions resource already declared assertions for the component name %q.", componentName), - cty.GetAttrPath("component"), - )) - return res - } - */ - - component := Component{ - Assertions: make(map[string]*Assertion), - } - - for it := proposed.GetAttr("equal").ElementIterator(); it.Next(); { - k, obj := it.Element() - name := k.AsString() - if _, exists := component.Assertions[name]; exists { - // We can't actually get here in practice because so far we've - // only been pulling keys from one map, and so any duplicates - // would've been caught during config decoding, but this is here - // just to make these two blocks symmetrical to avoid mishaps in - // future refactoring/reorganization. - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Duplicate test assertion", - fmt.Sprintf("Another assertion block in this resource already declared an assertion named %q.", name), - cty.GetAttrPath("equal").Index(k), - )) - continue - } - - var desc string - descVal := obj.GetAttr("description") - if descVal.IsNull() { - descVal = cty.StringVal("") - } - err := gocty.FromCtyValue(descVal, &desc) - if err != nil { - // We shouldn't get here because we've already validated everything - // that would make FromCtyValue fail above and during validate. 
- res.Diagnostics = res.Diagnostics.Append(err) - } - - component.Assertions[name] = &Assertion{ - Outcome: Pending, - Description: desc, - } - } - - for it := proposed.GetAttr("check").ElementIterator(); it.Next(); { - k, obj := it.Element() - name := k.AsString() - if _, exists := component.Assertions[name]; exists { - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Duplicate test assertion", - fmt.Sprintf("Another assertion block in this resource already declared an assertion named %q.", name), - cty.GetAttrPath("check").Index(k), - )) - continue - } - - var desc string - descVal := obj.GetAttr("description") - if descVal.IsNull() { - descVal = cty.StringVal("") - } - err := gocty.FromCtyValue(descVal, &desc) - if err != nil { - // We shouldn't get here because we've already validated everything - // that would make FromCtyValue fail above and during validate. - res.Diagnostics = res.Diagnostics.Append(err) - } - - component.Assertions[name] = &Assertion{ - Outcome: Pending, - Description: desc, - } - } - - p.components[componentName] = &component - return res -} - -// ApplyResourceChange takes the planned state for a resource, which may -// yet contain unknown computed values, and applies the changes returning -// the final state. -func (p *Provider) ApplyResourceChange(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - log.Print("[TRACE] moduletest.Provider: ApplyResourceChange") - - var res providers.ApplyResourceChangeResponse - if req.TypeName != "test_assertions" { // we only have one resource type - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) - return res - } - - // During apply we actually check the assertions and record the results. 
- // An assertion failure isn't reflected as an error from the apply call - // because if possible we'd like to continue exercising other objects - // downstream in case that allows us to gather more information to report. - // (If something downstream returns an error then that could prevent us - // from completing other assertions, though.) - - planned := req.PlannedState - res.NewState = planned - if res.NewState.IsNull() { - // If we're destroying then we'll just quickly return success to - // allow the test process to clean up after itself. - return res - } - componentName := planned.GetAttr("component").AsString() // proven known during validate - - p.mutex.Lock() - defer p.mutex.Unlock() - component := p.components[componentName] - if component == nil { - // We might get here when using this provider outside of the - // "terraform test" command, where there won't be any mechanism to - // preserve the test provider instance between the plan and apply - // phases. In that case, we assume that nobody will come looking to - // collect the results anyway, and so we can just silently skip - // checking. 
- return res - } - - for it := planned.GetAttr("equal").ElementIterator(); it.Next(); { - k, obj := it.Element() - name := k.AsString() - var desc string - if plan, exists := component.Assertions[name]; exists { - desc = plan.Description - } - assert := &Assertion{ - Outcome: Pending, - Description: desc, - } - - gotVal := obj.GetAttr("got") - wantVal := obj.GetAttr("want") - switch { - case wantVal.RawEquals(gotVal): - assert.Outcome = Passed - gotStr := repl.FormatValue(gotVal, 4) - assert.Message = fmt.Sprintf("correct value\n got: %s\n", gotStr) - default: - assert.Outcome = Failed - gotStr := repl.FormatValue(gotVal, 4) - wantStr := repl.FormatValue(wantVal, 4) - assert.Message = fmt.Sprintf("wrong value\n got: %s\n want: %s\n", gotStr, wantStr) - } - - component.Assertions[name] = assert - } - - for it := planned.GetAttr("check").ElementIterator(); it.Next(); { - k, obj := it.Element() - name := k.AsString() - var desc string - if plan, exists := component.Assertions[name]; exists { - desc = plan.Description - } - assert := &Assertion{ - Outcome: Pending, - Description: desc, - } - - condVal := obj.GetAttr("condition") - switch { - case condVal.IsNull(): - res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( - tfdiags.Error, - "Invalid check condition", - "The condition value must be a boolean expression, not null.", - cty.GetAttrPath("check").Index(k).GetAttr("condition"), - )) - continue - case condVal.True(): - assert.Outcome = Passed - assert.Message = "condition passed" - default: - assert.Outcome = Failed - // For "check" we can't really return a decent error message - // because we've lost all of the context by the time we get here. - // "equal" will be better for most tests for that reason, and also - // this is one reason why in the long run it would be better for - // test assertions to be a first-class language feature rather than - // just a provider-based concept. 
- assert.Message = "condition failed" - } - - component.Assertions[name] = assert - } - - return res -} - -// ImportResourceState requests that the given resource be imported. -func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - var res providers.ImportResourceStateResponse - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("%s is not importable", req.TypeName)) - return res -} - -// ValidateDataResourceConfig is used to to validate the resource configuration values. -func (p *Provider) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { - // This provider has no data resouce types at all. - var res providers.ValidateDataResourceConfigResponse - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported data source %s", req.TypeName)) - return res -} - -// ReadDataSource returns the data source's current state. -func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - // This provider has no data resouce types at all. - var res providers.ReadDataSourceResponse - res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported data source %s", req.TypeName)) - return res -} - -// Stop is called when the provider should halt any in-flight actions. -func (p *Provider) Stop() error { - // This provider doesn't do anything that can be cancelled. - return nil -} - -// Close is a noop for this provider, since it's run in-process. -func (p *Provider) Close() error { - return nil -} - -var testAssertionsSchema = providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "component": { - Type: cty.String, - Description: "The name of the component being tested. 
This is just for namespacing assertions in a result report.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "equal": { - Nesting: configschema.NestingMap, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "description": { - Type: cty.String, - Description: "An optional human-readable description of what's being tested by this assertion.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - "got": { - Type: cty.DynamicPseudoType, - Description: "The actual result value generated by the relevant component.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - "want": { - Type: cty.DynamicPseudoType, - Description: "The value that the component is expected to have generated.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - }, - }, - }, - "check": { - Nesting: configschema.NestingMap, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "description": { - Type: cty.String, - Description: "An optional (but strongly recommended) human-readable description of what's being tested by this assertion.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - "condition": { - Type: cty.Bool, - Description: "An expression that must be true in order for the test to pass.", - DescriptionKind: configschema.StringPlain, - Required: true, - }, - }, - }, - }, - }, - }, -} diff --git a/internal/moduletest/provider_test.go b/internal/moduletest/provider_test.go deleted file mode 100644 index 30ca0359a64f..000000000000 --- a/internal/moduletest/provider_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package moduletest - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty-debug/ctydebug" - "github.com/zclconf/go-cty/cty" -) - -func TestProvider(t *testing.T) { - - assertionConfig := 
cty.ObjectVal(map[string]cty.Value{ - "component": cty.StringVal("spline_reticulator"), - "equal": cty.MapVal(map[string]cty.Value{ - "match": cty.ObjectVal(map[string]cty.Value{ - "description": cty.StringVal("this should match"), - "got": cty.StringVal("a"), - "want": cty.StringVal("a"), - }), - "unmatch": cty.ObjectVal(map[string]cty.Value{ - "description": cty.StringVal("this should not match"), - "got": cty.StringVal("a"), - "want": cty.StringVal("b"), - }), - }), - "check": cty.MapVal(map[string]cty.Value{ - "pass": cty.ObjectVal(map[string]cty.Value{ - "description": cty.StringVal("this should pass"), - "condition": cty.True, - }), - "fail": cty.ObjectVal(map[string]cty.Value{ - "description": cty.StringVal("this should fail"), - "condition": cty.False, - }), - }), - }) - - // The provider code expects to receive an object that was decoded from - // HCL using the schema, so to make sure we're testing a more realistic - // situation here we'll require the config to conform to the schema. If - // this fails, it's a bug in the configuration definition above rather - // than in the provider itself. 
- for _, err := range assertionConfig.Type().TestConformance(testAssertionsSchema.Block.ImpliedType()) { - t.Error(err) - } - - p := NewProvider() - - configureResp := p.ConfigureProvider(providers.ConfigureProviderRequest{ - Config: cty.EmptyObjectVal, - }) - if got, want := len(configureResp.Diagnostics), 1; got != want { - t.Fatalf("got %d Configure diagnostics, but want %d", got, want) - } - if got, want := configureResp.Diagnostics[0].Description().Summary, "The test provider is experimental"; got != want { - t.Fatalf("wrong diagnostic message\ngot: %s\nwant: %s", got, want) - } - - validateResp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ - TypeName: "test_assertions", - Config: assertionConfig, - }) - if got, want := len(validateResp.Diagnostics), 0; got != want { - t.Fatalf("got %d ValidateResourceTypeConfig diagnostics, but want %d", got, want) - } - - planResp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: "test_assertions", - Config: assertionConfig, - PriorState: cty.NullVal(assertionConfig.Type()), - ProposedNewState: assertionConfig, - }) - if got, want := len(planResp.Diagnostics), 0; got != want { - t.Fatalf("got %d PlanResourceChange diagnostics, but want %d", got, want) - } - planned := planResp.PlannedState - if got, want := planned, assertionConfig; !want.RawEquals(got) { - t.Fatalf("wrong planned new value\n%s", ctydebug.DiffValues(want, got)) - } - - gotComponents := p.TestResults() - wantComponents := map[string]*Component{ - "spline_reticulator": { - Assertions: map[string]*Assertion{ - "pass": { - Outcome: Pending, - Description: "this should pass", - }, - "fail": { - Outcome: Pending, - Description: "this should fail", - }, - "match": { - Outcome: Pending, - Description: "this should match", - }, - "unmatch": { - Outcome: Pending, - Description: "this should not match", - }, - }, - }, - } - if diff := cmp.Diff(wantComponents, gotComponents); diff != "" { - t.Fatalf("wrong test results after 
planning\n%s", diff) - } - - applyResp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: "test_assertions", - Config: assertionConfig, - PriorState: cty.NullVal(assertionConfig.Type()), - PlannedState: planned, - }) - if got, want := len(applyResp.Diagnostics), 0; got != want { - t.Fatalf("got %d ApplyResourceChange diagnostics, but want %d", got, want) - } - final := applyResp.NewState - if got, want := final, assertionConfig; !want.RawEquals(got) { - t.Fatalf("wrong new value\n%s", ctydebug.DiffValues(want, got)) - } - - gotComponents = p.TestResults() - wantComponents = map[string]*Component{ - "spline_reticulator": { - Assertions: map[string]*Assertion{ - "pass": { - Outcome: Passed, - Description: "this should pass", - Message: "condition passed", - }, - "fail": { - Outcome: Failed, - Description: "this should fail", - Message: "condition failed", - }, - "match": { - Outcome: Passed, - Description: "this should match", - Message: "correct value\n got: \"a\"\n", - }, - "unmatch": { - Outcome: Failed, - Description: "this should not match", - Message: "wrong value\n got: \"a\"\n want: \"b\"\n", - }, - }, - }, - } - if diff := cmp.Diff(wantComponents, gotComponents); diff != "" { - t.Fatalf("wrong test results after applying\n%s", diff) - } - -} diff --git a/internal/plans/objchange/action.go b/internal/plans/objchange/action.go deleted file mode 100644 index 56418aaee81a..000000000000 --- a/internal/plans/objchange/action.go +++ /dev/null @@ -1,40 +0,0 @@ -package objchange - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/plans" -) - -// ActionForChange determines which plans.Action value best describes a -// change from the value given in before to the value given in after. -// -// Because it has no context aside from the values, it can only return the -// basic actions NoOp, Create, Update, and Delete. 
Other codepaths with -// additional information might make this decision differently, such as by -// using the Replace action instead of the Update action where that makes -// sense. -// -// If the after value is unknown then the action can't be properly decided, and -// so ActionForChange will conservatively return either Create or Update -// depending on whether the before value is null. The before value must always -// be fully known; ActionForChange will panic if it contains any unknown values. -func ActionForChange(before, after cty.Value) plans.Action { - switch { - case !after.IsKnown(): - if before.IsNull() { - return plans.Create - } - return plans.Update - case after.IsNull() && before.IsNull(): - return plans.NoOp - case after.IsNull() && !before.IsNull(): - return plans.Delete - case before.IsNull() && !after.IsNull(): - return plans.Create - case after.RawEquals(before): - return plans.NoOp - default: - return plans.Update - } -} diff --git a/internal/plans/plan.go b/internal/plans/plan.go deleted file mode 100644 index 4ac924d010b7..000000000000 --- a/internal/plans/plan.go +++ /dev/null @@ -1,197 +0,0 @@ -package plans - -import ( - "sort" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/globalref" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// Plan is the top-level type representing a planned set of changes. -// -// A plan is a summary of the set of changes required to move from a current -// state to a goal state derived from configuration. The described changes -// are not applied directly, but contain an approximation of the final -// result that will be completed during apply by resolving any values that -// cannot be predicted. 
-// -// A plan must always be accompanied by the configuration it was built from, -// since the plan does not itself include all of the information required to -// make the changes indicated. -type Plan struct { - // Mode is the mode under which this plan was created. - // - // This is only recorded to allow for UI differences when presenting plans - // to the end-user, and so it must not be used to influence apply-time - // behavior. The actions during apply must be described entirely by - // the Changes field, regardless of how the plan was created. - // - // FIXME: destroy operations still rely on DestroyMode being set, because - // there is no other source of this information in the plan. New behavior - // should not be added based on this flag, and changing the flag should be - // checked carefully against existing destroy behaviors. - UIMode Mode - - VariableValues map[string]DynamicValue - Changes *Changes - DriftedResources []*ResourceInstanceChangeSrc - TargetAddrs []addrs.Targetable - ForceReplaceAddrs []addrs.AbsResourceInstance - Backend Backend - - // Errored is true if the Changes information is incomplete because - // the planning operation failed. An errored plan cannot be applied, - // but can be cautiously inspected for debugging purposes. - Errored bool - - // Checks captures a snapshot of the (probably-incomplete) check results - // at the end of the planning process. - // - // If this plan is applyable (that is, if the planning process completed - // without errors) then the set of checks here should be complete even - // though some of them will likely have StatusUnknown where the check - // condition depends on values we won't know until the apply step. - Checks *states.CheckResults - - // RelevantAttributes is a set of resource instance addresses and - // attributes that are either directly affected by proposed changes or may - // have indirectly contributed to them via references in expressions. 
- // - // This is the result of a heuristic and is intended only as a hint to - // the UI layer in case it wants to emphasize or de-emphasize certain - // resources. Don't use this to drive any non-cosmetic behavior, especially - // including anything that would be subject to compatibility constraints. - RelevantAttributes []globalref.ResourceAttr - - // PrevRunState and PriorState both describe the situation that the plan - // was derived from: - // - // PrevRunState is a representation of the outcome of the previous - // Terraform operation, without any updates from the remote system but - // potentially including some changes that resulted from state upgrade - // actions. - // - // PriorState is a representation of the current state of remote objects, - // which will differ from PrevRunState if the "refresh" step returned - // different data, which might reflect drift. - // - // PriorState is the main snapshot we use for actions during apply. - // PrevRunState is only here so that we can diff PriorState against it in - // order to report to the user any out-of-band changes we've detected. - PrevRunState *states.State - PriorState *states.State -} - -// CanApply returns true if and only if the recieving plan includes content -// that would make sense to apply. If it returns false, the plan operation -// should indicate that there's nothing to do and Terraform should exit -// without prompting the user to confirm the changes. -// -// This function represents our main business logic for making the decision -// about whether a given plan represents meaningful "changes", and so its -// exact definition may change over time; the intent is just to centralize the -// rules for that rather than duplicating different versions of it at various -// locations in the UI code. -func (p *Plan) CanApply() bool { - switch { - case p.Errored: - // An errored plan can never be applied, because it is incomplete. 
- // Such a plan is only useful for describing the subset of actions - // planned so far in case they are useful for understanding the - // causes of the errors. - return false - - case !p.Changes.Empty(): - // "Empty" means that everything in the changes is a "NoOp", so if - // not empty then there's at least one non-NoOp change. - return true - - case !p.PriorState.ManagedResourcesEqual(p.PrevRunState): - // If there are no changes planned but we detected some - // outside-Terraform changes while refreshing then we consider - // that applyable in isolation only if this was a refresh-only - // plan where we expect updating the state to include these - // changes was the intended goal. - // - // (We don't treat a "refresh only" plan as applyable in normal - // planning mode because historically the refresh result wasn't - // considered part of a plan at all, and so it would be - // a disruptive breaking change if refreshing alone suddenly - // became applyable in the normal case and an existing configuration - // was relying on ignore_changes in order to be convergent in spite - // of intentional out-of-band operations.) - return p.UIMode == RefreshOnlyMode - - default: - // Otherwise, there are either no changes to apply or they are changes - // our cases above don't consider as worthy of applying in isolation. - return false - } -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving plan. -// -// The result is de-duplicated so that each distinct address appears only once. -func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { - if p == nil || p.Changes == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, rc := range p.Changes.Resources { - m[rc.ProviderAddr.String()] = rc.ProviderAddr - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. 
- keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} - -// Backend represents the backend-related configuration and other data as it -// existed when a plan was created. -type Backend struct { - // Type is the type of backend that the plan will apply against. - Type string - - // Config is the configuration of the backend, whose schema is decided by - // the backend Type. - Config DynamicValue - - // Workspace is the name of the workspace that was active when the plan - // was created. It is illegal to apply a plan created for one workspace - // to the state of another workspace. - // (This constraint is already enforced by the statefile lineage mechanism, - // but storing this explicitly allows us to return a better error message - // in the situation where the user has the wrong workspace selected.) - Workspace string -} - -func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { - dv, err := NewDynamicValue(config, configSchema.ImpliedType()) - if err != nil { - return nil, err - } - - return &Backend{ - Type: typeName, - Config: dv, - Workspace: workspaceName, - }, nil -} diff --git a/internal/plans/plan_test.go b/internal/plans/plan_test.go deleted file mode 100644 index 34f9361394f9..000000000000 --- a/internal/plans/plan_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package plans - -import ( - "testing" - - "github.com/go-test/deep" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestProviderAddrs(t *testing.T) { - - plan := &Plan{ - VariableValues: map[string]DynamicValue{}, - Changes: &Changes{ - Resources: []*ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "woot", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), - 
ProviderAddr: addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "woot", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), - DeposedKey: "foodface", - ProviderAddr: addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "what", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Module: addrs.RootModule.Child("foo"), - Provider: addrs.NewDefaultProvider("test"), - }, - }, - }, - }, - } - - got := plan.ProviderAddrs() - want := []addrs.AbsProviderConfig{ - addrs.AbsProviderConfig{ - Module: addrs.RootModule.Child("foo"), - Provider: addrs.NewDefaultProvider("test"), - }, - addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - }, - } - - for _, problem := range deep.Equal(got, want) { - t.Error(problem) - } -} - -// Module outputs should not effect the result of Empty -func TestModuleOutputChangesEmpty(t *testing.T) { - changes := &Changes{ - Outputs: []*OutputChangeSrc{ - { - Addr: addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), - OutputValue: addrs.OutputValue{ - Name: "output", - }, - }, - ChangeSrc: ChangeSrc{ - Action: Update, - Before: []byte("a"), - After: []byte("b"), - }, - }, - }, - } - - if !changes.Empty() { - t.Fatal("plan has no visible changes") - } -} diff --git a/internal/plugin/convert/diagnostics.go b/internal/plugin/convert/diagnostics.go deleted file mode 100644 index 43824e1b797e..000000000000 --- a/internal/plugin/convert/diagnostics.go +++ /dev/null @@ -1,132 +0,0 @@ -package convert - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" - proto 
"github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" -) - -// WarnsAndErrorsToProto converts the warnings and errors return by the legacy -// provider to protobuf diagnostics. -func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { - for _, w := range warns { - diags = AppendProtoDiag(diags, w) - } - - for _, e := range errs { - diags = AppendProtoDiag(diags, e) - } - - return diags -} - -// AppendProtoDiag appends a new diagnostic from a warning string or an error. -// This panics if d is not a string or error. -func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { - switch d := d.(type) { - case cty.PathError: - ap := PathToAttributePath(d.Path) - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - Attribute: ap, - }) - case error: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - }) - case string: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: d, - }) - case *proto.Diagnostic: - diags = append(diags, d) - case []*proto.Diagnostic: - diags = append(diags, d...) - } - return diags -} - -// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. 
-func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, d := range ds { - var severity tfdiags.Severity - - switch d.Severity { - case proto.Diagnostic_ERROR: - severity = tfdiags.Error - case proto.Diagnostic_WARNING: - severity = tfdiags.Warning - } - - var newDiag tfdiags.Diagnostic - - // if there's an attribute path, we need to create a AttributeValue diagnostic - if d.Attribute != nil && len(d.Attribute.Steps) > 0 { - path := AttributePathToPath(d.Attribute) - newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) - } else { - newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) - } - - diags = diags.Append(newDiag) - } - - return diags -} - -// AttributePathToPath takes the proto encoded path and converts it to a cty.Path -func AttributePathToPath(ap *proto.AttributePath) cty.Path { - var p cty.Path - for _, step := range ap.Steps { - switch selector := step.Selector.(type) { - case *proto.AttributePath_Step_AttributeName: - p = p.GetAttr(selector.AttributeName) - case *proto.AttributePath_Step_ElementKeyString: - p = p.Index(cty.StringVal(selector.ElementKeyString)) - case *proto.AttributePath_Step_ElementKeyInt: - p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) - } - } - return p -} - -// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. 
-func PathToAttributePath(p cty.Path) *proto.AttributePath { - ap := &proto.AttributePath{} - for _, step := range p { - switch selector := step.(type) { - case cty.GetAttrStep: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: selector.Name, - }, - }) - case cty.IndexStep: - key := selector.Key - switch key.Type() { - case cty.String: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: key.AsString(), - }, - }) - case cty.Number: - v, _ := key.AsBigFloat().Int64() - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: v, - }, - }) - default: - // We'll bail early if we encounter anything else, and just - // return the valid prefix. - return ap - } - } - } - return ap -} diff --git a/internal/plugin/convert/diagnostics_test.go b/internal/plugin/convert/diagnostics_test.go deleted file mode 100644 index 3f54985dd5ea..000000000000 --- a/internal/plugin/convert/diagnostics_test.go +++ /dev/null @@ -1,411 +0,0 @@ -package convert - -import ( - "errors" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/tfdiags" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" -) - -var ignoreUnexported = cmpopts.IgnoreUnexported( - proto.Diagnostic{}, - proto.Schema_Block{}, - proto.Schema_NestedBlock{}, - proto.Schema_Attribute{}, -) - -func TestProtoDiagnostics(t *testing.T) { - diags := WarnsAndErrsToProto( - []string{ - "warning 1", - "warning 2", - }, - []error{ - errors.New("error 1"), - errors.New("error 2"), - }, - ) - - expected := []*proto.Diagnostic{ - { - Severity: proto.Diagnostic_WARNING, - Summary: "warning 1", - }, - { - Severity: 
proto.Diagnostic_WARNING, - Summary: "warning 2", - }, - { - Severity: proto.Diagnostic_ERROR, - Summary: "error 1", - }, - { - Severity: proto.Diagnostic_ERROR, - Summary: "error 2", - }, - } - - if !cmp.Equal(expected, diags, ignoreUnexported) { - t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) - } -} - -func TestDiagnostics(t *testing.T) { - type diagFlat struct { - Severity tfdiags.Severity - Attr []interface{} - Summary string - Detail string - } - - tests := map[string]struct { - Cons func([]*proto.Diagnostic) []*proto.Diagnostic - Want []diagFlat - }{ - "nil": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return diags - }, - nil, - }, - "error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "simple error", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "simple error", - }, - }, - }, - "detailed error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "simple error", - Detail: "detailed error", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "simple error", - Detail: "detailed error", - }, - }, - }, - "warning": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "simple warning", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "simple warning", - }, - }, - }, - "detailed warning": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "simple warning", - Detail: "detailed warning", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "simple warning", - Detail: "detailed warning", - }, - }, - }, - "multi error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = 
append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "first error", - }, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "second error", - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "first error", - }, - { - Severity: tfdiags.Error, - Summary: "second error", - }, - }, - }, - "warning and error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "warning", - }, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error", - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "warning", - }, - { - Severity: tfdiags.Error, - Summary: "error", - }, - }, - }, - "attr error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error", - Detail: "error detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attribute_name", - }, - }, - }, - }, - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "error", - Detail: "error detail", - Attr: []interface{}{"attribute_name"}, - }, - }, - }, - "multi attr": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 1", - Detail: "error 1 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 2", - Detail: "error 2 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - 
{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "warning", - Detail: "warning detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - { - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: 1, - }, - }, - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 3", - Detail: "error 3 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - { - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: "idx", - }, - }, - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - ) - - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "error 1", - Detail: "error 1 detail", - Attr: []interface{}{"attr"}, - }, - { - Severity: tfdiags.Error, - Summary: "error 2", - Detail: "error 2 detail", - Attr: []interface{}{"attr", "sub"}, - }, - { - Severity: tfdiags.Warning, - Summary: "warning", - Detail: "warning detail", - Attr: []interface{}{"attr", 1, "sub"}, - }, - { - Severity: tfdiags.Error, - Summary: "error 3", - Detail: "error 3 detail", - Attr: []interface{}{"attr", "idx", "sub"}, - }, - }, - }, - } - - flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { - var flat []diagFlat - for _, item := range ds { - desc := item.Description() - - var attr []interface{} - - for _, a := range tfdiags.GetAttribute(item) { - switch step := a.(type) { - case cty.GetAttrStep: - attr = append(attr, step.Name) - case cty.IndexStep: - switch step.Key.Type() { - case cty.Number: - i, _ := 
step.Key.AsBigFloat().Int64() - attr = append(attr, int(i)) - case cty.String: - attr = append(attr, step.Key.AsString()) - } - } - } - - flat = append(flat, diagFlat{ - Severity: item.Severity(), - Attr: attr, - Summary: desc.Summary, - Detail: desc.Detail, - }) - } - return flat - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - // we take the - tfDiags := ProtoToDiagnostics(tc.Cons(nil)) - - flat := flattenTFDiags(tfDiags) - - if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) - } - }) - } -} - -// Test that a diagnostic with a present but empty attribute results in a -// whole body diagnostic. We verify this by inspecting the resulting Subject -// from the diagnostic when considered in the context of a config body. -func TestProtoDiagnostics_emptyAttributePath(t *testing.T) { - protoDiags := []*proto.Diagnostic{ - { - Severity: proto.Diagnostic_ERROR, - Summary: "error 1", - Detail: "error 1 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - // this slice is intentionally left empty - }, - }, - }, - } - tfDiags := ProtoToDiagnostics(protoDiags) - - testConfig := `provider "test" { - foo = "bar" -}` - f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) - if parseDiags.HasErrors() { - t.Fatal(parseDiags) - } - diags := tfDiags.InConfigBody(f.Body, "") - - if len(tfDiags) != 1 { - t.Fatalf("expected 1 diag, got %d", len(tfDiags)) - } - got := diags[0].Source().Subject - want := &tfdiags.SourceRange{ - Filename: "test.tf", - Start: tfdiags.SourcePos{Line: 1, Column: 1}, - End: tfdiags.SourcePos{Line: 1, Column: 1}, - } - - if !cmp.Equal(got, want, typeComparer, valueComparer) { - t.Fatal(cmp.Diff(got, want, typeComparer, valueComparer)) - } -} diff --git a/internal/plugin/convert/schema.go b/internal/plugin/convert/schema.go deleted file mode 100644 index 
4a3f909935d5..000000000000 --- a/internal/plugin/convert/schema.go +++ /dev/null @@ -1,185 +0,0 @@ -package convert - -import ( - "encoding/json" - "reflect" - "sort" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - proto "github.com/hashicorp/terraform/internal/tfplugin5" -) - -// ConfigSchemaToProto takes a *configschema.Block and converts it to a -// proto.Schema_Block for a grpc response. -func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { - block := &proto.Schema_Block{ - Description: b.Description, - DescriptionKind: protoStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - - attr := &proto.Schema_Attribute{ - Name: name, - Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - ty, err := json.Marshal(a.Type) - if err != nil { - panic(err) - } - - attr.Type = ty - - block.Attributes = append(block.Attributes, attr) - } - - for _, name := range sortedKeys(b.BlockTypes) { - b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) - } - - return block -} - -func protoStringKind(k configschema.StringKind) proto.StringKind { - switch k { - default: - return proto.StringKind_PLAIN - case configschema.StringMarkdown: - return proto.StringKind_MARKDOWN - } -} - -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { - var nesting proto.Schema_NestedBlock_NestingMode - switch b.Nesting { - case configschema.NestingSingle: - nesting = proto.Schema_NestedBlock_SINGLE - case configschema.NestingGroup: - nesting = proto.Schema_NestedBlock_GROUP - case configschema.NestingList: - nesting = proto.Schema_NestedBlock_LIST - case 
configschema.NestingSet: - nesting = proto.Schema_NestedBlock_SET - case configschema.NestingMap: - nesting = proto.Schema_NestedBlock_MAP - default: - nesting = proto.Schema_NestedBlock_INVALID - } - return &proto.Schema_NestedBlock{ - TypeName: name, - Block: ConfigSchemaToProto(&b.Block), - Nesting: nesting, - MinItems: int64(b.MinItems), - MaxItems: int64(b.MaxItems), - } -} - -// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. -func ProtoToProviderSchema(s *proto.Schema) providers.Schema { - return providers.Schema{ - Version: s.Version, - Block: ProtoToConfigSchema(s.Block), - } -} - -// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it -// to a terraform *configschema.Block. -func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { - block := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - BlockTypes: make(map[string]*configschema.NestedBlock), - - Description: b.Description, - DescriptionKind: schemaStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, a := range b.Attributes { - attr := &configschema.Attribute{ - Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), - Required: a.Required, - Optional: a.Optional, - Computed: a.Computed, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if err := json.Unmarshal(a.Type, &attr.Type); err != nil { - panic(err) - } - - block.Attributes[a.Name] = attr - } - - for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(b) - } - - return block -} - -func schemaStringKind(k proto.StringKind) configschema.StringKind { - switch k { - default: - return configschema.StringPlain - case proto.StringKind_MARKDOWN: - return configschema.StringMarkdown - } -} - -func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch b.Nesting { - case 
proto.Schema_NestedBlock_SINGLE: - nesting = configschema.NestingSingle - case proto.Schema_NestedBlock_GROUP: - nesting = configschema.NestingGroup - case proto.Schema_NestedBlock_LIST: - nesting = configschema.NestingList - case proto.Schema_NestedBlock_MAP: - nesting = configschema.NestingMap - case proto.Schema_NestedBlock_SET: - nesting = configschema.NestingSet - default: - // In all other cases we'll leave it as the zero value (invalid) and - // let the caller validate it and deal with this. - } - - nb := &configschema.NestedBlock{ - Nesting: nesting, - MinItems: int(b.MinItems), - MaxItems: int(b.MaxItems), - } - - nested := ProtoToConfigSchema(b.Block) - nb.Block = *nested - return nb -} - -// sortedKeys returns the lexically sorted keys from the given map. This is -// used to make schema conversions are deterministic. This panics if map keys -// are not a string. -func sortedKeys(m interface{}) []string { - v := reflect.ValueOf(m) - keys := make([]string, v.Len()) - - mapKeys := v.MapKeys() - for i, k := range mapKeys { - keys[i] = k.Interface().(string) - } - - sort.Strings(keys) - return keys -} diff --git a/internal/plugin/convert/schema_test.go b/internal/plugin/convert/schema_test.go deleted file mode 100644 index 4df254fb765f..000000000000 --- a/internal/plugin/convert/schema_test.go +++ /dev/null @@ -1,361 +0,0 @@ -package convert - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/configs/configschema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" -) - -var ( - equateEmpty = cmpopts.EquateEmpty() - typeComparer = cmp.Comparer(cty.Type.Equals) - valueComparer = cmp.Comparer(cty.Value.RawEquals) -) - -// Test that we can convert configschema to protobuf types and back again. 
-func TestConvertSchemaBlocks(t *testing.T) { - tests := map[string]struct { - Block *proto.Schema_Block - Want *configschema.Block - }{ - "attributes": { - &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - }, - }, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - }, - }, - }, - "blocks": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "map", - Nesting: proto.Schema_NestedBlock_MAP, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "foo", - Type: []byte(`"dynamic"`), - Required: true, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - }, - "map": &configschema.NestedBlock{ - Nesting: configschema.NestingMap, - }, - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "foo": { - Type: cty.DynamicPseudoType, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "deep block nesting": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - converted := ProtoToConfigSchema(tc.Block) - if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) - } - }) - } -} - -// Test that we can convert configschema to protobuf types and back again. 
-func TestConvertProtoSchemaBlocks(t *testing.T) { - tests := map[string]struct { - Want *proto.Schema_Block - Block *configschema.Block - }{ - "attributes": { - &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - }, - }, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - }, - }, - }, - "blocks": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "map", - Nesting: proto.Schema_NestedBlock_MAP, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "foo", - Type: []byte(`"dynamic"`), - Required: true, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - }, - "map": &configschema.NestedBlock{ - Nesting: configschema.NestingMap, - }, - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "foo": { - Type: cty.DynamicPseudoType, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "deep block nesting": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - converted := ConfigSchemaToProto(tc.Block) - if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { - t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) - } - }) - } -} diff --git a/internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v1.0.0.exe b/internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v1.0.0.exe deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/internal/plugin/grpc_error.go b/internal/plugin/grpc_error.go deleted file mode 100644 index 0f638b7fa455..000000000000 --- a/internal/plugin/grpc_error.go +++ /dev/null @@ -1,74 +0,0 @@ -package plugin - -import ( - "fmt" - "path" - "runtime" - - 
"github.com/hashicorp/terraform/internal/tfdiags" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// grpcErr extracts some known error types and formats them into better -// representations for core. This must only be called from plugin methods. -// Since we don't use RPC status errors for the plugin protocol, these do not -// contain any useful details, and we can return some text that at least -// indicates the plugin call and possible error condition. -func grpcErr(err error) (diags tfdiags.Diagnostics) { - if err == nil { - return - } - - // extract the method name from the caller. - pc, _, _, ok := runtime.Caller(1) - if !ok { - logger.Error("unknown grpc call", "error", err) - return diags.Append(err) - } - - f := runtime.FuncForPC(pc) - - // Function names will contain the full import path. Take the last - // segment, which will let users know which method was being called. - _, requestName := path.Split(f.Name()) - - // Here we can at least correlate the error in the logs to a particular binary. - logger.Error(requestName, "error", err) - - // TODO: while this expands the error codes into somewhat better messages, - // this still does not easily link the error to an actual user-recognizable - // plugin. The grpc plugin does not know its configured name, and the - // errors are in a list of diagnostics, making it hard for the caller to - // annotate the returned errors. - switch status.Code(err) { - case codes.Unavailable: - // This case is when the plugin has stopped running for some reason, - // and is usually the result of a crash. - diags = diags.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Plugin did not respond", - fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. 
"+ - "The plugin logs may contain more details.", requestName), - )) - case codes.Canceled: - diags = diags.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Request cancelled", - fmt.Sprintf("The %s request was cancelled.", requestName), - )) - case codes.Unimplemented: - diags = diags.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Unsupported plugin method", - fmt.Sprintf("The %s method is not supported by this plugin.", requestName), - )) - default: - diags = diags.Append(tfdiags.WholeContainingBody( - tfdiags.Error, - "Plugin error", - fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), - )) - } - return -} diff --git a/internal/plugin/grpc_provider.go b/internal/plugin/grpc_provider.go deleted file mode 100644 index 2c4f2c036027..000000000000 --- a/internal/plugin/grpc_provider.go +++ /dev/null @@ -1,697 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plugin/convert" - "github.com/hashicorp/terraform/internal/providers" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -var logger = logging.HCLogger() - -// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. 
-type GRPCProviderPlugin struct { - plugin.Plugin - GRPCProvider func() proto.ProviderServer -} - -func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvider{ - client: proto.NewProviderClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProviderServer(s, p.GRPCProvider()) - return nil -} - -// GRPCProvider handles the client, or core side of the plugin rpc connection. -// The GRPCProvider methods are mostly a translation layer between the -// terraform providers types and the grpc proto types, directly converting -// between the two. -type GRPCProvider struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - // TestServer contains a grpc.Server to close when the GRPCProvider is being - // used in an end to end test of a provider. - TestServer *grpc.Server - - // Proto client use to make the grpc service calls. - client proto.ProviderClient - - // this context is created by the plugin package, and is canceled when the - // plugin process ends. - ctx context.Context - - // schema stores the schema for this provider. This is used to properly - // serialize the state for requests. 
- mu sync.Mutex - schemas providers.GetProviderSchemaResponse -} - -// getSchema is used internally to get the cached provider schema -func (p *GRPCProvider) getSchema() providers.GetProviderSchemaResponse { - p.mu.Lock() - // unlock inline in case GetSchema needs to be called - if p.schemas.Provider.Block != nil { - p.mu.Unlock() - return p.schemas - } - p.mu.Unlock() - - return p.GetProviderSchema() -} - -func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) { - logger.Trace("GRPCProvider: GetProviderSchema") - p.mu.Lock() - defer p.mu.Unlock() - - if p.schemas.Provider.Block != nil { - return p.schemas - } - - resp.ResourceTypes = make(map[string]providers.Schema) - resp.DataSources = make(map[string]providers.Schema) - - // Some providers may generate quite large schemas, and the internal default - // grpc response size limit is 4MB. 64MB should cover most any use case, and - // if we get providers nearing that we may want to consider a finer-grained - // API to fetch individual resource schemas. - // Note: this option is marked as EXPERIMENTAL in the grpc API. We keep - // this for compatibility, but recent providers all set the max message - // size much higher on the server side, which is the supported method for - // determining payload size. 
- const maxRecvSize = 64 << 20 - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if resp.Diagnostics.HasErrors() { - return resp - } - - if protoResp.Provider == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) - return resp - } - - resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) - if protoResp.ProviderMeta == nil { - logger.Debug("No provider meta schema returned") - } else { - resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta) - } - - for name, res := range protoResp.ResourceSchemas { - resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) - } - - for name, data := range protoResp.DataSourceSchemas { - resp.DataSources[name] = convert.ProtoToProviderSchema(data) - } - - if protoResp.ServerCapabilities != nil { - resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy - } - - p.schemas = resp - - return resp -} - -func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - logger.Trace("GRPCProvider: ValidateProviderConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - ty := schema.Provider.Block.ImpliedType() - - mp, err := msgpack.Marshal(r.Config, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PrepareProviderConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - - 
config, err := decodeDynamicValue(protoResp.PreparedConfig, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.PreparedConfig = config - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - logger.Trace("GRPCProvider: ValidateResourceConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resourceSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateResourceTypeConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - logger.Trace("GRPCProvider: ValidateDataResourceConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - dataSchema, ok := schema.DataSources[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) - return resp - } - - mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - 
resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateDataSourceConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - logger.Trace("GRPCProvider: UpgradeResourceState") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - protoReq := &proto.UpgradeResourceState_Request{ - TypeName: r.TypeName, - Version: int64(r.Version), - RawState: &proto.RawState{ - Json: r.RawStateJSON, - Flatmap: r.RawStateFlatmap, - }, - } - - protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - ty := resSchema.Block.ImpliedType() - resp.UpgradedState = cty.NullVal(ty) - if protoResp.UpgradedState == nil { - return resp - } - - state, err := decodeDynamicValue(protoResp.UpgradedState, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = state - - return resp -} - -func (p *GRPCProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - logger.Trace("GRPCProvider: ConfigureProvider") - - schema := p.getSchema() - if 
schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - var mp []byte - - // we don't have anything to marshal if there's no config - mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.Configure_Request{ - TerraformVersion: r.TerraformVersion, - Config: &proto.DynamicValue{ - Msgpack: mp, - }, - } - - protoResp, err := p.client.Configure(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) Stop() error { - logger.Trace("GRPCProvider: Stop") - - resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) - if err != nil { - return err - } - - if resp.Error != "" { - return errors.New(resp.Error) - } - return nil -} - -func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - logger.Trace("GRPCProvider: ReadResource") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type " + r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - - mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadResource_Request{ - TypeName: r.TypeName, - CurrentState: &proto.DynamicValue{Msgpack: mp}, - Private: r.Private, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - 
protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ReadResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.NewState = state - resp.Private = protoResp.Private - - return resp -} - -func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - logger.Trace("GRPCProvider: PlanResourceChange") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - capabilities := schema.ServerCapabilities - - // If the provider doesn't support planning a destroy operation, we can - // return immediately. 
- if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { - resp.PlannedState = r.ProposedNewState - resp.PlannedPrivate = r.PriorPrivate - return resp - } - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PlanResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, - PriorPrivate: r.PriorPrivate, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.PlannedState = state - - for _, p := range protoResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) - } - - resp.PlannedPrivate = protoResp.PlannedPrivate - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ApplyResourceChange(r 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - logger.Trace("GRPCProvider: ApplyResourceChange") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ApplyResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - PlannedPrivate: r.PlannedPrivate, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - resp.Private = protoResp.Private - - state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - 
resp.NewState = state - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - logger.Trace("GRPCProvider: ImportResourceState") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - protoReq := &proto.ImportResourceState_Request{ - TypeName: r.TypeName, - Id: r.ID, - } - - protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - for _, imported := range protoResp.ImportedResources { - resource := providers.ImportedResource{ - TypeName: imported.TypeName, - Private: imported.Private, - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - continue - } - - state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resource.State = state - resp.ImportedResources = append(resp.ImportedResources, resource) - } - - return resp -} - -func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - logger.Trace("GRPCProvider: ReadDataSource") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - dataSchema, ok := schema.DataSources[r.TypeName] - if !ok { - schema.Diagnostics = schema.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) - } - - metaSchema := schema.ProviderMeta - - config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = 
resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadDataSource_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{ - Msgpack: config, - }, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.State = state - - return resp -} - -// closing the grpc connection is final, and terraform will call it at the end of every phase. -func (p *GRPCProvider) Close() error { - logger.Trace("GRPCProvider: Close") - - // Make sure to stop the server if we're not running within go-plugin. - if p.TestServer != nil { - p.TestServer.Stop() - } - - // Check this since it's not automatically inserted during plugin creation. - // It's currently only inserted by the command package, because that is - // where the factory is built and is the only point with access to the - // plugin.Client. - if p.PluginClient == nil { - logger.Debug("provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} - -// Decode a DynamicValue from either the JSON or MsgPack encoding. 
-func decodeDynamicValue(v *proto.DynamicValue, ty cty.Type) (cty.Value, error) { - // always return a valid value - var err error - res := cty.NullVal(ty) - if v == nil { - return res, nil - } - - switch { - case len(v.Msgpack) > 0: - res, err = msgpack.Unmarshal(v.Msgpack, ty) - case len(v.Json) > 0: - res, err = ctyjson.Unmarshal(v.Json, ty) - } - return res, err -} diff --git a/internal/plugin/grpc_provider_test.go b/internal/plugin/grpc_provider_test.go deleted file mode 100644 index 4e8d2f6c9207..000000000000 --- a/internal/plugin/grpc_provider_test.go +++ /dev/null @@ -1,777 +0,0 @@ -package plugin - -import ( - "bytes" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - mockproto "github.com/hashicorp/terraform/internal/plugin/mock_proto" - proto "github.com/hashicorp/terraform/internal/tfplugin5" -) - -var _ providers.Interface = (*GRPCProvider)(nil) - -func mockProviderClient(t *testing.T) *mockproto.MockProviderClient { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - // we always need a GetSchema method - client.EXPECT().GetSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(providerProtoSchema(), nil) - - return client -} - -func checkDiags(t *testing.T, d tfdiags.Diagnostics) { - t.Helper() - if d.HasErrors() { - t.Fatal(d.Err()) - } -} - -// checkDiagsHasError ensures error diagnostics are present or fails the test. 
-func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) { - t.Helper() - - if !d.HasErrors() { - t.Fatal("expected error diagnostics") - } -} - -func providerProtoSchema() *proto.GetProviderSchema_Response { - return &proto.GetProviderSchema_Response{ - Provider: &proto.Schema{ - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - ResourceSchemas: map[string]*proto.Schema{ - "resource": &proto.Schema{ - Version: 1, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - }, - DataSourceSchemas: map[string]*proto.Schema{ - "data": &proto.Schema{ - Version: 1, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - }, - } -} - -func TestGRPCProvider_GetSchema(t *testing.T) { - p := &GRPCProvider{ - client: mockProviderClient(t), - } - - resp := p.GetProviderSchema() - checkDiags(t, resp.Diagnostics) -} - -// Ensure that gRPC errors are returned early. -// Reference: https://github.com/hashicorp/terraform/issues/31047 -func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - client.EXPECT().GetSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error")) - - p := &GRPCProvider{ - client: client, - } - - resp := p.GetProviderSchema() - - checkDiagsHasError(t, resp.Diagnostics) -} - -// Ensure that provider error diagnostics are returned early. 
-// Reference: https://github.com/hashicorp/terraform/issues/31047 -func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - client.EXPECT().GetSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&proto.GetProviderSchema_Response{ - Diagnostics: []*proto.Diagnostic{ - { - Severity: proto.Diagnostic_ERROR, - Summary: "error summary", - Detail: "error detail", - }, - }, - // Trigger potential panics - Provider: &proto.Schema{}, - }, nil) - - p := &GRPCProvider{ - client: client, - } - - resp := p.GetProviderSchema() - - checkDiagsHasError(t, resp.Diagnostics) -} - -func TestGRPCProvider_PrepareProviderConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().PrepareProviderConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.PrepareProviderConfig_Response{}, nil) - - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg}) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_ValidateResourceConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ValidateResourceTypeConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ValidateResourceTypeConfig_Response{}, nil) - - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ - TypeName: "resource", - Config: cfg, - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_ValidateDataSourceConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ValidateDataSourceConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ValidateDataSourceConfig_Response{}, nil) - - cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ - TypeName: "data", - Config: cfg, - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_UpgradeResourceState(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().UpgradeResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.UpgradeResourceState_Response{ - UpgradedState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ - TypeName: "resource", - Version: 0, - RawStateJSON: []byte(`{"old_attr":"bar"}`), - }) - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().UpgradeResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.UpgradeResourceState_Response{ - UpgradedState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ - TypeName: "resource", - Version: 0, - RawStateJSON: []byte(`{"old_attr":"bar"}`), - }) - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_Configure(t *testing.T) { - client := mockProviderClient(t) - p := 
&GRPCProvider{ - client: client, - } - - client.EXPECT().Configure( - gomock.Any(), - gomock.Any(), - ).Return(&proto.Configure_Response{}, nil) - - resp := p.ConfigureProvider(providers.ConfigureProviderRequest{ - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_Stop(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().Stop( - gomock.Any(), - gomock.Any(), - ).Return(&proto.Stop_Response{}, nil) - - err := p.Stop() - if err != nil { - t.Fatal(err) - } -} - -func TestGRPCProvider_ReadResource(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadResourceJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, 
resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadEmptyJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(``), - }, - }, nil) - - obj := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }) - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: obj, - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.NullVal(obj.Type()) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_PlanResourceChange(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().PlanResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.PlanResourceChange_Response{ - PlannedState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - RequiresReplace: []*proto.AttributePath{ - { - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - PlannedPrivate: expectedPrivate, - }, nil) - - resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - ProposedNewState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - 
"attr": cty.StringVal("bar"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) - } - - expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` - replace := fmt.Sprintf("%#v", resp.RequiresReplace) - if expectedReplace != replace { - t.Fatalf("expected %q, got %q", expectedReplace, replace) - } - - if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) - } -} - -func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().PlanResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.PlanResourceChange_Response{ - PlannedState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - RequiresReplace: []*proto.AttributePath{ - { - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - PlannedPrivate: expectedPrivate, - }, nil) - - resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - ProposedNewState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, 
resp.PlannedState, typeComparer, valueComparer, equateEmpty)) - } - - expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` - replace := fmt.Sprintf("%#v", resp.RequiresReplace) - if expectedReplace != replace { - t.Fatalf("expected %q, got %q", expectedReplace, replace) - } - - if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) - } -} - -func TestGRPCProvider_ApplyResourceChange(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ApplyResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ApplyResourceChange_Response{ - NewState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - Private: expectedPrivate, - }, nil) - - resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - PlannedPrivate: expectedPrivate, - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } - - if !bytes.Equal(expectedPrivate, resp.Private) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) - } -} -func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ApplyResourceChange( - gomock.Any(), - gomock.Any(), - 
).Return(&proto.ApplyResourceChange_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - Private: expectedPrivate, - }, nil) - - resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - PlannedPrivate: expectedPrivate, - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } - - if !bytes.Equal(expectedPrivate, resp.Private) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) - } -} - -func TestGRPCProvider_ImportResourceState(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ImportResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ImportResourceState_Response{ - ImportedResources: []*proto.ImportResourceState_ImportedResource{ - { - TypeName: "resource", - State: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - Private: expectedPrivate, - }, - }, - }, nil) - - resp := p.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: "resource", - ID: "foo", - }) - - checkDiags(t, resp.Diagnostics) - - expectedResource := providers.ImportedResource{ - TypeName: "resource", - State: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Private: expectedPrivate, - } - - imported := resp.ImportedResources[0] - if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { 
- t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) - } -} -func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ImportResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ImportResourceState_Response{ - ImportedResources: []*proto.ImportResourceState_ImportedResource{ - { - TypeName: "resource", - State: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - Private: expectedPrivate, - }, - }, - }, nil) - - resp := p.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: "resource", - ID: "foo", - }) - - checkDiags(t, resp.Diagnostics) - - expectedResource := providers.ImportedResource{ - TypeName: "resource", - State: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Private: expectedPrivate, - } - - imported := resp.ImportedResources[0] - if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadDataSource(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadDataSource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadDataSource_Response{ - State: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: "data", - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) - } -} - 
-func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadDataSource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadDataSource_Response{ - State: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: "data", - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) - } -} diff --git a/internal/plugin/mock_proto/generate.go b/internal/plugin/mock_proto/generate.go deleted file mode 100644 index 6f004ffd362a..000000000000 --- a/internal/plugin/mock_proto/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/hashicorp/terraform/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer - -package mock_tfplugin5 diff --git a/internal/plugin/mock_proto/mock.go b/internal/plugin/mock_proto/mock.go deleted file mode 100644 index 054fe1cd821a..000000000000 --- a/internal/plugin/mock_proto/mock.go +++ /dev/null @@ -1,623 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/hashicorp/terraform/internal/tfplugin5 (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer) - -// Package mock_tfplugin5 is a generated GoMock package. 
-package mock_tfplugin5 - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - tfplugin5 "github.com/hashicorp/terraform/internal/tfplugin5" - grpc "google.golang.org/grpc" - metadata "google.golang.org/grpc/metadata" -) - -// MockProviderClient is a mock of ProviderClient interface. -type MockProviderClient struct { - ctrl *gomock.Controller - recorder *MockProviderClientMockRecorder -} - -// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. -type MockProviderClientMockRecorder struct { - mock *MockProviderClient -} - -// NewMockProviderClient creates a new mock instance. -func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { - mock := &MockProviderClient{ctrl: ctrl} - mock.recorder = &MockProviderClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { - return m.recorder -} - -// ApplyResourceChange mocks base method. -func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin5.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.ApplyResourceChange_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) - ret0, _ := ret[0].(*tfplugin5.ApplyResourceChange_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ApplyResourceChange indicates an expected call of ApplyResourceChange. -func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) 
-} - -// Configure mocks base method. -func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Configure_Request, arg2 ...grpc.CallOption) (*tfplugin5.Configure_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Configure", varargs...) - ret0, _ := ret[0].(*tfplugin5.Configure_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Configure indicates an expected call of Configure. -func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Configure", reflect.TypeOf((*MockProviderClient)(nil).Configure), varargs...) -} - -// GetSchema mocks base method. -func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProviderSchema_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetSchema", varargs...) - ret0, _ := ret[0].(*tfplugin5.GetProviderSchema_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSchema indicates an expected call of GetSchema. -func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProviderClient)(nil).GetSchema), varargs...) -} - -// ImportResourceState mocks base method. 
-func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin5.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.ImportResourceState_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ImportResourceState", varargs...) - ret0, _ := ret[0].(*tfplugin5.ImportResourceState_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImportResourceState indicates an expected call of ImportResourceState. -func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) -} - -// PlanResourceChange mocks base method. -func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin5.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.PlanResourceChange_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) - ret0, _ := ret[0].(*tfplugin5.PlanResourceChange_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PlanResourceChange indicates an expected call of PlanResourceChange. -func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) -} - -// PrepareProviderConfig mocks base method. 
-func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *tfplugin5.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.PrepareProviderConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PrepareProviderConfig", varargs...) - ret0, _ := ret[0].(*tfplugin5.PrepareProviderConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PrepareProviderConfig indicates an expected call of PrepareProviderConfig. -func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).PrepareProviderConfig), varargs...) -} - -// ReadDataSource mocks base method. -func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin5.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadDataSource_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReadDataSource", varargs...) - ret0, _ := ret[0].(*tfplugin5.ReadDataSource_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadDataSource indicates an expected call of ReadDataSource. -func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) -} - -// ReadResource mocks base method. 
-func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadResource_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReadResource", varargs...) - ret0, _ := ret[0].(*tfplugin5.ReadResource_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadResource indicates an expected call of ReadResource. -func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) -} - -// Stop mocks base method. -func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*tfplugin5.Stop_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stop indicates an expected call of Stop. -func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProviderClient)(nil).Stop), varargs...) -} - -// UpgradeResourceState mocks base method. 
-func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin5.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.UpgradeResourceState_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) - ret0, _ := ret[0].(*tfplugin5.UpgradeResourceState_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpgradeResourceState indicates an expected call of UpgradeResourceState. -func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...) -} - -// ValidateDataSourceConfig mocks base method. -func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateDataSourceConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateDataSourceConfig", varargs...) - ret0, _ := ret[0].(*tfplugin5.ValidateDataSourceConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateDataSourceConfig indicates an expected call of ValidateDataSourceConfig. -func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataSourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataSourceConfig), varargs...) 
-} - -// ValidateResourceTypeConfig mocks base method. -func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *tfplugin5.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateResourceTypeConfig", varargs...) - ret0, _ := ret[0].(*tfplugin5.ValidateResourceTypeConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateResourceTypeConfig indicates an expected call of ValidateResourceTypeConfig. -func (mr *MockProviderClientMockRecorder) ValidateResourceTypeConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceTypeConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceTypeConfig), varargs...) -} - -// MockProvisionerClient is a mock of ProvisionerClient interface. -type MockProvisionerClient struct { - ctrl *gomock.Controller - recorder *MockProvisionerClientMockRecorder -} - -// MockProvisionerClientMockRecorder is the mock recorder for MockProvisionerClient. -type MockProvisionerClientMockRecorder struct { - mock *MockProvisionerClient -} - -// NewMockProvisionerClient creates a new mock instance. -func NewMockProvisionerClient(ctrl *gomock.Controller) *MockProvisionerClient { - mock := &MockProvisionerClient{ctrl: ctrl} - mock.recorder = &MockProvisionerClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProvisionerClient) EXPECT() *MockProvisionerClientMockRecorder { - return m.recorder -} - -// GetSchema mocks base method. 
-func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProvisionerSchema_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetSchema", varargs...) - ret0, _ := ret[0].(*tfplugin5.GetProvisionerSchema_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSchema indicates an expected call of GetSchema. -func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProvisionerClient)(nil).GetSchema), varargs...) -} - -// ProvisionResource mocks base method. -func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tfplugin5.ProvisionResource_Request, arg2 ...grpc.CallOption) (tfplugin5.Provisioner_ProvisionResourceClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ProvisionResource", varargs...) - ret0, _ := ret[0].(tfplugin5.Provisioner_ProvisionResourceClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ProvisionResource indicates an expected call of ProvisionResource. -func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionResource", reflect.TypeOf((*MockProvisionerClient)(nil).ProvisionResource), varargs...) -} - -// Stop mocks base method. 
-func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*tfplugin5.Stop_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stop indicates an expected call of Stop. -func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProvisionerClient)(nil).Stop), varargs...) -} - -// ValidateProvisionerConfig mocks base method. -func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *tfplugin5.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateProvisionerConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateProvisionerConfig", varargs...) - ret0, _ := ret[0].(*tfplugin5.ValidateProvisionerConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateProvisionerConfig indicates an expected call of ValidateProvisionerConfig. -func (mr *MockProvisionerClientMockRecorder) ValidateProvisionerConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProvisionerConfig", reflect.TypeOf((*MockProvisionerClient)(nil).ValidateProvisionerConfig), varargs...) -} - -// MockProvisioner_ProvisionResourceClient is a mock of Provisioner_ProvisionResourceClient interface. 
-type MockProvisioner_ProvisionResourceClient struct { - ctrl *gomock.Controller - recorder *MockProvisioner_ProvisionResourceClientMockRecorder -} - -// MockProvisioner_ProvisionResourceClientMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceClient. -type MockProvisioner_ProvisionResourceClientMockRecorder struct { - mock *MockProvisioner_ProvisionResourceClient -} - -// NewMockProvisioner_ProvisionResourceClient creates a new mock instance. -func NewMockProvisioner_ProvisionResourceClient(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceClient { - mock := &MockProvisioner_ProvisionResourceClient{ctrl: ctrl} - mock.recorder = &MockProvisioner_ProvisionResourceClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProvisioner_ProvisionResourceClient) EXPECT() *MockProvisioner_ProvisionResourceClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. 
-func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) Recv() (*tfplugin5.ProvisionResource_Response, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*tfplugin5.ProvisionResource_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. 
-func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).RecvMsg), arg0) -} - -// SendMsg mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).SendMsg), arg0) -} - -// Trailer mocks base method. -func (m *MockProvisioner_ProvisionResourceClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Trailer)) -} - -// MockProvisioner_ProvisionResourceServer is a mock of Provisioner_ProvisionResourceServer interface. -type MockProvisioner_ProvisionResourceServer struct { - ctrl *gomock.Controller - recorder *MockProvisioner_ProvisionResourceServerMockRecorder -} - -// MockProvisioner_ProvisionResourceServerMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceServer. -type MockProvisioner_ProvisionResourceServerMockRecorder struct { - mock *MockProvisioner_ProvisionResourceServer -} - -// NewMockProvisioner_ProvisionResourceServer creates a new mock instance. 
-func NewMockProvisioner_ProvisionResourceServer(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceServer { - mock := &MockProvisioner_ProvisionResourceServer{ctrl: ctrl} - mock.recorder = &MockProvisioner_ProvisionResourceServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProvisioner_ProvisionResourceServer) EXPECT() *MockProvisioner_ProvisionResourceServerMockRecorder { - return m.recorder -} - -// Context mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Context)) -} - -// RecvMsg mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RecvMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).RecvMsg), arg0) -} - -// Send mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *tfplugin5.ProvisionResource_Response) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. 
-func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendMsg", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendMsg), arg0) -} - -// SetHeader mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. 
-func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. -func (m *MockProvisioner_ProvisionResourceServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetTrailer), arg0) -} diff --git a/internal/plugin/serve.go b/internal/plugin/serve.go deleted file mode 100644 index 27d3c9e6d46e..000000000000 --- a/internal/plugin/serve.go +++ /dev/null @@ -1,75 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin5" -) - -const ( - // The constants below are the names of the plugins that can be dispensed - // from the plugin server. - ProviderPluginName = "provider" - ProvisionerPluginName = "provisioner" - - // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify - // a particular version during their handshake. This is the version used when Terraform 0.10 - // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must - // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and - // 0.11. - DefaultProtocolVersion = 4 -) - -// Handshake is the HandshakeConfig used to configure clients and servers. -var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. 
This should be bumped whenever a change happens in - // one or the other that makes it so that they can't safely communicate. - // This could be adding a new interface value, it could be how - // helper/schema computes diffs, etc. - ProtocolVersion: DefaultProtocolVersion, - - // The magic cookie values should NEVER be changed. - MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type GRPCProviderFunc func() proto.ProviderServer -type GRPCProvisionerFunc func() proto.ProvisionerServer - -// ServeOpts are the configurations to serve a plugin. -type ServeOpts struct { - // Wrapped versions of the above plugins will automatically shimmed and - // added to the GRPC functions when possible. - GRPCProviderFunc GRPCProviderFunc - GRPCProvisionerFunc GRPCProvisionerFunc -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - VersionedPlugins: pluginSet(opts), - GRPCServer: plugin.DefaultGRPCServer, - }) -} - -func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - plugins := map[int]plugin.PluginSet{} - - // add the new protocol versions if they're configured - if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { - plugins[5] = plugin.PluginSet{} - if opts.GRPCProviderFunc != nil { - plugins[5]["provider"] = &GRPCProviderPlugin{ - GRPCProvider: opts.GRPCProviderFunc, - } - } - if opts.GRPCProvisionerFunc != nil { - plugins[5]["provisioner"] = &GRPCProvisionerPlugin{ - GRPCProvisioner: opts.GRPCProvisionerFunc, - } - } - } - return plugins -} diff --git a/internal/plugin/ui_input.go b/internal/plugin/ui_input.go deleted file mode 100644 index 9a6f00a8c2fc..000000000000 --- a/internal/plugin/ui_input.go +++ /dev/null @@ -1,52 +0,0 @@ -package plugin - -import ( - "context" - "net/rpc" - - 
"github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/terraform" -) - -// UIInput is an implementation of terraform.UIInput that communicates -// over RPC. -type UIInput struct { - Client *rpc.Client -} - -func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - var resp UIInputInputResponse - err := i.Client.Call("Plugin.Input", opts, &resp) - if err != nil { - return "", err - } - if resp.Error != nil { - err = resp.Error - return "", err - } - - return resp.Value, nil -} - -type UIInputInputResponse struct { - Value string - Error *plugin.BasicError -} - -// UIInputServer is a net/rpc compatible structure for serving -// a UIInputServer. This should not be used directly. -type UIInputServer struct { - UIInput terraform.UIInput -} - -func (s *UIInputServer) Input( - opts *terraform.InputOpts, - reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(context.Background(), opts) - *reply = UIInputInputResponse{ - Value: value, - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/internal/plugin/ui_input_test.go b/internal/plugin/ui_input_test.go deleted file mode 100644 index 59cb0629a61a..000000000000 --- a/internal/plugin/ui_input_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package plugin - -import ( - "context" - "reflect" - "testing" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/terraform" -) - -func TestUIInput_impl(t *testing.T) { - var _ terraform.UIInput = new(UIInput) -} - -func TestUIInput_input(t *testing.T) { - client, server := plugin.TestRPCConn(t) - defer client.Close() - - i := new(terraform.MockUIInput) - i.InputReturnString = "foo" - - err := server.RegisterName("Plugin", &UIInputServer{ - UIInput: i, - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - input := &UIInput{Client: client} - - opts := &terraform.InputOpts{ - Id: "foo", - } - - v, err := input.Input(context.Background(), opts) - if !i.InputCalled { - 
t.Fatal("input should be called") - } - if !reflect.DeepEqual(i.InputOpts, opts) { - t.Fatalf("bad: %#v", i.InputOpts) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - - if v != "foo" { - t.Fatalf("bad: %#v", v) - } -} diff --git a/internal/plugin/ui_output.go b/internal/plugin/ui_output.go deleted file mode 100644 index 130bbe30e264..000000000000 --- a/internal/plugin/ui_output.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/terraform/internal/terraform" -) - -// UIOutput is an implementatin of terraform.UIOutput that communicates -// over RPC. -type UIOutput struct { - Client *rpc.Client -} - -func (o *UIOutput) Output(v string) { - o.Client.Call("Plugin.Output", v, new(interface{})) -} - -// UIOutputServer is the RPC server for serving UIOutput. -type UIOutputServer struct { - UIOutput terraform.UIOutput -} - -func (s *UIOutputServer) Output( - v string, - reply *interface{}) error { - s.UIOutput.Output(v) - return nil -} diff --git a/internal/plugin6/convert/diagnostics.go b/internal/plugin6/convert/diagnostics.go deleted file mode 100644 index 54058533e739..000000000000 --- a/internal/plugin6/convert/diagnostics.go +++ /dev/null @@ -1,132 +0,0 @@ -package convert - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" - proto "github.com/hashicorp/terraform/internal/tfplugin6" - "github.com/zclconf/go-cty/cty" -) - -// WarnsAndErrorsToProto converts the warnings and errors return by the legacy -// provider to protobuf diagnostics. -func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { - for _, w := range warns { - diags = AppendProtoDiag(diags, w) - } - - for _, e := range errs { - diags = AppendProtoDiag(diags, e) - } - - return diags -} - -// AppendProtoDiag appends a new diagnostic from a warning string or an error. -// This panics if d is not a string or error. 
-func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { - switch d := d.(type) { - case cty.PathError: - ap := PathToAttributePath(d.Path) - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - Attribute: ap, - }) - case error: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - }) - case string: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: d, - }) - case *proto.Diagnostic: - diags = append(diags, d) - case []*proto.Diagnostic: - diags = append(diags, d...) - } - return diags -} - -// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. -func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, d := range ds { - var severity tfdiags.Severity - - switch d.Severity { - case proto.Diagnostic_ERROR: - severity = tfdiags.Error - case proto.Diagnostic_WARNING: - severity = tfdiags.Warning - } - - var newDiag tfdiags.Diagnostic - - // if there's an attribute path, we need to create a AttributeValue diagnostic - if d.Attribute != nil { - path := AttributePathToPath(d.Attribute) - newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) - } else { - newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) - } - - diags = diags.Append(newDiag) - } - - return diags -} - -// AttributePathToPath takes the proto encoded path and converts it to a cty.Path -func AttributePathToPath(ap *proto.AttributePath) cty.Path { - var p cty.Path - for _, step := range ap.Steps { - switch selector := step.Selector.(type) { - case *proto.AttributePath_Step_AttributeName: - p = p.GetAttr(selector.AttributeName) - case *proto.AttributePath_Step_ElementKeyString: - p = p.Index(cty.StringVal(selector.ElementKeyString)) - case *proto.AttributePath_Step_ElementKeyInt: - p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) - } 
- } - return p -} - -// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. -func PathToAttributePath(p cty.Path) *proto.AttributePath { - ap := &proto.AttributePath{} - for _, step := range p { - switch selector := step.(type) { - case cty.GetAttrStep: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: selector.Name, - }, - }) - case cty.IndexStep: - key := selector.Key - switch key.Type() { - case cty.String: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: key.AsString(), - }, - }) - case cty.Number: - v, _ := key.AsBigFloat().Int64() - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: v, - }, - }) - default: - // We'll bail early if we encounter anything else, and just - // return the valid prefix. - return ap - } - } - } - return ap -} diff --git a/internal/plugin6/convert/diagnostics_test.go b/internal/plugin6/convert/diagnostics_test.go deleted file mode 100644 index 10088a05f5df..000000000000 --- a/internal/plugin6/convert/diagnostics_test.go +++ /dev/null @@ -1,367 +0,0 @@ -package convert - -import ( - "errors" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/tfdiags" - proto "github.com/hashicorp/terraform/internal/tfplugin6" - "github.com/zclconf/go-cty/cty" -) - -var ignoreUnexported = cmpopts.IgnoreUnexported( - proto.Diagnostic{}, - proto.Schema_Block{}, - proto.Schema_NestedBlock{}, - proto.Schema_Attribute{}, -) - -func TestProtoDiagnostics(t *testing.T) { - diags := WarnsAndErrsToProto( - []string{ - "warning 1", - "warning 2", - }, - []error{ - errors.New("error 1"), - errors.New("error 2"), - }, - ) - - expected := []*proto.Diagnostic{ - { - Severity: proto.Diagnostic_WARNING, - Summary: "warning 1", - }, - { 
- Severity: proto.Diagnostic_WARNING, - Summary: "warning 2", - }, - { - Severity: proto.Diagnostic_ERROR, - Summary: "error 1", - }, - { - Severity: proto.Diagnostic_ERROR, - Summary: "error 2", - }, - } - - if !cmp.Equal(expected, diags, ignoreUnexported) { - t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) - } -} - -func TestDiagnostics(t *testing.T) { - type diagFlat struct { - Severity tfdiags.Severity - Attr []interface{} - Summary string - Detail string - } - - tests := map[string]struct { - Cons func([]*proto.Diagnostic) []*proto.Diagnostic - Want []diagFlat - }{ - "nil": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return diags - }, - nil, - }, - "error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "simple error", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "simple error", - }, - }, - }, - "detailed error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "simple error", - Detail: "detailed error", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "simple error", - Detail: "detailed error", - }, - }, - }, - "warning": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "simple warning", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "simple warning", - }, - }, - }, - "detailed warning": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - return append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "simple warning", - Detail: "detailed warning", - }) - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "simple warning", - Detail: "detailed warning", - }, - }, - }, - "multi error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - 
diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "first error", - }, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "second error", - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "first error", - }, - { - Severity: tfdiags.Error, - Summary: "second error", - }, - }, - }, - "warning and error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "warning", - }, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error", - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Warning, - Summary: "warning", - }, - { - Severity: tfdiags.Error, - Summary: "error", - }, - }, - }, - "attr error": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error", - Detail: "error detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attribute_name", - }, - }, - }, - }, - }) - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "error", - Detail: "error detail", - Attr: []interface{}{"attribute_name"}, - }, - }, - }, - "multi attr": { - func(diags []*proto.Diagnostic) []*proto.Diagnostic { - diags = append(diags, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 1", - Detail: "error 1 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 2", - Detail: "error 2 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - 
}, - }, - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: "warning", - Detail: "warning detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - { - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: 1, - }, - }, - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "error 3", - Detail: "error 3 detail", - Attribute: &proto.AttributePath{ - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - { - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: "idx", - }, - }, - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "sub", - }, - }, - }, - }, - }, - ) - - return diags - }, - []diagFlat{ - { - Severity: tfdiags.Error, - Summary: "error 1", - Detail: "error 1 detail", - Attr: []interface{}{"attr"}, - }, - { - Severity: tfdiags.Error, - Summary: "error 2", - Detail: "error 2 detail", - Attr: []interface{}{"attr", "sub"}, - }, - { - Severity: tfdiags.Warning, - Summary: "warning", - Detail: "warning detail", - Attr: []interface{}{"attr", 1, "sub"}, - }, - { - Severity: tfdiags.Error, - Summary: "error 3", - Detail: "error 3 detail", - Attr: []interface{}{"attr", "idx", "sub"}, - }, - }, - }, - } - - flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { - var flat []diagFlat - for _, item := range ds { - desc := item.Description() - - var attr []interface{} - - for _, a := range tfdiags.GetAttribute(item) { - switch step := a.(type) { - case cty.GetAttrStep: - attr = append(attr, step.Name) - case cty.IndexStep: - switch step.Key.Type() { - case cty.Number: - i, _ := 
step.Key.AsBigFloat().Int64() - attr = append(attr, int(i)) - case cty.String: - attr = append(attr, step.Key.AsString()) - } - } - } - - flat = append(flat, diagFlat{ - Severity: item.Severity(), - Attr: attr, - Summary: desc.Summary, - Detail: desc.Detail, - }) - } - return flat - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - // we take the - tfDiags := ProtoToDiagnostics(tc.Cons(nil)) - - flat := flattenTFDiags(tfDiags) - - if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) - } - }) - } -} diff --git a/internal/plugin6/convert/schema.go b/internal/plugin6/convert/schema.go deleted file mode 100644 index f3c59548cbdd..000000000000 --- a/internal/plugin6/convert/schema.go +++ /dev/null @@ -1,297 +0,0 @@ -package convert - -import ( - "encoding/json" - "reflect" - "sort" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - proto "github.com/hashicorp/terraform/internal/tfplugin6" - "github.com/zclconf/go-cty/cty" -) - -// ConfigSchemaToProto takes a *configschema.Block and converts it to a -// proto.Schema_Block for a grpc response. 
-func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { - block := &proto.Schema_Block{ - Description: b.Description, - DescriptionKind: protoStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - - attr := &proto.Schema_Attribute{ - Name: name, - Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if a.Type != cty.NilType { - ty, err := json.Marshal(a.Type) - if err != nil { - panic(err) - } - attr.Type = ty - } - - if a.NestedType != nil { - attr.NestedType = configschemaObjectToProto(a.NestedType) - } - - block.Attributes = append(block.Attributes, attr) - } - - for _, name := range sortedKeys(b.BlockTypes) { - b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) - } - - return block -} - -func protoStringKind(k configschema.StringKind) proto.StringKind { - switch k { - default: - return proto.StringKind_PLAIN - case configschema.StringMarkdown: - return proto.StringKind_MARKDOWN - } -} - -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { - var nesting proto.Schema_NestedBlock_NestingMode - switch b.Nesting { - case configschema.NestingSingle: - nesting = proto.Schema_NestedBlock_SINGLE - case configschema.NestingGroup: - nesting = proto.Schema_NestedBlock_GROUP - case configschema.NestingList: - nesting = proto.Schema_NestedBlock_LIST - case configschema.NestingSet: - nesting = proto.Schema_NestedBlock_SET - case configschema.NestingMap: - nesting = proto.Schema_NestedBlock_MAP - default: - nesting = proto.Schema_NestedBlock_INVALID - } - return &proto.Schema_NestedBlock{ - TypeName: name, - Block: ConfigSchemaToProto(&b.Block), - Nesting: nesting, - MinItems: int64(b.MinItems), - MaxItems: 
int64(b.MaxItems), - } -} - -// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. -func ProtoToProviderSchema(s *proto.Schema) providers.Schema { - return providers.Schema{ - Version: s.Version, - Block: ProtoToConfigSchema(s.Block), - } -} - -// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it -// to a terraform *configschema.Block. -func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { - block := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - BlockTypes: make(map[string]*configschema.NestedBlock), - - Description: b.Description, - DescriptionKind: schemaStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, a := range b.Attributes { - attr := &configschema.Attribute{ - Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), - Required: a.Required, - Optional: a.Optional, - Computed: a.Computed, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if a.Type != nil { - if err := json.Unmarshal(a.Type, &attr.Type); err != nil { - panic(err) - } - } - - if a.NestedType != nil { - attr.NestedType = protoObjectToConfigSchema(a.NestedType) - } - - block.Attributes[a.Name] = attr - } - - for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(b) - } - - return block -} - -func schemaStringKind(k proto.StringKind) configschema.StringKind { - switch k { - default: - return configschema.StringPlain - case proto.StringKind_MARKDOWN: - return configschema.StringMarkdown - } -} - -func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch b.Nesting { - case proto.Schema_NestedBlock_SINGLE: - nesting = configschema.NestingSingle - case proto.Schema_NestedBlock_GROUP: - nesting = configschema.NestingGroup - case proto.Schema_NestedBlock_LIST: - nesting = configschema.NestingList - case proto.Schema_NestedBlock_MAP: 
- nesting = configschema.NestingMap - case proto.Schema_NestedBlock_SET: - nesting = configschema.NestingSet - default: - // In all other cases we'll leave it as the zero value (invalid) and - // let the caller validate it and deal with this. - } - - nb := &configschema.NestedBlock{ - Nesting: nesting, - MinItems: int(b.MinItems), - MaxItems: int(b.MaxItems), - } - - nested := ProtoToConfigSchema(b.Block) - nb.Block = *nested - return nb -} - -func protoObjectToConfigSchema(b *proto.Schema_Object) *configschema.Object { - var nesting configschema.NestingMode - switch b.Nesting { - case proto.Schema_Object_SINGLE: - nesting = configschema.NestingSingle - case proto.Schema_Object_LIST: - nesting = configschema.NestingList - case proto.Schema_Object_MAP: - nesting = configschema.NestingMap - case proto.Schema_Object_SET: - nesting = configschema.NestingSet - default: - // In all other cases we'll leave it as the zero value (invalid) and - // let the caller validate it and deal with this. - } - - object := &configschema.Object{ - Attributes: make(map[string]*configschema.Attribute), - Nesting: nesting, - } - - for _, a := range b.Attributes { - attr := &configschema.Attribute{ - Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), - Required: a.Required, - Optional: a.Optional, - Computed: a.Computed, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if a.Type != nil { - if err := json.Unmarshal(a.Type, &attr.Type); err != nil { - panic(err) - } - } - - if a.NestedType != nil { - attr.NestedType = protoObjectToConfigSchema(a.NestedType) - } - - object.Attributes[a.Name] = attr - } - - return object -} - -// sortedKeys returns the lexically sorted keys from the given map. This is -// used to make schema conversions are deterministic. This panics if map keys -// are not a string. 
-func sortedKeys(m interface{}) []string { - v := reflect.ValueOf(m) - keys := make([]string, v.Len()) - - mapKeys := v.MapKeys() - for i, k := range mapKeys { - keys[i] = k.Interface().(string) - } - - sort.Strings(keys) - return keys -} - -func configschemaObjectToProto(b *configschema.Object) *proto.Schema_Object { - var nesting proto.Schema_Object_NestingMode - switch b.Nesting { - case configschema.NestingSingle: - nesting = proto.Schema_Object_SINGLE - case configschema.NestingList: - nesting = proto.Schema_Object_LIST - case configschema.NestingSet: - nesting = proto.Schema_Object_SET - case configschema.NestingMap: - nesting = proto.Schema_Object_MAP - default: - nesting = proto.Schema_Object_INVALID - } - - attributes := make([]*proto.Schema_Attribute, 0, len(b.Attributes)) - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - - attr := &proto.Schema_Attribute{ - Name: name, - Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if a.Type != cty.NilType { - ty, err := json.Marshal(a.Type) - if err != nil { - panic(err) - } - attr.Type = ty - } - - if a.NestedType != nil { - attr.NestedType = configschemaObjectToProto(a.NestedType) - } - - attributes = append(attributes, attr) - } - - return &proto.Schema_Object{ - Attributes: attributes, - Nesting: nesting, - } -} diff --git a/internal/plugin6/convert/schema_test.go b/internal/plugin6/convert/schema_test.go deleted file mode 100644 index 9befe4c5afb0..000000000000 --- a/internal/plugin6/convert/schema_test.go +++ /dev/null @@ -1,566 +0,0 @@ -package convert - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/configs/configschema" - proto "github.com/hashicorp/terraform/internal/tfplugin6" - "github.com/zclconf/go-cty/cty" -) - 
-var ( - equateEmpty = cmpopts.EquateEmpty() - typeComparer = cmp.Comparer(cty.Type.Equals) - valueComparer = cmp.Comparer(cty.Value.RawEquals) -) - -// Test that we can convert configschema to protobuf types and back again. -func TestConvertSchemaBlocks(t *testing.T) { - tests := map[string]struct { - Block *proto.Schema_Block - Want *configschema.Block - }{ - "attributes": { - &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - { - Name: "nested_type", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_SINGLE, - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - }, - }, - Required: true, - }, - { - Name: "deeply_nested_type", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_SINGLE, - Attributes: []*proto.Schema_Attribute{ - { - Name: "first_level", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_SINGLE, - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - }, - }, - Computed: true, - }, - }, - }, - Required: true, - }, 
- { - Name: "nested_list", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_LIST, - Attributes: []*proto.Schema_Attribute{ - { - Name: "required", - Type: []byte(`"string"`), - Computed: true, - }, - }, - }, - Required: true, - }, - { - Name: "nested_set", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_SET, - Attributes: []*proto.Schema_Attribute{ - { - Name: "required", - Type: []byte(`"string"`), - Computed: true, - }, - }, - }, - Required: true, - }, - { - Name: "nested_map", - NestedType: &proto.Schema_Object{ - Nesting: proto.Schema_Object_MAP, - Attributes: []*proto.Schema_Attribute{ - { - Name: "required", - Type: []byte(`"string"`), - Computed: true, - }, - }, - }, - Required: true, - }, - }, - }, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - "nested_type": { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - }, - Nesting: configschema.NestingSingle, - }, - Required: true, - }, - "deeply_nested_type": { - NestedType: &configschema.Object{ - Attributes: map[string]*configschema.Attribute{ - "first_level": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSingle, - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - 
Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - }, - }, - Computed: true, - }, - }, - Nesting: configschema.NestingSingle, - }, - Required: true, - }, - "nested_list": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingList, - Attributes: map[string]*configschema.Attribute{ - "required": { - Type: cty.String, - Computed: true, - }, - }, - }, - Required: true, - }, - "nested_map": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingMap, - Attributes: map[string]*configschema.Attribute{ - "required": { - Type: cty.String, - Computed: true, - }, - }, - }, - Required: true, - }, - "nested_set": { - NestedType: &configschema.Object{ - Nesting: configschema.NestingSet, - Attributes: map[string]*configschema.Attribute{ - "required": { - Type: cty.String, - Computed: true, - }, - }, - }, - Required: true, - }, - }, - }, - }, - "blocks": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "map", - Nesting: proto.Schema_NestedBlock_MAP, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "foo", - Type: []byte(`"dynamic"`), - Required: true, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - }, - "map": &configschema.NestedBlock{ - Nesting: configschema.NestingMap, - }, - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "foo": { - Type: cty.DynamicPseudoType, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "deep block nesting": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - converted := ProtoToConfigSchema(tc.Block) - if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) - } - }) - } -} - -// Test that we can convert configschema to protobuf types and back again. 
-func TestConvertProtoSchemaBlocks(t *testing.T) { - tests := map[string]struct { - Want *proto.Schema_Block - Block *configschema.Block - }{ - "attributes": { - &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "computed", - Type: []byte(`["list","bool"]`), - Computed: true, - }, - { - Name: "optional", - Type: []byte(`"string"`), - Optional: true, - }, - { - Name: "optional_computed", - Type: []byte(`["map","bool"]`), - Optional: true, - Computed: true, - }, - { - Name: "required", - Type: []byte(`"number"`), - Required: true, - }, - }, - }, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "computed": { - Type: cty.List(cty.Bool), - Computed: true, - }, - "optional": { - Type: cty.String, - Optional: true, - }, - "optional_computed": { - Type: cty.Map(cty.Bool), - Optional: true, - Computed: true, - }, - "required": { - Type: cty.Number, - Required: true, - }, - }, - }, - }, - "blocks": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "map", - Nesting: proto.Schema_NestedBlock_MAP, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "foo", - Type: []byte(`"dynamic"`), - Required: true, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - }, - "map": &configschema.NestedBlock{ - Nesting: configschema.NestingMap, - }, - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "foo": { - Type: cty.DynamicPseudoType, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "deep block nesting": { - &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "single", - Nesting: proto.Schema_NestedBlock_SINGLE, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "list", - Nesting: proto.Schema_NestedBlock_LIST, - Block: &proto.Schema_Block{ - BlockTypes: []*proto.Schema_NestedBlock{ - { - TypeName: "set", - Nesting: proto.Schema_NestedBlock_SET, - Block: &proto.Schema_Block{}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "single": &configschema.NestedBlock{ - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "list": &configschema.NestedBlock{ - Nesting: configschema.NestingList, - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "set": &configschema.NestedBlock{ - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - converted := ConfigSchemaToProto(tc.Block) - if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { - t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) - } - }) - } -} diff --git a/internal/plugin6/grpc_error.go b/internal/plugin6/grpc_error.go deleted file mode 100644 index 717c1642bb71..000000000000 --- a/internal/plugin6/grpc_error.go +++ /dev/null @@ -1,74 +0,0 @@ -package plugin6 - -import ( - "fmt" - "path" - "runtime" - - "github.com/hashicorp/terraform/internal/tfdiags" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// grpcErr extracts some known error types and formats them into better -// representations for core. This must only be called from plugin methods. 
-// Since we don't use RPC status errors for the plugin protocol, these do not -// contain any useful details, and we can return some text that at least -// indicates the plugin call and possible error condition. -func grpcErr(err error) (diags tfdiags.Diagnostics) { - if err == nil { - return - } - - // extract the method name from the caller. - pc, _, _, ok := runtime.Caller(1) - if !ok { - logger.Error("unknown grpc call", "error", err) - return diags.Append(err) - } - - f := runtime.FuncForPC(pc) - - // Function names will contain the full import path. Take the last - // segment, which will let users know which method was being called. - _, requestName := path.Split(f.Name()) - - // Here we can at least correlate the error in the logs to a particular binary. - logger.Error(requestName, "error", err) - - // TODO: while this expands the error codes into somewhat better messages, - // this still does not easily link the error to an actual user-recognizable - // plugin. The grpc plugin does not know its configured name, and the - // errors are in a list of diagnostics, making it hard for the caller to - // annotate the returned errors. - switch status.Code(err) { - case codes.Unavailable: - // This case is when the plugin has stopped running for some reason, - // and is usually the result of a crash. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Plugin did not respond", - fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. 
"+ - "The plugin logs may contain more details.", requestName), - )) - case codes.Canceled: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Request cancelled", - fmt.Sprintf("The %s request was cancelled.", requestName), - )) - case codes.Unimplemented: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported plugin method", - fmt.Sprintf("The %s method is not supported by this plugin.", requestName), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Plugin error", - fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), - )) - } - return -} diff --git a/internal/plugin6/grpc_provider.go b/internal/plugin6/grpc_provider.go deleted file mode 100644 index c6530e075468..000000000000 --- a/internal/plugin6/grpc_provider.go +++ /dev/null @@ -1,693 +0,0 @@ -package plugin6 - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/plugin6/convert" - "github.com/hashicorp/terraform/internal/providers" - proto6 "github.com/hashicorp/terraform/internal/tfplugin6" - ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -var logger = logging.HCLogger() - -// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. 
-type GRPCProviderPlugin struct { - plugin.Plugin - GRPCProvider func() proto6.ProviderServer -} - -func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvider{ - client: proto6.NewProviderClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto6.RegisterProviderServer(s, p.GRPCProvider()) - return nil -} - -// GRPCProvider handles the client, or core side of the plugin rpc connection. -// The GRPCProvider methods are mostly a translation layer between the -// terraform providers types and the grpc proto types, directly converting -// between the two. -type GRPCProvider struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - // TestServer contains a grpc.Server to close when the GRPCProvider is being - // used in an end to end test of a provider. - TestServer *grpc.Server - - // Proto client use to make the grpc service calls. - client proto6.ProviderClient - - // this context is created by the plugin package, and is canceled when the - // plugin process ends. - ctx context.Context - - // schema stores the schema for this provider. This is used to properly - // serialize the state for requests. - mu sync.Mutex - schemas providers.GetProviderSchemaResponse -} - -func New(client proto6.ProviderClient, ctx context.Context) GRPCProvider { - return GRPCProvider{ - client: client, - ctx: ctx, - } -} - -// getSchema is used internally to get the cached provider schema. 
-func (p *GRPCProvider) getSchema() providers.GetProviderSchemaResponse { - p.mu.Lock() - // unlock inline in case GetProviderSchema needs to be called - if p.schemas.Provider.Block != nil { - p.mu.Unlock() - return p.schemas - } - p.mu.Unlock() - - return p.GetProviderSchema() -} - -func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) { - logger.Trace("GRPCProvider.v6: GetProviderSchema") - p.mu.Lock() - defer p.mu.Unlock() - - if p.schemas.Provider.Block != nil { - return p.schemas - } - - resp.ResourceTypes = make(map[string]providers.Schema) - resp.DataSources = make(map[string]providers.Schema) - - // Some providers may generate quite large schemas, and the internal default - // grpc response size limit is 4MB. 64MB should cover most any use case, and - // if we get providers nearing that we may want to consider a finer-grained - // API to fetch individual resource schemas. - // Note: this option is marked as EXPERIMENTAL in the grpc API. We keep - // this for compatibility, but recent providers all set the max message - // size much higher on the server side, which is the supported method for - // determining payload size. 
- const maxRecvSize = 64 << 20 - protoResp, err := p.client.GetProviderSchema(p.ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if resp.Diagnostics.HasErrors() { - return resp - } - - if protoResp.Provider == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) - return resp - } - - resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) - if protoResp.ProviderMeta == nil { - logger.Debug("No provider meta schema returned") - } else { - resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta) - } - - for name, res := range protoResp.ResourceSchemas { - resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) - } - - for name, data := range protoResp.DataSourceSchemas { - resp.DataSources[name] = convert.ProtoToProviderSchema(data) - } - - if protoResp.ServerCapabilities != nil { - resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy - } - - p.schemas = resp - - return resp -} - -func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - logger.Trace("GRPCProvider.v6: ValidateProviderConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - ty := schema.Provider.Block.ImpliedType() - - mp, err := msgpack.Marshal(r.Config, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ValidateProviderConfig_Request{ - Config: &proto6.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateProviderConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return 
resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - logger.Trace("GRPCProvider.v6: ValidateResourceConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resourceSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ValidateResourceConfig_Request{ - TypeName: r.TypeName, - Config: &proto6.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateResourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - logger.Trace("GRPCProvider.v6: ValidateDataResourceConfig") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - dataSchema, ok := schema.DataSources[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) - return resp - } - - mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ValidateDataResourceConfig_Request{ - TypeName: r.TypeName, - Config: 
&proto6.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateDataResourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - logger.Trace("GRPCProvider.v6: UpgradeResourceState") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - protoReq := &proto6.UpgradeResourceState_Request{ - TypeName: r.TypeName, - Version: int64(r.Version), - RawState: &proto6.RawState{ - Json: r.RawStateJSON, - Flatmap: r.RawStateFlatmap, - }, - } - - protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - ty := resSchema.Block.ImpliedType() - resp.UpgradedState = cty.NullVal(ty) - if protoResp.UpgradedState == nil { - return resp - } - - state, err := decodeDynamicValue(protoResp.UpgradedState, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = state - - return resp -} - -func (p *GRPCProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - logger.Trace("GRPCProvider.v6: ConfigureProvider") - - schema := p.getSchema() - - var mp []byte - - // we don't have anything to marshal if there's no config - mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) - if err != nil { - 
resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ConfigureProvider_Request{ - TerraformVersion: r.TerraformVersion, - Config: &proto6.DynamicValue{ - Msgpack: mp, - }, - } - - protoResp, err := p.client.ConfigureProvider(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) Stop() error { - logger.Trace("GRPCProvider.v6: Stop") - - resp, err := p.client.StopProvider(p.ctx, new(proto6.StopProvider_Request)) - if err != nil { - return err - } - - if resp.Error != "" { - return errors.New(resp.Error) - } - return nil -} - -func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - logger.Trace("GRPCProvider.v6: ReadResource") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type " + r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - - mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ReadResource_Request{ - TypeName: r.TypeName, - CurrentState: &proto6.DynamicValue{Msgpack: mp}, - Private: r.Private, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ReadResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - 
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.NewState = state - resp.Private = protoResp.Private - - return resp -} - -func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - logger.Trace("GRPCProvider.v6: PlanResourceChange") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - capabilities := schema.ServerCapabilities - - // If the provider doesn't support planning a destroy operation, we can - // return immediately. 
- if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { - resp.PlannedState = r.ProposedNewState - resp.PlannedPrivate = r.PriorPrivate - return resp - } - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.PlanResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto6.DynamicValue{Msgpack: priorMP}, - Config: &proto6.DynamicValue{Msgpack: configMP}, - ProposedNewState: &proto6.DynamicValue{Msgpack: propMP}, - PriorPrivate: r.PriorPrivate, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.PlannedState = state - - for _, p := range protoResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) - } - - resp.PlannedPrivate = protoResp.PlannedPrivate - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ApplyResourceChange(r 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - logger.Trace("GRPCProvider.v6: ApplyResourceChange") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - return resp - } - - metaSchema := schema.ProviderMeta - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ApplyResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto6.DynamicValue{Msgpack: priorMP}, - PlannedState: &proto6.DynamicValue{Msgpack: plannedMP}, - Config: &proto6.DynamicValue{Msgpack: configMP}, - PlannedPrivate: r.PlannedPrivate, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - resp.Private = protoResp.Private - - state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - 
resp.NewState = state - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - logger.Trace("GRPCProvider.v6: ImportResourceState") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - protoReq := &proto6.ImportResourceState_Request{ - TypeName: r.TypeName, - Id: r.ID, - } - - protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - for _, imported := range protoResp.ImportedResources { - resource := providers.ImportedResource{ - TypeName: imported.TypeName, - Private: imported.Private, - } - - resSchema, ok := schema.ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) - continue - } - - state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resource.State = state - resp.ImportedResources = append(resp.ImportedResources, resource) - } - - return resp -} - -func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - logger.Trace("GRPCProvider.v6: ReadDataSource") - - schema := p.getSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = schema.Diagnostics - return resp - } - - dataSchema, ok := schema.DataSources[r.TypeName] - if !ok { - schema.Diagnostics = schema.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) - } - - metaSchema := schema.ProviderMeta - - config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics 
= resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto6.ReadDataSource_Request{ - TypeName: r.TypeName, - Config: &proto6.DynamicValue{ - Msgpack: config, - }, - } - - if metaSchema.Block != nil { - metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} - } - - protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.State = state - - return resp -} - -// closing the grpc connection is final, and terraform will call it at the end of every phase. -func (p *GRPCProvider) Close() error { - logger.Trace("GRPCProvider.v6: Close") - - // Make sure to stop the server if we're not running within go-plugin. - if p.TestServer != nil { - p.TestServer.Stop() - } - - // Check this since it's not automatically inserted during plugin creation. - // It's currently only inserted by the command package, because that is - // where the factory is built and is the only point with access to the - // plugin.Client. - if p.PluginClient == nil { - logger.Debug("provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} - -// Decode a DynamicValue from either the JSON or MsgPack encoding. 
-func decodeDynamicValue(v *proto6.DynamicValue, ty cty.Type) (cty.Value, error) { - // always return a valid value - var err error - res := cty.NullVal(ty) - if v == nil { - return res, nil - } - - switch { - case len(v.Msgpack) > 0: - res, err = msgpack.Unmarshal(v.Msgpack, ty) - case len(v.Json) > 0: - res, err = ctyjson.Unmarshal(v.Json, ty) - } - return res, err -} diff --git a/internal/plugin6/grpc_provider_test.go b/internal/plugin6/grpc_provider_test.go deleted file mode 100644 index 300a09b4ade4..000000000000 --- a/internal/plugin6/grpc_provider_test.go +++ /dev/null @@ -1,784 +0,0 @@ -package plugin6 - -import ( - "bytes" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - mockproto "github.com/hashicorp/terraform/internal/plugin6/mock_proto" - proto "github.com/hashicorp/terraform/internal/tfplugin6" -) - -var _ providers.Interface = (*GRPCProvider)(nil) - -var ( - equateEmpty = cmpopts.EquateEmpty() - typeComparer = cmp.Comparer(cty.Type.Equals) - valueComparer = cmp.Comparer(cty.Value.RawEquals) -) - -func mockProviderClient(t *testing.T) *mockproto.MockProviderClient { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - // we always need a GetSchema method - client.EXPECT().GetProviderSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(providerProtoSchema(), nil) - - return client -} - -func checkDiags(t *testing.T, d tfdiags.Diagnostics) { - t.Helper() - if d.HasErrors() { - t.Fatal(d.Err()) - } -} - -// checkDiagsHasError ensures error diagnostics are present or fails the test. 
-func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) { - t.Helper() - - if !d.HasErrors() { - t.Fatal("expected error diagnostics") - } -} - -func providerProtoSchema() *proto.GetProviderSchema_Response { - return &proto.GetProviderSchema_Response{ - Provider: &proto.Schema{ - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - ResourceSchemas: map[string]*proto.Schema{ - "resource": { - Version: 1, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - }, - DataSourceSchemas: map[string]*proto.Schema{ - "data": { - Version: 1, - Block: &proto.Schema_Block{ - Attributes: []*proto.Schema_Attribute{ - { - Name: "attr", - Type: []byte(`"string"`), - Required: true, - }, - }, - }, - }, - }, - } -} - -func TestGRPCProvider_GetSchema(t *testing.T) { - p := &GRPCProvider{ - client: mockProviderClient(t), - } - - resp := p.GetProviderSchema() - checkDiags(t, resp.Diagnostics) -} - -// Ensure that gRPC errors are returned early. -// Reference: https://github.com/hashicorp/terraform/issues/31047 -func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - client.EXPECT().GetProviderSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error")) - - p := &GRPCProvider{ - client: client, - } - - resp := p.GetProviderSchema() - - checkDiagsHasError(t, resp.Diagnostics) -} - -// Ensure that provider error diagnostics are returned early. 
-// Reference: https://github.com/hashicorp/terraform/issues/31047 -func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - - client.EXPECT().GetProviderSchema( - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(&proto.GetProviderSchema_Response{ - Diagnostics: []*proto.Diagnostic{ - { - Severity: proto.Diagnostic_ERROR, - Summary: "error summary", - Detail: "error detail", - }, - }, - // Trigger potential panics - Provider: &proto.Schema{}, - }, nil) - - p := &GRPCProvider{ - client: client, - } - - resp := p.GetProviderSchema() - - checkDiagsHasError(t, resp.Diagnostics) -} - -func TestGRPCProvider_PrepareProviderConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ValidateProviderConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ValidateProviderConfig_Response{}, nil) - - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg}) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_ValidateResourceConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ValidateResourceConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ValidateResourceConfig_Response{}, nil) - - cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ - TypeName: "resource", - Config: cfg, - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_ValidateDataResourceConfig(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ValidateDataResourceConfig( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ValidateDataResourceConfig_Response{}, nil) - - cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) - resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ - TypeName: "data", - Config: cfg, - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_UpgradeResourceState(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().UpgradeResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.UpgradeResourceState_Response{ - UpgradedState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ - TypeName: "resource", - Version: 0, - RawStateJSON: []byte(`{"old_attr":"bar"}`), - }) - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().UpgradeResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.UpgradeResourceState_Response{ - UpgradedState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ - TypeName: "resource", - Version: 0, - RawStateJSON: []byte(`{"old_attr":"bar"}`), - }) - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_Configure(t *testing.T) { - client := mockProviderClient(t) - p := 
&GRPCProvider{ - client: client, - } - - client.EXPECT().ConfigureProvider( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ConfigureProvider_Response{}, nil) - - resp := p.ConfigureProvider(providers.ConfigureProviderRequest{ - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - checkDiags(t, resp.Diagnostics) -} - -func TestGRPCProvider_Stop(t *testing.T) { - ctrl := gomock.NewController(t) - client := mockproto.NewMockProviderClient(ctrl) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().StopProvider( - gomock.Any(), - gomock.Any(), - ).Return(&proto.StopProvider_Response{}, nil) - - err := p.Stop() - if err != nil { - t.Fatal(err) - } -} - -func TestGRPCProvider_ReadResource(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadResourceJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": 
cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadEmptyJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadResource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadResource_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(``), - }, - }, nil) - - obj := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }) - resp := p.ReadResource(providers.ReadResourceRequest{ - TypeName: "resource", - PriorState: obj, - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.NullVal(obj.Type()) - - if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_PlanResourceChange(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().PlanResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.PlanResourceChange_Response{ - PlannedState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - RequiresReplace: []*proto.AttributePath{ - { - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - PlannedPrivate: expectedPrivate, - }, nil) - - resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - ProposedNewState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - 
Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) - } - - expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` - replace := fmt.Sprintf("%#v", resp.RequiresReplace) - if expectedReplace != replace { - t.Fatalf("expected %q, got %q", expectedReplace, replace) - } - - if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) - } -} - -func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().PlanResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.PlanResourceChange_Response{ - PlannedState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - RequiresReplace: []*proto.AttributePath{ - { - Steps: []*proto.AttributePath_Step{ - { - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: "attr", - }, - }, - }, - }, - }, - PlannedPrivate: expectedPrivate, - }, nil) - - resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - ProposedNewState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { - 
t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) - } - - expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` - replace := fmt.Sprintf("%#v", resp.RequiresReplace) - if expectedReplace != replace { - t.Fatalf("expected %q, got %q", expectedReplace, replace) - } - - if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) - } -} - -func TestGRPCProvider_ApplyResourceChange(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ApplyResourceChange( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ApplyResourceChange_Response{ - NewState: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - Private: expectedPrivate, - }, nil) - - resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - PlannedPrivate: expectedPrivate, - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } - - if !bytes.Equal(expectedPrivate, resp.Private) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) - } -} -func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ApplyResourceChange( - gomock.Any(), - gomock.Any(), - 
).Return(&proto.ApplyResourceChange_Response{ - NewState: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - Private: expectedPrivate, - }, nil) - - resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: "resource", - PriorState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - PlannedPrivate: expectedPrivate, - }) - - checkDiags(t, resp.Diagnostics) - - expectedState := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) - } - - if !bytes.Equal(expectedPrivate, resp.Private) { - t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) - } -} - -func TestGRPCProvider_ImportResourceState(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ImportResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ImportResourceState_Response{ - ImportedResources: []*proto.ImportResourceState_ImportedResource{ - { - TypeName: "resource", - State: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - Private: expectedPrivate, - }, - }, - }, nil) - - resp := p.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: "resource", - ID: "foo", - }) - - checkDiags(t, resp.Diagnostics) - - expectedResource := providers.ImportedResource{ - TypeName: "resource", - State: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Private: expectedPrivate, - } - - imported := resp.ImportedResources[0] - if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { 
- t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) - } -} -func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - expectedPrivate := []byte(`{"meta": "data"}`) - - client.EXPECT().ImportResourceState( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ImportResourceState_Response{ - ImportedResources: []*proto.ImportResourceState_ImportedResource{ - { - TypeName: "resource", - State: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - Private: expectedPrivate, - }, - }, - }, nil) - - resp := p.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: "resource", - ID: "foo", - }) - - checkDiags(t, resp.Diagnostics) - - expectedResource := providers.ImportedResource{ - TypeName: "resource", - State: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }), - Private: expectedPrivate, - } - - imported := resp.ImportedResources[0] - if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) - } -} - -func TestGRPCProvider_ReadDataSource(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadDataSource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadDataSource_Response{ - State: &proto.DynamicValue{ - Msgpack: []byte("\x81\xa4attr\xa3bar"), - }, - }, nil) - - resp := p.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: "data", - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) - } -} - 
-func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { - client := mockProviderClient(t) - p := &GRPCProvider{ - client: client, - } - - client.EXPECT().ReadDataSource( - gomock.Any(), - gomock.Any(), - ).Return(&proto.ReadDataSource_Response{ - State: &proto.DynamicValue{ - Json: []byte(`{"attr":"bar"}`), - }, - }, nil) - - resp := p.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: "data", - Config: cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("foo"), - }), - }) - - checkDiags(t, resp.Diagnostics) - - expected := cty.ObjectVal(map[string]cty.Value{ - "attr": cty.StringVal("bar"), - }) - - if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) - } -} diff --git a/internal/plugin6/mock_proto/generate.go b/internal/plugin6/mock_proto/generate.go deleted file mode 100644 index cde637e4b2df..000000000000 --- a/internal/plugin6/mock_proto/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/hashicorp/terraform/internal/tfplugin6 ProviderClient - -package mock_tfplugin6 diff --git a/internal/plugin6/mock_proto/mock.go b/internal/plugin6/mock_proto/mock.go deleted file mode 100644 index 448008ef7528..000000000000 --- a/internal/plugin6/mock_proto/mock.go +++ /dev/null @@ -1,277 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/hashicorp/terraform/internal/tfplugin6 (interfaces: ProviderClient) - -// Package mock_tfplugin6 is a generated GoMock package. -package mock_tfplugin6 - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - tfplugin6 "github.com/hashicorp/terraform/internal/tfplugin6" - grpc "google.golang.org/grpc" -) - -// MockProviderClient is a mock of ProviderClient interface. 
-type MockProviderClient struct { - ctrl *gomock.Controller - recorder *MockProviderClientMockRecorder -} - -// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. -type MockProviderClientMockRecorder struct { - mock *MockProviderClient -} - -// NewMockProviderClient creates a new mock instance. -func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { - mock := &MockProviderClient{ctrl: ctrl} - mock.recorder = &MockProviderClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { - return m.recorder -} - -// ApplyResourceChange mocks base method. -func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin6.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.ApplyResourceChange_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) - ret0, _ := ret[0].(*tfplugin6.ApplyResourceChange_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ApplyResourceChange indicates an expected call of ApplyResourceChange. -func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) -} - -// ConfigureProvider mocks base method. 
-func (m *MockProviderClient) ConfigureProvider(arg0 context.Context, arg1 *tfplugin6.ConfigureProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.ConfigureProvider_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ConfigureProvider", varargs...) - ret0, _ := ret[0].(*tfplugin6.ConfigureProvider_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ConfigureProvider indicates an expected call of ConfigureProvider. -func (mr *MockProviderClientMockRecorder) ConfigureProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureProvider", reflect.TypeOf((*MockProviderClient)(nil).ConfigureProvider), varargs...) -} - -// GetProviderSchema mocks base method. -func (m *MockProviderClient) GetProviderSchema(arg0 context.Context, arg1 *tfplugin6.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetProviderSchema_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetProviderSchema", varargs...) - ret0, _ := ret[0].(*tfplugin6.GetProviderSchema_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProviderSchema indicates an expected call of GetProviderSchema. -func (mr *MockProviderClientMockRecorder) GetProviderSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProviderSchema", reflect.TypeOf((*MockProviderClient)(nil).GetProviderSchema), varargs...) -} - -// ImportResourceState mocks base method. 
-func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin6.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.ImportResourceState_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ImportResourceState", varargs...) - ret0, _ := ret[0].(*tfplugin6.ImportResourceState_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImportResourceState indicates an expected call of ImportResourceState. -func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) -} - -// PlanResourceChange mocks base method. -func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin6.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.PlanResourceChange_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) - ret0, _ := ret[0].(*tfplugin6.PlanResourceChange_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PlanResourceChange indicates an expected call of PlanResourceChange. -func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) -} - -// ReadDataSource mocks base method. 
-func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin6.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadDataSource_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReadDataSource", varargs...) - ret0, _ := ret[0].(*tfplugin6.ReadDataSource_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadDataSource indicates an expected call of ReadDataSource. -func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) -} - -// ReadResource mocks base method. -func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin6.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadResource_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReadResource", varargs...) - ret0, _ := ret[0].(*tfplugin6.ReadResource_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadResource indicates an expected call of ReadResource. -func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) -} - -// StopProvider mocks base method. 
-func (m *MockProviderClient) StopProvider(arg0 context.Context, arg1 *tfplugin6.StopProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.StopProvider_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StopProvider", varargs...) - ret0, _ := ret[0].(*tfplugin6.StopProvider_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StopProvider indicates an expected call of StopProvider. -func (mr *MockProviderClientMockRecorder) StopProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopProvider", reflect.TypeOf((*MockProviderClient)(nil).StopProvider), varargs...) -} - -// UpgradeResourceState mocks base method. -func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin6.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.UpgradeResourceState_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) - ret0, _ := ret[0].(*tfplugin6.UpgradeResourceState_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpgradeResourceState indicates an expected call of UpgradeResourceState. -func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...) -} - -// ValidateDataResourceConfig mocks base method. 
-func (m *MockProviderClient) ValidateDataResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateDataResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateDataResourceConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateDataResourceConfig", varargs...) - ret0, _ := ret[0].(*tfplugin6.ValidateDataResourceConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateDataResourceConfig indicates an expected call of ValidateDataResourceConfig. -func (mr *MockProviderClientMockRecorder) ValidateDataResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataResourceConfig), varargs...) -} - -// ValidateProviderConfig mocks base method. -func (m *MockProviderClient) ValidateProviderConfig(arg0 context.Context, arg1 *tfplugin6.ValidateProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateProviderConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateProviderConfig", varargs...) - ret0, _ := ret[0].(*tfplugin6.ValidateProviderConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateProviderConfig indicates an expected call of ValidateProviderConfig. -func (mr *MockProviderClientMockRecorder) ValidateProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateProviderConfig), varargs...) 
-} - -// ValidateResourceConfig mocks base method. -func (m *MockProviderClient) ValidateResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateResourceConfig_Response, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ValidateResourceConfig", varargs...) - ret0, _ := ret[0].(*tfplugin6.ValidateResourceConfig_Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidateResourceConfig indicates an expected call of ValidateResourceConfig. -func (mr *MockProviderClientMockRecorder) ValidateResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceConfig), varargs...) -} diff --git a/internal/plugin6/serve.go b/internal/plugin6/serve.go deleted file mode 100644 index 8c5203fcd5e6..000000000000 --- a/internal/plugin6/serve.go +++ /dev/null @@ -1,63 +0,0 @@ -package plugin6 - -import ( - "github.com/hashicorp/go-plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin6" -) - -const ( - // The constants below are the names of the plugins that can be dispensed - // from the plugin server. - ProviderPluginName = "provider" - - // DefaultProtocolVersion is the protocol version assumed for legacy clients - // that don't specify a particular version during their handshake. Since we - // explicitly set VersionedPlugins in Serve, this number does not need to - // change with the protocol version and can effectively stay 4 forever - // (unless we need the "biggest hammer" approach to break all provider - // compatibility). - DefaultProtocolVersion = 4 -) - -// Handshake is the HandshakeConfig used to configure clients and servers. 
-var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. - ProtocolVersion: DefaultProtocolVersion, - - // The magic cookie values should NEVER be changed. - MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type GRPCProviderFunc func() proto.ProviderServer - -// ServeOpts are the configurations to serve a plugin. -type ServeOpts struct { - GRPCProviderFunc GRPCProviderFunc -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - VersionedPlugins: pluginSet(opts), - GRPCServer: plugin.DefaultGRPCServer, - }) -} - -func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - plugins := map[int]plugin.PluginSet{} - - // add the new protocol versions if they're configured - if opts.GRPCProviderFunc != nil { - plugins[6] = plugin.PluginSet{} - if opts.GRPCProviderFunc != nil { - plugins[6]["provider"] = &GRPCProviderPlugin{ - GRPCProvider: opts.GRPCProviderFunc, - } - } - } - return plugins -} diff --git a/internal/provider-simple-v6/main/main.go b/internal/provider-simple-v6/main/main.go deleted file mode 100644 index cc2bbc3c3fab..000000000000 --- a/internal/provider-simple-v6/main/main.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/internal/grpcwrap" - plugin "github.com/hashicorp/terraform/internal/plugin6" - simple "github.com/hashicorp/terraform/internal/provider-simple-v6" - "github.com/hashicorp/terraform/internal/tfplugin6" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - GRPCProviderFunc: func() tfplugin6.ProviderServer { - return grpcwrap.Provider6(simple.Provider()) - }, - }) -} diff --git a/internal/provider-simple-v6/provider.go 
b/internal/provider-simple-v6/provider.go deleted file mode 100644 index 1fb2ce127bf2..000000000000 --- a/internal/provider-simple-v6/provider.go +++ /dev/null @@ -1,147 +0,0 @@ -// simple provider a minimal provider implementation for testing -package simple - -import ( - "errors" - "fmt" - "time" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -type simple struct { - schema providers.GetProviderSchemaResponse -} - -func Provider() providers.Interface { - simpleResource := providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Computed: true, - Type: cty.String, - }, - "value": { - Optional: true, - Type: cty.String, - }, - }, - }, - } - - return simple{ - schema: providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: nil, - }, - ResourceTypes: map[string]providers.Schema{ - "simple_resource": simpleResource, - }, - DataSources: map[string]providers.Schema{ - "simple_resource": simpleResource, - }, - ServerCapabilities: providers.ServerCapabilities{ - PlanDestroy: true, - }, - }, - } -} - -func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse { - return s.schema -} - -func (s simple) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - return resp -} - -func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - return resp -} - -func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - return resp -} - -func (p simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - ty := 
p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) - resp.Diagnostics = resp.Diagnostics.Append(err) - resp.UpgradedState = val - return resp -} - -func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - return resp -} - -func (s simple) Stop() error { - return nil -} - -func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - // just return the same state we received - resp.NewState = req.PriorState - return resp -} - -func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - if req.ProposedNewState.IsNull() { - // destroy op - resp.PlannedState = req.ProposedNewState - - // signal that this resource was properly planned for destruction, - // verifying that the schema capabilities with PlanDestroy took effect. - resp.PlannedPrivate = []byte("destroy planned") - return resp - } - - m := req.ProposedNewState.AsValueMap() - _, ok := m["id"] - if !ok { - m["id"] = cty.UnknownVal(cty.String) - } - - resp.PlannedState = cty.ObjectVal(m) - return resp -} - -func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if req.PlannedState.IsNull() { - // make sure this was transferred from the plan action - if string(req.PlannedPrivate) != "destroy planned" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("resource not planned for destroy, private data %q", req.PlannedPrivate)) - } - - resp.NewState = req.PlannedState - return resp - } - - m := req.PlannedState.AsValueMap() - _, ok := m["id"] - if !ok { - m["id"] = cty.StringVal(time.Now().String()) - } - resp.NewState = cty.ObjectVal(m) - - return resp -} - -func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - resp.Diagnostics = 
resp.Diagnostics.Append(errors.New("unsupported")) - return resp -} - -func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - m := req.Config.AsValueMap() - m["id"] = cty.StringVal("static_id") - resp.State = cty.ObjectVal(m) - return resp -} - -func (s simple) Close() error { - return nil -} diff --git a/internal/provider-simple/main/main.go b/internal/provider-simple/main/main.go deleted file mode 100644 index 8e8ceadff933..000000000000 --- a/internal/provider-simple/main/main.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/internal/grpcwrap" - "github.com/hashicorp/terraform/internal/plugin" - simple "github.com/hashicorp/terraform/internal/provider-simple" - "github.com/hashicorp/terraform/internal/tfplugin5" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - GRPCProviderFunc: func() tfplugin5.ProviderServer { - return grpcwrap.Provider(simple.Provider()) - }, - }) -} diff --git a/internal/provider-simple/provider.go b/internal/provider-simple/provider.go deleted file mode 100644 index 8e32dcc10ef7..000000000000 --- a/internal/provider-simple/provider.go +++ /dev/null @@ -1,138 +0,0 @@ -// simple provider a minimal provider implementation for testing -package simple - -import ( - "errors" - "time" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -type simple struct { - schema providers.GetProviderSchemaResponse -} - -func Provider() providers.Interface { - simpleResource := providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Computed: true, - Type: cty.String, - }, - "value": { - Optional: true, - Type: cty.String, - }, - }, - }, - } - - return simple{ - schema: providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: nil, - 
}, - ResourceTypes: map[string]providers.Schema{ - "simple_resource": simpleResource, - }, - DataSources: map[string]providers.Schema{ - "simple_resource": simpleResource, - }, - ServerCapabilities: providers.ServerCapabilities{ - PlanDestroy: true, - }, - }, - } -} - -func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse { - return s.schema -} - -func (s simple) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - return resp -} - -func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - return resp -} - -func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - return resp -} - -func (p simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() - val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) - resp.Diagnostics = resp.Diagnostics.Append(err) - resp.UpgradedState = val - return resp -} - -func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - return resp -} - -func (s simple) Stop() error { - return nil -} - -func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - // just return the same state we received - resp.NewState = req.PriorState - return resp -} - -func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - if req.ProposedNewState.IsNull() { - // destroy op - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - m := req.ProposedNewState.AsValueMap() - _, ok := m["id"] - if !ok { - m["id"] = cty.UnknownVal(cty.String) - } - - resp.PlannedState = 
cty.ObjectVal(m) - return resp -} - -func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if req.PlannedState.IsNull() { - resp.NewState = req.PlannedState - return resp - } - - m := req.PlannedState.AsValueMap() - _, ok := m["id"] - if !ok { - m["id"] = cty.StringVal(time.Now().String()) - } - resp.NewState = cty.ObjectVal(m) - - return resp -} - -func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported")) - return resp -} - -func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - m := req.Config.AsValueMap() - m["id"] = cty.StringVal("static_id") - resp.State = cty.ObjectVal(m) - return resp -} - -func (s simple) Close() error { - return nil -} diff --git a/internal/provider-terraform/main/main.go b/internal/provider-terraform/main/main.go deleted file mode 100644 index a50fef2d9b9b..000000000000 --- a/internal/provider-terraform/main/main.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/internal/builtin/providers/terraform" - "github.com/hashicorp/terraform/internal/grpcwrap" - "github.com/hashicorp/terraform/internal/plugin" - "github.com/hashicorp/terraform/internal/tfplugin5" -) - -func main() { - // Provide a binary version of the internal terraform provider for testing - plugin.Serve(&plugin.ServeOpts{ - GRPCProviderFunc: func() tfplugin5.ProviderServer { - return grpcwrap.Provider(terraform.NewProvider()) - }, - }) -} diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe b/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe deleted file mode 100644 index daa9e3509f65..000000000000 --- 
a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe +++ /dev/null @@ -1 +0,0 @@ -# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/internal/providers/provider.go b/internal/providers/provider.go deleted file mode 100644 index 5d98d9bf3917..000000000000 --- a/internal/providers/provider.go +++ /dev/null @@ -1,393 +0,0 @@ -package providers - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Interface represents the set of methods required for a complete resource -// provider plugin. -type Interface interface { - // GetSchema returns the complete schema for the provider. - GetProviderSchema() GetProviderSchemaResponse - - // ValidateProviderConfig allows the provider to validate the configuration. - // The ValidateProviderConfigResponse.PreparedConfig field is unused. The - // final configuration is not stored in the state, and any modifications - // that need to be made must be made during the Configure method call. - ValidateProviderConfig(ValidateProviderConfigRequest) ValidateProviderConfigResponse - - // ValidateResourceConfig allows the provider to validate the resource - // configuration values. - ValidateResourceConfig(ValidateResourceConfigRequest) ValidateResourceConfigResponse - - // ValidateDataResourceConfig allows the provider to validate the data source - // configuration values. - ValidateDataResourceConfig(ValidateDataResourceConfigRequest) ValidateDataResourceConfigResponse - - // UpgradeResourceState is called when the state loader encounters an - // instance state whose schema version is less than the one reported by the - // currently-used version of the corresponding provider, and the upgraded - // result is used for any further processing. 
- UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse - - // Configure configures and initialized the provider. - ConfigureProvider(ConfigureProviderRequest) ConfigureProviderResponse - - // Stop is called when the provider should halt any in-flight actions. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provider after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // ReadResource refreshes a resource and returns its current state. - ReadResource(ReadResourceRequest) ReadResourceResponse - - // PlanResourceChange takes the current state and proposed state of a - // resource, and returns the planned final state. - PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse - - // ApplyResourceChange takes the planned state for a resource, which may - // yet contain unknown computed values, and applies the changes returning - // the final state. - ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse - - // ImportResourceState requests that the given resource be imported. - ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse - - // ReadDataSource returns the data source's current state. - ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetProviderSchemaResponse struct { - // Provider is the schema for the provider itself. - Provider Schema - - // ProviderMeta is the schema for the provider's meta info in a module - ProviderMeta Schema - - // ResourceTypes map the resource type name to that type's schema. 
- ResourceTypes map[string]Schema - - // DataSources maps the data source name to that data source's schema. - DataSources map[string]Schema - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // ServerCapabilities lists optional features supported by the provider. - ServerCapabilities ServerCapabilities -} - -// ServerCapabilities allows providers to communicate extra information -// regarding supported protocol features. This is used to indicate availability -// of certain forward-compatible changes which may be optional in a major -// protocol version, but cannot be tested for directly. -type ServerCapabilities struct { - // PlanDestroy signals that this provider expects to receive a - // PlanResourceChange call for resources that are to be destroyed. - PlanDestroy bool -} - -type ValidateProviderConfigRequest struct { - // Config is the raw configuration value for the provider. - Config cty.Value -} - -type ValidateProviderConfigResponse struct { - // PreparedConfig is unused and will be removed with support for plugin protocol v5. - PreparedConfig cty.Value - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ValidateResourceConfigRequest struct { - // TypeName is the name of the resource type to validate. - TypeName string - - // Config is the configuration value to validate, which may contain unknown - // values. - Config cty.Value -} - -type ValidateResourceConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ValidateDataResourceConfigRequest struct { - // TypeName is the name of the data source type to validate. - TypeName string - - // Config is the configuration value to validate, which may contain unknown - // values. 
- Config cty.Value -} - -type ValidateDataResourceConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type UpgradeResourceStateRequest struct { - // TypeName is the name of the resource type being upgraded - TypeName string - - // Version is version of the schema that created the current state. - Version int64 - - // RawStateJSON and RawStateFlatmap contiain the state that needs to be - // upgraded to match the current schema version. Because the schema is - // unknown, this contains only the raw data as stored in the state. - // RawStateJSON is the current json state encoding. - // RawStateFlatmap is the legacy flatmap encoding. - // Only on of these fields may be set for the upgrade request. - RawStateJSON []byte - RawStateFlatmap map[string]string -} - -type UpgradeResourceStateResponse struct { - // UpgradedState is the newly upgraded resource state. - UpgradedState cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ConfigureProviderRequest struct { - // Terraform version is the version string from the running instance of - // terraform. Providers can use TerraformVersion to verify compatibility, - // and to store for informational purposes. - TerraformVersion string - - // Config is the complete configuration value for the provider. - Config cty.Value -} - -type ConfigureProviderResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ReadResourceRequest struct { - // TypeName is the name of the resource type being read. - TypeName string - - // PriorState contains the previously saved state value for this resource. - PriorState cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. 
- Private []byte - - // ProviderMeta is the configuration for the provider_meta block for the - // module and provider this resource belongs to. Its use is defined by - // each provider, and it should not be used without coordination with - // HashiCorp. It is considered experimental and subject to change. - ProviderMeta cty.Value -} - -type ReadResourceResponse struct { - // NewState contains the current state of the resource. - NewState cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -type PlanResourceChangeRequest struct { - // TypeName is the name of the resource type to plan. - TypeName string - - // PriorState is the previously saved state value for this resource. - PriorState cty.Value - - // ProposedNewState is the expected state after the new configuration is - // applied. This is created by directly applying the configuration to the - // PriorState. The provider is then responsible for applying any further - // changes required to create the proposed final state. - ProposedNewState cty.Value - - // Config is the resource configuration, before being merged with the - // PriorState. Any value not explicitly set in the configuration will be - // null. Config is supplied for reference, but Provider implementations - // should prefer the ProposedNewState in most circumstances. - Config cty.Value - - // PriorPrivate is the previously saved private data returned from the - // provider during the last apply. - PriorPrivate []byte - - // ProviderMeta is the configuration for the provider_meta block for the - // module and provider this resource belongs to. Its use is defined by - // each provider, and it should not be used without coordination with - // HashiCorp. It is considered experimental and subject to change. 
- ProviderMeta cty.Value -} - -type PlanResourceChangeResponse struct { - // PlannedState is the expected state of the resource once the current - // configuration is applied. - PlannedState cty.Value - - // RequiresReplace is the list of the attributes that are requiring - // resource replacement. - RequiresReplace []cty.Path - - // PlannedPrivate is an opaque blob that is not interpreted by terraform - // core. This will be saved and relayed back to the provider during - // ApplyResourceChange. - PlannedPrivate []byte - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // LegacyTypeSystem is set only if the provider is using the legacy SDK - // whose type system cannot be precisely mapped into the Terraform type - // system. We use this to bypass certain consistency checks that would - // otherwise fail due to this imprecise mapping. No other provider or SDK - // implementation is permitted to set this. - LegacyTypeSystem bool -} - -type ApplyResourceChangeRequest struct { - // TypeName is the name of the resource type being applied. - TypeName string - - // PriorState is the current state of resource. - PriorState cty.Value - - // Planned state is the state returned from PlanResourceChange, and should - // represent the new state, minus any remaining computed attributes. - PlannedState cty.Value - - // Config is the resource configuration, before being merged with the - // PriorState. Any value not explicitly set in the configuration will be - // null. Config is supplied for reference, but Provider implementations - // should prefer the PlannedState in most circumstances. - Config cty.Value - - // PlannedPrivate is the same value as returned by PlanResourceChange. - PlannedPrivate []byte - - // ProviderMeta is the configuration for the provider_meta block for the - // module and provider this resource belongs to. 
Its use is defined by - // each provider, and it should not be used without coordination with - // HashiCorp. It is considered experimental and subject to change. - ProviderMeta cty.Value -} - -type ApplyResourceChangeResponse struct { - // NewState is the new complete state after applying the planned change. - // In the event of an error, NewState should represent the most recent - // known state of the resource, if it exists. - NewState cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // LegacyTypeSystem is set only if the provider is using the legacy SDK - // whose type system cannot be precisely mapped into the Terraform type - // system. We use this to bypass certain consistency checks that would - // otherwise fail due to this imprecise mapping. No other provider or SDK - // implementation is permitted to set this. - LegacyTypeSystem bool -} - -type ImportResourceStateRequest struct { - // TypeName is the name of the resource type to be imported. - TypeName string - - // ID is a string with which the provider can identify the resource to be - // imported. - ID string -} - -type ImportResourceStateResponse struct { - // ImportedResources contains one or more state values related to the - // imported resource. It is not required that these be complete, only that - // there is enough identifying information for the provider to successfully - // update the states in ReadResource. - ImportedResources []ImportedResource - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// ImportedResource represents an object being imported into Terraform with the -// help of a provider. 
An ImportedObject is a RemoteObject that has been read -// by the provider's import handler but hasn't yet been committed to state. -type ImportedResource struct { - // TypeName is the name of the resource type associated with the - // returned state. It's possible for providers to import multiple related - // types with a single import request. - TypeName string - - // State is the state of the remote object being imported. This may not be - // complete, but must contain enough information to uniquely identify the - // resource. - State cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -// AsInstanceObject converts the receiving ImportedObject into a -// ResourceInstanceObject that has status ObjectReady. -// -// The returned object does not know its own resource type, so the caller must -// retain the ResourceType value from the source object if this information is -// needed. -// -// The returned object also has no dependency addresses, but the caller may -// freely modify the direct fields of the returned object without affecting -// the receiver. -func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { - return &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: ir.State, - Private: ir.Private, - } -} - -type ReadDataSourceRequest struct { - // TypeName is the name of the data source type to Read. - TypeName string - - // Config is the complete configuration for the requested data source. - Config cty.Value - - // ProviderMeta is the configuration for the provider_meta block for the - // module and provider this resource belongs to. Its use is defined by - // each provider, and it should not be used without coordination with - // HashiCorp. It is considered experimental and subject to change. 
- ProviderMeta cty.Value -} - -type ReadDataSourceResponse struct { - // State is the current state of the requested data source. - State cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} diff --git a/internal/providers/schemas.go b/internal/providers/schemas.go deleted file mode 100644 index 213ff4f0e588..000000000000 --- a/internal/providers/schemas.go +++ /dev/null @@ -1,62 +0,0 @@ -package providers - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -// Schemas is an overall container for all of the schemas for all configurable -// objects defined within a particular provider. -// -// The schema for each individual configurable object is represented by nested -// instances of type Schema (singular) within this data structure. -// -// This type used to be known as terraform.ProviderSchema, but moved out here -// as part of our ongoing efforts to shrink down the "terraform" package. -// There's still a type alias at the old name, but we should prefer using -// providers.Schema in new code. However, a consequence of this transitional -// situation is that the "terraform" package still has the responsibility for -// constructing a providers.Schemas object based on responses from the provider -// API; hopefully we'll continue this refactor later so that functions in this -// package totally encapsulate the unmarshalling and include this as part of -// providers.GetProviderSchemaResponse. -type Schemas struct { - Provider *configschema.Block - ProviderMeta *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. 
-func (ss *Schemas) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ss.ResourceTypes[typeName], ss.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ss.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. -func (ss *Schemas) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ss.SchemaForResourceType(addr.Mode, addr.Type) -} - -// Schema pairs a provider or resource schema with that schema's version. -// This is used to be able to upgrade the schema in UpgradeResourceState. -// -// This describes the schema for a single object within a provider. Type -// "Schemas" (plural) instead represents the overall collection of schemas -// for everything within a particular provider. 
-type Schema struct { - Version int64 - Block *configschema.Block -} diff --git a/internal/provisioner-local-exec/main/main.go b/internal/provisioner-local-exec/main/main.go deleted file mode 100644 index 78f14b37afc4..000000000000 --- a/internal/provisioner-local-exec/main/main.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - localexec "github.com/hashicorp/terraform/internal/builtin/provisioners/local-exec" - "github.com/hashicorp/terraform/internal/grpcwrap" - "github.com/hashicorp/terraform/internal/plugin" - "github.com/hashicorp/terraform/internal/tfplugin5" -) - -func main() { - // Provide a binary version of the internal terraform provider for testing - plugin.Serve(&plugin.ServeOpts{ - GRPCProvisionerFunc: func() tfplugin5.ProvisionerServer { - return grpcwrap.Provisioner(localexec.New()) - }, - }) -} diff --git a/internal/provisioners/provisioner.go b/internal/provisioners/provisioner.go deleted file mode 100644 index 190740a7fe49..000000000000 --- a/internal/provisioners/provisioner.go +++ /dev/null @@ -1,82 +0,0 @@ -package provisioners - -import ( - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Interface is the set of methods required for a resource provisioner plugin. -type Interface interface { - // GetSchema returns the schema for the provisioner configuration. - GetSchema() GetSchemaResponse - - // ValidateProvisionerConfig allows the provisioner to validate the - // configuration values. - ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse - - // ProvisionResource runs the provisioner with provided configuration. - // ProvisionResource blocks until the execution is complete. - // If the returned diagnostics contain any errors, the resource will be - // left in a tainted state. 
- ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse - - // Stop is called to interrupt the provisioner. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provisioner after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provisioner contains the schema for this provisioner. - Provisioner *configschema.Block - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// UIOutput provides the Output method for resource provisioner -// plugins to write any output to the UI. -// -// Provisioners may call the Output method multiple times while Apply is in -// progress. It is invalid to call Output after Apply returns. -type UIOutput interface { - Output(string) -} - -type ValidateProvisionerConfigRequest struct { - // Config is the complete configuration to be used for the provisioner. - Config cty.Value -} - -type ValidateProvisionerConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ProvisionResourceRequest struct { - // Config is the complete provisioner configuration. - Config cty.Value - - // Connection contains any information required to access the resource - // instance. - Connection cty.Value - - // UIOutput is used to return output during the Apply operation. - UIOutput UIOutput -} - -type ProvisionResourceResponse struct { - // Diagnostics contains any warnings or errors from the method call. 
- Diagnostics tfdiags.Diagnostics -} diff --git a/internal/registry/client.go b/internal/registry/client.go deleted file mode 100644 index 0204674bbf05..000000000000 --- a/internal/registry/client.go +++ /dev/null @@ -1,327 +0,0 @@ -package registry - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-retryablehttp" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/registry/regsrc" - "github.com/hashicorp/terraform/internal/registry/response" - "github.com/hashicorp/terraform/version" -) - -const ( - xTerraformGet = "X-Terraform-Get" - xTerraformVersion = "X-Terraform-Version" - modulesServiceID = "modules.v1" - providersServiceID = "providers.v1" - - // registryDiscoveryRetryEnvName is the name of the environment variable that - // can be configured to customize number of retries for module and provider - // discovery requests with the remote registry. - registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY" - defaultRetry = 1 - - // registryClientTimeoutEnvName is the name of the environment variable that - // can be configured to customize the timeout duration (seconds) for module - // and provider discovery with the remote registry. - registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT" - - // defaultRequestTimeout is the default timeout duration for requests to the - // remote registry. - defaultRequestTimeout = 10 * time.Second -) - -var ( - tfVersion = version.String() - - discoveryRetry int - requestTimeout time.Duration -) - -func init() { - configureDiscoveryRetry() - configureRequestTimeout() -} - -// Client provides methods to query Terraform Registries. 
-type Client struct { - // this is the client to be used for all requests. - client *retryablehttp.Client - - // services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - services *disco.Disco -} - -// NewClient returns a new initialized registry client. -func NewClient(services *disco.Disco, client *http.Client) *Client { - if services == nil { - services = disco.New() - } - - if client == nil { - client = httpclient.New() - client.Timeout = requestTimeout - } - retryableClient := retryablehttp.NewClient() - retryableClient.HTTPClient = client - retryableClient.RetryMax = discoveryRetry - retryableClient.RequestLogHook = requestLogHook - retryableClient.ErrorHandler = maxRetryErrorHandler - - logOutput := logging.LogOutput() - retryableClient.Logger = log.New(logOutput, "", log.Flags()) - - services.Transport = retryableClient.HTTPClient.Transport - - services.SetUserAgent(httpclient.TerraformUserAgent(version.String())) - - return &Client{ - client: retryableClient, - services: services, - } -} - -// Discover queries the host, and returns the url for the registry. -func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { - service, err := c.services.DiscoverServiceURL(host, serviceID) - if err != nil { - return nil, &ServiceUnreachableError{err} - } - if !strings.HasSuffix(service.Path, "/") { - service.Path += "/" - } - return service, nil -} - -// ModuleVersions queries the registry for a module, and returns the available versions. 
-func (c *Client) ModuleVersions(ctx context.Context, module *regsrc.Module) (*response.ModuleVersions, error) { - host, err := module.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, modulesServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join(module.Module(), "versions")) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching module versions from %q", service) - - req, err := retryablehttp.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // OK - case http.StatusNotFound: - return nil, &errModuleNotFound{addr: module} - default: - return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) - } - - var versions response.ModuleVersions - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&versions); err != nil { - return nil, err - } - - for _, mod := range versions.Modules { - for _, v := range mod.Versions { - log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) - } - } - - return &versions, nil -} - -func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { - creds, err := c.services.CredentialsForHost(host) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) - return - } - - if creds != nil { - creds.PrepareRequest(req) - } -} - -// ModuleLocation find the download location for a specific version module. -// This returns a string, because the final location may contain special go-getter syntax. 
-func (c *Client) ModuleLocation(ctx context.Context, module *regsrc.Module, version string) (string, error) { - host, err := module.SvcHost() - if err != nil { - return "", err - } - - service, err := c.Discover(host, modulesServiceID) - if err != nil { - return "", err - } - - var p *url.URL - if version == "" { - p, err = url.Parse(path.Join(module.Module(), "download")) - } else { - p, err = url.Parse(path.Join(module.Module(), version, "download")) - } - if err != nil { - return "", err - } - download := service.ResolveReference(p) - - log.Printf("[DEBUG] looking up module location from %q", download) - - req, err := retryablehttp.NewRequest("GET", download.String(), nil) - if err != nil { - return "", err - } - - req = req.WithContext(ctx) - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // there should be no body, but save it for logging - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("error reading response body from registry: %s", err) - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusNoContent: - // OK - case http.StatusNotFound: - return "", fmt.Errorf("module %q version %q not found", module, version) - default: - // anything else is an error: - return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) - } - - // the download location is in the X-Terraform-Get header - location := resp.Header.Get(xTerraformGet) - if location == "" { - return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) - } - - // If location looks like it's trying to be a relative URL, treat it as - // one. - // - // We don't do this for just _any_ location, since the X-Terraform-Get - // header is a go-getter location rather than a URL, and so not all - // possible values will parse reasonably as URLs.) 
- // - // When used in conjunction with go-getter we normally require this header - // to be an absolute URL, but we are more liberal here because third-party - // registry implementations may not "know" their own absolute URLs if - // e.g. they are running behind a reverse proxy frontend, or such. - if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { - locationURL, err := url.Parse(location) - if err != nil { - return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) - } - locationURL = download.ResolveReference(locationURL) - location = locationURL.String() - } - - return location, nil -} - -// configureDiscoveryRetry configures the number of retries the registry client -// will attempt for requests with retryable errors, like 502 status codes -func configureDiscoveryRetry() { - discoveryRetry = defaultRetry - - if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { - retry, err := strconv.Atoi(v) - if err == nil && retry > 0 { - discoveryRetry = retry - } - } -} - -func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { - if i > 0 { - logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") - } -} - -func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { - // Close the body per library instructions - if resp != nil { - resp.Body.Close() - } - - // Additional error detail: if we have a response, use the status code; - // if we have an error, use that; otherwise nothing. We will never have - // both response and error. - var errMsg string - if resp != nil { - errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, resp.Request.URL) - } else if err != nil { - errMsg = fmt.Sprintf(": %s", err) - } - - // This function is always called with numTries=RetryMax+1. If we made any - // retry attempts, include that in the error message. 
- if numTries > 1 { - return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s", - numTries, errMsg) - } - return resp, fmt.Errorf("the request failed, please try again later%s", errMsg) -} - -// configureRequestTimeout configures the registry client request timeout from -// environment variables -func configureRequestTimeout() { - requestTimeout = defaultRequestTimeout - - if v := os.Getenv(registryClientTimeoutEnvName); v != "" { - timeout, err := strconv.Atoi(v) - if err == nil && timeout > 0 { - requestTimeout = time.Duration(timeout) * time.Second - } - } -} diff --git a/internal/registry/client_test.go b/internal/registry/client_test.go deleted file mode 100644 index da3055110a0c..000000000000 --- a/internal/registry/client_test.go +++ /dev/null @@ -1,369 +0,0 @@ -package registry - -import ( - "context" - "net/http" - "os" - "strings" - "testing" - "time" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/registry/regsrc" - "github.com/hashicorp/terraform/internal/registry/test" - tfversion "github.com/hashicorp/terraform/version" -) - -func TestConfigureDiscoveryRetry(t *testing.T) { - t.Run("default retry", func(t *testing.T) { - if discoveryRetry != defaultRetry { - t.Fatalf("expected retry %q, got %q", defaultRetry, discoveryRetry) - } - - rc := NewClient(nil, nil) - if rc.client.RetryMax != defaultRetry { - t.Fatalf("expected client retry %q, got %q", - defaultRetry, rc.client.RetryMax) - } - }) - - t.Run("configured retry", func(t *testing.T) { - defer func(retryEnv string) { - os.Setenv(registryDiscoveryRetryEnvName, retryEnv) - discoveryRetry = defaultRetry - }(os.Getenv(registryDiscoveryRetryEnvName)) - os.Setenv(registryDiscoveryRetryEnvName, "2") - - configureDiscoveryRetry() - expected := 2 - if discoveryRetry != expected { - t.Fatalf("expected retry %q, got %q", - expected, 
discoveryRetry) - } - - rc := NewClient(nil, nil) - if rc.client.RetryMax != expected { - t.Fatalf("expected client retry %q, got %q", - expected, rc.client.RetryMax) - } - }) -} - -func TestConfigureRegistryClientTimeout(t *testing.T) { - t.Run("default timeout", func(t *testing.T) { - if requestTimeout != defaultRequestTimeout { - t.Fatalf("expected timeout %q, got %q", - defaultRequestTimeout.String(), requestTimeout.String()) - } - - rc := NewClient(nil, nil) - if rc.client.HTTPClient.Timeout != defaultRequestTimeout { - t.Fatalf("expected client timeout %q, got %q", - defaultRequestTimeout.String(), rc.client.HTTPClient.Timeout.String()) - } - }) - - t.Run("configured timeout", func(t *testing.T) { - defer func(timeoutEnv string) { - os.Setenv(registryClientTimeoutEnvName, timeoutEnv) - requestTimeout = defaultRequestTimeout - }(os.Getenv(registryClientTimeoutEnvName)) - os.Setenv(registryClientTimeoutEnvName, "20") - - configureRequestTimeout() - expected := 20 * time.Second - if requestTimeout != expected { - t.Fatalf("expected timeout %q, got %q", - expected, requestTimeout.String()) - } - - rc := NewClient(nil, nil) - if rc.client.HTTPClient.Timeout != expected { - t.Fatalf("expected client timeout %q, got %q", - expected, rc.client.HTTPClient.Timeout.String()) - } - }) -} - -func TestLookupModuleVersions(t *testing.T) { - server := test.Registry() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - // test with and without a hostname - for _, src := range []string{ - "example.com/test-versions/name/provider", - "test-versions/name/provider", - } { - modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - resp, err := client.ModuleVersions(context.Background(), modsrc) - if err != nil { - t.Fatal(err) - } - - if len(resp.Modules) != 1 { - t.Fatal("expected 1 module, got", len(resp.Modules)) - } - - mod := resp.Modules[0] - name := "test-versions/name/provider" - if mod.Source != name { - 
t.Fatalf("expected module name %q, got %q", name, mod.Source) - } - - if len(mod.Versions) != 4 { - t.Fatal("expected 4 versions, got", len(mod.Versions)) - } - - for _, v := range mod.Versions { - _, err := version.NewVersion(v.Version) - if err != nil { - t.Fatalf("invalid version %q: %s", v.Version, err) - } - } - } -} - -func TestInvalidRegistry(t *testing.T) { - server := test.Registry() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - src := "non-existent.localhost.localdomain/test-versions/name/provider" - modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - if _, err := client.ModuleVersions(context.Background(), modsrc); err == nil { - t.Fatal("expected error") - } -} - -func TestRegistryAuth(t *testing.T) { - server := test.Registry() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - src := "private/name/provider" - mod, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - _, err = client.ModuleVersions(context.Background(), mod) - if err != nil { - t.Fatal(err) - } - _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") - if err != nil { - t.Fatal(err) - } - - // Also test without a credentials source - client.services.SetCredentialsSource(nil) - - // both should fail without auth - _, err = client.ModuleVersions(context.Background(), mod) - if err == nil { - t.Fatal("expected error") - } - _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") - if err == nil { - t.Fatal("expected error") - } -} - -func TestLookupModuleLocationRelative(t *testing.T) { - server := test.Registry() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - src := "relative/foo/bar" - mod, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - got, err := client.ModuleLocation(context.Background(), mod, "0.2.0") - if err != nil { - t.Fatal(err) - } - - want := server.URL + "/relative-path" - if got != 
want { - t.Errorf("wrong location %s; want %s", got, want) - } -} - -func TestAccLookupModuleVersions(t *testing.T) { - if os.Getenv("TF_ACC") == "" { - t.Skip() - } - regDisco := disco.New() - regDisco.SetUserAgent(httpclient.TerraformUserAgent(tfversion.String())) - - // test with and without a hostname - for _, src := range []string{ - "terraform-aws-modules/vpc/aws", - regsrc.PublicRegistryHost.String() + "/terraform-aws-modules/vpc/aws", - } { - modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - s := NewClient(regDisco, nil) - resp, err := s.ModuleVersions(context.Background(), modsrc) - if err != nil { - t.Fatal(err) - } - - if len(resp.Modules) != 1 { - t.Fatal("expected 1 module, got", len(resp.Modules)) - } - - mod := resp.Modules[0] - name := "terraform-aws-modules/vpc/aws" - if mod.Source != name { - t.Fatalf("expected module name %q, got %q", name, mod.Source) - } - - if len(mod.Versions) == 0 { - t.Fatal("expected multiple versions, got 0") - } - - for _, v := range mod.Versions { - _, err := version.NewVersion(v.Version) - if err != nil { - t.Fatalf("invalid version %q: %s", v.Version, err) - } - } - } -} - -// the error should reference the config source exactly, not the discovered path. 
-func TestLookupLookupModuleError(t *testing.T) { - server := test.Registry() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - // this should not be found in the registry - src := "bad/local/path" - mod, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - - // Instrument CheckRetry to make sure 404s are not retried - retries := 0 - oldCheck := client.client.CheckRetry - client.client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { - if retries > 0 { - t.Fatal("retried after module not found") - } - retries++ - return oldCheck(ctx, resp, err) - } - - _, err = client.ModuleLocation(context.Background(), mod, "0.2.0") - if err == nil { - t.Fatal("expected error") - } - - // check for the exact quoted string to ensure we didn't prepend a hostname. - if !strings.Contains(err.Error(), `"bad/local/path"`) { - t.Fatal("error should not include the hostname. got:", err) - } -} - -func TestLookupModuleRetryError(t *testing.T) { - server := test.RegistryRetryableErrorsServer() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - src := "example.com/test-versions/name/provider" - modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - resp, err := client.ModuleVersions(context.Background(), modsrc) - if err == nil { - t.Fatal("expected requests to exceed retry", err) - } - if resp != nil { - t.Fatal("unexpected response", *resp) - } - - // verify maxRetryErrorHandler handler returned the error - if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { - t.Fatal("unexpected error, got:", err) - } -} - -func TestLookupModuleNoRetryError(t *testing.T) { - // Disable retries - discoveryRetry = 0 - defer configureDiscoveryRetry() - - server := test.RegistryRetryableErrorsServer() - defer server.Close() - - client := NewClient(test.Disco(server), nil) - - src := "example.com/test-versions/name/provider" 
- modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - resp, err := client.ModuleVersions(context.Background(), modsrc) - if err == nil { - t.Fatal("expected request to fail", err) - } - if resp != nil { - t.Fatal("unexpected response", *resp) - } - - // verify maxRetryErrorHandler handler returned the error - if !strings.Contains(err.Error(), "the request failed, please try again later") { - t.Fatal("unexpected error, got:", err) - } -} - -func TestLookupModuleNetworkError(t *testing.T) { - server := test.RegistryRetryableErrorsServer() - client := NewClient(test.Disco(server), nil) - - // Shut down the server to simulate network failure - server.Close() - - src := "example.com/test-versions/name/provider" - modsrc, err := regsrc.ParseModuleSource(src) - if err != nil { - t.Fatal(err) - } - resp, err := client.ModuleVersions(context.Background(), modsrc) - if err == nil { - t.Fatal("expected request to fail", err) - } - if resp != nil { - t.Fatal("unexpected response", *resp) - } - - // verify maxRetryErrorHandler handler returned the correct error - if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { - t.Fatal("unexpected error, got:", err) - } -} diff --git a/internal/registry/errors.go b/internal/registry/errors.go deleted file mode 100644 index a35eb717ed25..000000000000 --- a/internal/registry/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -package registry - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/registry/regsrc" -) - -type errModuleNotFound struct { - addr *regsrc.Module -} - -func (e *errModuleNotFound) Error() string { - return fmt.Sprintf("module %s not found", e.addr) -} - -// IsModuleNotFound returns true only if the given error is a "module not found" -// error. This allows callers to recognize this particular error condition -// as distinct from operational errors such as poor network connectivity. 
-func IsModuleNotFound(err error) bool { - _, ok := err.(*errModuleNotFound) - return ok -} - -// IsServiceNotProvided returns true only if the given error is a "service not provided" -// error. This allows callers to recognize this particular error condition -// as distinct from operational errors such as poor network connectivity. -func IsServiceNotProvided(err error) bool { - _, ok := err.(*disco.ErrServiceNotProvided) - return ok -} - -// ServiceUnreachableError Registry service is unreachable -type ServiceUnreachableError struct { - err error -} - -func (e *ServiceUnreachableError) Error() string { - return e.err.Error() -} - -// IsServiceUnreachable returns true if the registry/discovery service was unreachable -func IsServiceUnreachable(err error) bool { - _, ok := err.(*ServiceUnreachableError) - return ok -} diff --git a/internal/registry/regsrc/module.go b/internal/registry/regsrc/module.go deleted file mode 100644 index 3ffa002bba07..000000000000 --- a/internal/registry/regsrc/module.go +++ /dev/null @@ -1,245 +0,0 @@ -package regsrc - -import ( - "errors" - "fmt" - "regexp" - "strings" - - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/internal/addrs" -) - -var ( - ErrInvalidModuleSource = errors.New("not a valid registry module source") - - // nameSubRe is the sub-expression that matches a valid module namespace or - // name. It's strictly a super-set of what GitHub allows for user/org and - // repo names respectively, but more restrictive than our original repo-name - // regex which allowed periods but could cause ambiguity with hostname - // prefixes. It does not anchor the start or end so it can be composed into - // more complex RegExps below. Alphanumeric with - and _ allowed in non - // leading or trailing positions. Max length 64 chars. (GitHub username is - // 38 max.) - nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" - - // providerSubRe is the sub-expression that matches a valid provider. 
It - // does not anchor the start or end so it can be composed into more complex - // RegExps below. Only lowercase chars and digits are supported in practice. - // Max length 64 chars. - providerSubRe = "[0-9a-z]{1,64}" - - // moduleSourceRe is a regular expression that matches the basic - // namespace/name/provider[//...] format for registry sources. It assumes - // any FriendlyHost prefix has already been removed if present. - moduleSourceRe = regexp.MustCompile( - fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", - nameSubRe, nameSubRe, providerSubRe)) - - // NameRe is a regular expression defining the format allowed for namespace - // or name fields in module registry implementations. - NameRe = regexp.MustCompile("^" + nameSubRe + "$") - - // ProviderRe is a regular expression defining the format allowed for - // provider fields in module registry implementations. - ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") - - // these hostnames are not allowed as registry sources, because they are - // already special case module sources in terraform. - disallowed = map[string]bool{ - "github.com": true, - "bitbucket.org": true, - } -) - -// Module describes a Terraform Registry Module source. -type Module struct { - // RawHost is the friendly host prefix if one was present. It might be nil - // if the original source had no host prefix which implies - // PublicRegistryHost but is distinct from having an actual pointer to - // PublicRegistryHost since it encodes the fact the original string didn't - // include a host prefix at all which is significant for recovering actual - // input not just normalized form. Most callers should access it with Host() - // which will return public registry host instance if it's nil. - RawHost *FriendlyHost - RawNamespace string - RawName string - RawProvider string - RawSubmodule string -} - -// NewModule construct a new module source from separate parts. Pass empty -// string if host or submodule are not needed. 
-func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { - m := &Module{ - RawNamespace: namespace, - RawName: name, - RawProvider: provider, - RawSubmodule: submodule, - } - if host != "" { - h := NewFriendlyHost(host) - if h != nil { - fmt.Println("HOST:", h) - if !h.Valid() || disallowed[h.Display()] { - return nil, ErrInvalidModuleSource - } - } - m.RawHost = h - } - return m, nil -} - -// ModuleFromModuleSourceAddr is an adapter to automatically transform the -// modern representation of registry module addresses, -// addrs.ModuleSourceRegistry, into the legacy representation regsrc.Module. -// -// Note that the new-style model always does normalization during parsing and -// does not preserve the raw user input at all, and so although the fields -// of regsrc.Module are all called "Raw...", initializing a Module indirectly -// through an addrs.ModuleSourceRegistry will cause those values to be the -// normalized ones, not the raw user input. -// -// Use this only for temporary shims to call into existing code that still -// uses regsrc.Module. Eventually all other subsystems should be updated to -// use addrs.ModuleSourceRegistry instead, and then package regsrc can be -// removed altogether. -func ModuleFromModuleSourceAddr(addr addrs.ModuleSourceRegistry) *Module { - ret := ModuleFromRegistryPackageAddr(addr.Package) - ret.RawSubmodule = addr.Subdir - return ret -} - -// ModuleFromRegistryPackageAddr is similar to ModuleFromModuleSourceAddr, but -// it works with just the isolated registry package address, and not the -// full source address. -// -// The practical implication of that is that RawSubmodule will always be -// the empty string in results from this function, because "Submodule" maps -// to "Subdir" and that's a module source address concept, not a module -// package concept. 
In practice this typically doesn't matter because the -// registry client ignores the RawSubmodule field anyway; that's a concern -// for the higher-level module installer to deal with. -func ModuleFromRegistryPackageAddr(addr addrs.ModuleRegistryPackage) *Module { - return &Module{ - RawHost: NewFriendlyHost(addr.Host.String()), - RawNamespace: addr.Namespace, - RawName: addr.Name, - RawProvider: addr.TargetSystem, // this field was never actually enforced to be a provider address, so now has a more general name - } -} - -// ParseModuleSource attempts to parse source as a Terraform registry module -// source. If the string is not found to be in a valid format, -// ErrInvalidModuleSource is returned. Note that this can only be used on -// "input" strings, e.g. either ones supplied by the user or potentially -// normalised but in Display form (unicode). It will fail to parse a source with -// a punycoded domain since this is not permitted input from a user. If you have -// an already normalized string internally, you can compare it without parsing -// by comparing with the normalized version of the subject with the normal -// string equality operator. -func ParseModuleSource(source string) (*Module, error) { - // See if there is a friendly host prefix. - host, rest := ParseFriendlyHost(source) - if host != nil { - if !host.Valid() || disallowed[host.Display()] { - return nil, ErrInvalidModuleSource - } - } - - matches := moduleSourceRe.FindStringSubmatch(rest) - if len(matches) < 4 { - return nil, ErrInvalidModuleSource - } - - m := &Module{ - RawHost: host, - RawNamespace: matches[1], - RawName: matches[2], - RawProvider: matches[3], - } - - if len(matches) == 5 { - m.RawSubmodule = matches[4] - } - - return m, nil -} - -// Display returns the source formatted for display to the user in CLI or web -// output. 
-func (m *Module) Display() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) -} - -// Normalized returns the source formatted for internal reference or comparison. -func (m *Module) Normalized() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) -} - -// String returns the source formatted as the user originally typed it assuming -// it was parsed from user input. -func (m *Module) String() string { - // Don't normalize public registry hostname - leave it exactly like the user - // input it. - hostPrefix := "" - if m.RawHost != nil { - hostPrefix = m.RawHost.String() + "/" - } - return m.formatWithPrefix(hostPrefix, true) -} - -// Equal compares the module source against another instance taking -// normalization into account. -func (m *Module) Equal(other *Module) bool { - return m.Normalized() == other.Normalized() -} - -// Host returns the FriendlyHost object describing which registry this module is -// in. If the original source string had not host component this will return the -// PublicRegistryHost. -func (m *Module) Host() *FriendlyHost { - if m.RawHost == nil { - return PublicRegistryHost - } - return m.RawHost -} - -func (m *Module) normalizedHostPrefix(host string) string { - if m.Host().Equal(PublicRegistryHost) { - return "" - } - return host + "/" -} - -func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { - suffix := "" - if m.RawSubmodule != "" { - suffix = "//" + m.RawSubmodule - } - str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, - m.RawProvider, suffix) - - // lower case by default - if !preserveCase { - return strings.ToLower(str) - } - return str -} - -// Module returns just the registry ID of the module, without a hostname or -// suffix. -func (m *Module) Module() string { - return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) -} - -// SvcHost returns the svchost.Hostname for this module. 
Since FriendlyHost may -// contain an invalid hostname, this also returns an error indicating if it -// could be converted to a svchost.Hostname. If no host is specified, the -// default PublicRegistryHost is returned. -func (m *Module) SvcHost() (svchost.Hostname, error) { - if m.RawHost == nil { - return svchost.ForComparison(PublicRegistryHost.Raw) - } - return svchost.ForComparison(m.RawHost.Raw) -} diff --git a/internal/states/checks.go b/internal/states/checks.go deleted file mode 100644 index 181871a766da..000000000000 --- a/internal/states/checks.go +++ /dev/null @@ -1,182 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" -) - -// CheckResults represents a summary snapshot of the status of a set of checks -// declared in configuration, updated after each Terraform Core run that -// changes the state or remote system in a way that might impact the check -// results. -// -// Unlike a checks.State, this type only tracks the overall results for -// each checkable object and doesn't aim to preserve the identity of individual -// checks in the configuration. For our UI reporting purposes, it is entire -// objects that pass or fail based on their declared checks; the individual -// checks have no durable identity between runs, and so are only a language -// design convenience to help authors describe various independent conditions -// with different failure messages each. -// -// CheckResults should typically be considered immutable once constructed: -// instead of updating it in-place,instead construct an entirely new -// CheckResults object based on a fresh checks.State. -type CheckResults struct { - // ConfigResults has all of the individual check results grouped by the - // configuration object they relate to. 
- // - // The top-level map here will always have a key for every configuration - // object that includes checks at the time of evaluating the results, - // even if there turned out to be no instances of that object and - // therefore no individual check results. - ConfigResults addrs.Map[addrs.ConfigCheckable, *CheckResultAggregate] -} - -// CheckResultAggregate represents both the overall result for a particular -// configured object that has checks and the individual checkable objects -// it declared, if any. -type CheckResultAggregate struct { - // Status is the aggregate status across all objects. - // - // Sometimes an error or check failure during planning will prevent - // Terraform Core from even determining the individual checkable objects - // associated with a downstream configuration object, and that situation is - // described here by this Status being checks.StatusUnknown and there being - // no elements in the ObjectResults field. - // - // That's different than Terraform Core explicitly reporting that there are - // no instances of the config object (e.g. a resource with count = 0), - // which leads to the aggregate status being checks.StatusPass while - // ObjectResults is still empty. - Status checks.Status - - ObjectResults addrs.Map[addrs.Checkable, *CheckResultObject] -} - -// CheckResultObject is the check status for a single checkable object. -// -// This aggregates together all of the checks associated with a particular -// object into a single pass/fail/error/unknown result, because checkable -// objects have durable addresses that can survive between runs, but their -// individual checks do not. (Module authors are free to reorder their checks -// for a particular object in the configuration with no change in meaning.) -type CheckResultObject struct { - // Status is the check status of the checkable object, derived from the - // results of all of its individual checks. 
- Status checks.Status - - // FailureMessages is an optional set of module-author-defined messages - // describing the problems that the checks detected, for objects whose - // status is checks.StatusFail. - // - // (checks.StatusError problems get reported as normal diagnostics during - // evaluation instead, and so will not appear here.) - FailureMessages []string -} - -// NewCheckResults constructs a new states.CheckResults object that is a -// snapshot of the check statuses recorded in the given checks.State object. -// -// This should be called only after a Terraform Core run has completed and -// recorded any results from running the checks in the given object. -func NewCheckResults(source *checks.State) *CheckResults { - ret := &CheckResults{ - ConfigResults: addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate](), - } - - for _, configAddr := range source.AllConfigAddrs() { - aggr := &CheckResultAggregate{ - Status: source.AggregateCheckStatus(configAddr), - ObjectResults: addrs.MakeMap[addrs.Checkable, *CheckResultObject](), - } - - for _, objectAddr := range source.ObjectAddrs(configAddr) { - obj := &CheckResultObject{ - Status: source.ObjectCheckStatus(objectAddr), - FailureMessages: source.ObjectFailureMessages(objectAddr), - } - aggr.ObjectResults.Put(objectAddr, obj) - } - - ret.ConfigResults.Put(configAddr, aggr) - } - - // If there aren't actually any configuration objects then we'll just - // leave the map as a whole nil, because having it be zero-value makes - // life easier for deep comparisons in unit tests elsewhere. - if ret.ConfigResults.Len() == 0 { - ret.ConfigResults.Elems = nil - } - - return ret -} - -// GetObjectResult looks up the result for a single object, or nil if there -// is no such object. 
-// -// In main code we shouldn't typically need to look up individual objects -// like this, since we'll usually be reporting check results in an aggregate -// form, but determining the result of a particular object is useful in our -// internal unit tests, and so this is here primarily for that purpose. -func (r *CheckResults) GetObjectResult(objectAddr addrs.Checkable) *CheckResultObject { - configAddr := objectAddr.ConfigCheckable() - - aggr := r.ConfigResults.Get(configAddr) - if aggr == nil { - return nil - } - - return aggr.ObjectResults.Get(objectAddr) -} - -func (r *CheckResults) DeepCopy() *CheckResults { - if r == nil { - return nil - } - ret := &CheckResults{} - if r.ConfigResults.Elems == nil { - return ret - } - - ret.ConfigResults = addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate]() - - for _, configElem := range r.ConfigResults.Elems { - aggr := &CheckResultAggregate{ - Status: configElem.Value.Status, - } - - if configElem.Value.ObjectResults.Elems != nil { - aggr.ObjectResults = addrs.MakeMap[addrs.Checkable, *CheckResultObject]() - - for _, objectElem := range configElem.Value.ObjectResults.Elems { - result := &CheckResultObject{ - Status: objectElem.Value.Status, - - // NOTE: We don't deep-copy this slice because it's - // immutable once constructed by convention. - FailureMessages: objectElem.Value.FailureMessages, - } - aggr.ObjectResults.Put(objectElem.Key, result) - } - } - - ret.ConfigResults.Put(configElem.Key, aggr) - } - - return ret -} - -// ObjectAddrsKnown determines whether the set of objects recorded in this -// aggregate is accurate (true) or if it's incomplete as a result of the -// run being interrupted before instance expansion. -func (r *CheckResultAggregate) ObjectAddrsKnown() bool { - if r.ObjectResults.Len() != 0 { - // If there are any object results at all then we definitely know. 
- return true - } - - // If we don't have any object addresses then we distinguish a known - // empty set of objects from an unknown set of objects by the aggregate - // status being unknown. - return r.Status != checks.StatusUnknown -} diff --git a/internal/states/module.go b/internal/states/module.go deleted file mode 100644 index 2f6242ace522..000000000000 --- a/internal/states/module.go +++ /dev/null @@ -1,321 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" -) - -// Module is a container for the states of objects within a particular module. -type Module struct { - Addr addrs.ModuleInstance - - // Resources contains the state for each resource. The keys in this map are - // an implementation detail and must not be used by outside callers. - Resources map[string]*Resource - - // OutputValues contains the state for each output value. The keys in this - // map are output value names. - OutputValues map[string]*OutputValue - - // LocalValues contains the value for each named output value. The keys - // in this map are local value names. - LocalValues map[string]cty.Value -} - -// NewModule constructs an empty module state for the given module address. -func NewModule(addr addrs.ModuleInstance) *Module { - return &Module{ - Addr: addr, - Resources: map[string]*Resource{}, - OutputValues: map[string]*OutputValue{}, - LocalValues: map[string]cty.Value{}, - } -} - -// Resource returns the state for the resource with the given address within -// the receiving module state, or nil if the requested resource is not tracked -// in the state. -func (ms *Module) Resource(addr addrs.Resource) *Resource { - return ms.Resources[addr.String()] -} - -// ResourceInstance returns the state for the resource instance with the given -// address within the receiving module state, or nil if the requested instance -// is not tracked in the state. 
-func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { - rs := ms.Resource(addr.Resource) - if rs == nil { - return nil - } - return rs.Instance(addr.Key) -} - -// SetResourceProvider updates the resource-level metadata for the resource -// with the given address, creating the resource state for it if it doesn't -// already exist. -func (ms *Module) SetResourceProvider(addr addrs.Resource, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr) - if rs == nil { - rs = &Resource{ - Addr: addr.Absolute(ms.Addr), - Instances: map[addrs.InstanceKey]*ResourceInstance{}, - } - ms.Resources[addr.String()] = rs - } - - rs.ProviderConfig = provider -} - -// RemoveResource removes the entire state for the given resource, taking with -// it any instances associated with the resource. This should generally be -// called only for resource objects whose instances have all been destroyed. -func (ms *Module) RemoveResource(addr addrs.Resource) { - delete(ms.Resources, addr.String()) -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simultaneously -// updating the recorded provider configuration address and dependencies. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance will be removed altogether. -// -// The provider address is a resource-wide setting and is updated for all other -// instances of the same resource as a side-effect of this call. -func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr.Resource) - // if the resource is nil and the object is nil, don't do anything! 
- // you'll probably just cause issues - if obj == nil && rs == nil { - return - } - if obj == nil && rs != nil { - // does the resource have any other objects? - // if not then delete the whole resource - if len(rs.Instances) == 0 { - delete(ms.Resources, addr.Resource.String()) - return - } - // check for an existing resource, now that we've ensured that rs.Instances is more than 0/not nil - is := rs.Instance(addr.Key) - if is == nil { - // if there is no instance on the resource with this address and obj is nil, return and change nothing - return - } - // if we have an instance, update the current - is.Current = obj - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - // Delete the resource if it has no instances, but only if NoEach - if len(rs.Instances) == 0 { - delete(ms.Resources, addr.Resource.String()) - return - } - } - // Nothing more to do here, so return! - return - } - if rs == nil && obj != nil { - // We don't have have a resource so make one, which is a side effect of setResourceMeta - ms.SetResourceProvider(addr.Resource, provider) - // now we have a resource! so update the rs value to point to it - rs = ms.Resource(addr.Resource) - } - // Get our instance from the resource; it could be there or not at this point - is := rs.Instance(addr.Key) - if is == nil { - // if we don't have a resource, create one and add to the instances - is = rs.CreateInstance(addr.Key) - // update the resource meta because we have a new - ms.SetResourceProvider(addr.Resource, provider) - } - // Update the resource's ProviderConfig, in case the provider has updated - rs.ProviderConfig = provider - is.Current = obj -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. 
For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceProvider(addr.Resource, provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - if obj != nil { - is.Deposed[key] = obj - } else { - delete(is.Deposed, key) - } - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceAll removes the record of all objects associated with -// the specified resource instance, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - delete(rs.Instances, addr.Key) - - if len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. 
We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - is := rs.Instance(addr.Key) - if is == nil { - return - } - delete(is.Deposed, key) - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// deposeResourceInstanceObject is the real implementation of -// SyncState.DeposeResourceInstanceObject. -func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { - is := ms.ResourceInstance(addr) - if is == nil { - return NotDeposed - } - return is.deposeCurrentObject(forceKey) -} - -// maybeRestoreResourceInstanceDeposed is the real implementation of -// SyncState.MaybeRestoreResourceInstanceDeposed. -func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { - rs := ms.Resource(addr.Resource) - if rs == nil { - return false - } - is := rs.Instance(addr.Key) - if is == nil { - return false - } - if is.Current != nil { - return false - } - if len(is.Deposed) == 0 { - return false - } - is.Current = is.Deposed[key] - delete(is.Deposed, key) - return true -} - -// SetOutputValue writes an output value into the state, overwriting any -// existing value of the same name. 
-func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { - os := &OutputValue{ - Addr: addrs.AbsOutputValue{ - Module: ms.Addr, - OutputValue: addrs.OutputValue{ - Name: name, - }, - }, - Value: value, - Sensitive: sensitive, - } - ms.OutputValues[name] = os - return os -} - -// RemoveOutputValue removes the output value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveOutputValue(name string) { - delete(ms.OutputValues, name) -} - -// SetLocalValue writes a local value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetLocalValue(name string, value cty.Value) { - ms.LocalValues[name] = value -} - -// RemoveLocalValue removes the local value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveLocalValue(name string) { - delete(ms.LocalValues, name) -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// You probably shouldn't call this! See the method of the same name on -// type State for more information on what this is for and the rare situations -// where it is safe to use. -func (ms *Module) PruneResourceHusks() { - for _, rs := range ms.Resources { - if len(rs.Instances) == 0 { - ms.RemoveResource(rs.Addr.Resource) - } - } -} - -// empty returns true if the receving module state is contributing nothing -// to the state. In other words, it returns true if the module could be -// removed from the state altogether without changing the meaning of the state. -// -// In practice a module containing no objects is the same as a non-existent -// module, and so we can opportunistically clean up once a module becomes -// empty on the assumption that it will be re-added if needed later. 
-func (ms *Module) empty() bool { - if ms == nil { - return true - } - - // This must be updated to cover any new collections added to Module - // in future. - return (len(ms.Resources) == 0 && - len(ms.OutputValues) == 0 && - len(ms.LocalValues) == 0) -} diff --git a/internal/states/output_value.go b/internal/states/output_value.go deleted file mode 100644 index 541595164805..000000000000 --- a/internal/states/output_value.go +++ /dev/null @@ -1,16 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// OutputValue represents the state of a particular output value. -// -// It is not valid to mutate an OutputValue object once it has been created. -// Instead, create an entirely new OutputValue to replace the previous one. -type OutputValue struct { - Addr addrs.AbsOutputValue - Value cty.Value - Sensitive bool -} diff --git a/internal/states/remote/state.go b/internal/states/remote/state.go deleted file mode 100644 index d8da9d827adb..000000000000 --- a/internal/states/remote/state.go +++ /dev/null @@ -1,265 +0,0 @@ -package remote - -import ( - "bytes" - "fmt" - "log" - "sync" - - uuid "github.com/hashicorp/go-uuid" - - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/internal/terraform" -) - -// State implements the State interfaces in the state package to handle -// reading and writing the remote state. This State on its own does no -// local caching so every persist will go to the remote storage and local -// writes will go to memory. 
-type State struct { - mu sync.Mutex - - Client Client - - // We track two pieces of meta data in addition to the state itself: - // - // lineage - the state's unique ID - // serial - the monotonic counter of "versions" of the state - // - // Both of these (along with state) have a sister field - // that represents the values read in from an existing source. - // All three of these values are used to determine if the new - // state has changed from an existing state we read in. - lineage, readLineage string - serial, readSerial uint64 - state, readState *states.State - disableLocks bool -} - -var _ statemgr.Full = (*State)(nil) -var _ statemgr.Migrator = (*State)(nil) - -// statemgr.Reader impl. -func (s *State) State() *states.State { - s.mu.Lock() - defer s.mu.Unlock() - - return s.state.DeepCopy() -} - -func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { - if err := s.RefreshState(); err != nil { - return nil, fmt.Errorf("Failed to load state: %s", err) - } - - state := s.State() - if state == nil { - state = states.NewState() - } - - return state.RootModule().OutputValues, nil -} - -// StateForMigration is part of our implementation of statemgr.Migrator. -func (s *State) StateForMigration() *statefile.File { - s.mu.Lock() - defer s.mu.Unlock() - - return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) -} - -// statemgr.Writer impl. -func (s *State) WriteState(state *states.State) error { - s.mu.Lock() - defer s.mu.Unlock() - - // We create a deep copy of the state here, because the caller also has - // a reference to the given object and can potentially go on to mutate - // it after we return, but we want the snapshot at this point in time. - s.state = state.DeepCopy() - - return nil -} - -// WriteStateForMigration is part of our implementation of statemgr.Migrator. 
-func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { - s.mu.Lock() - defer s.mu.Unlock() - - if !force { - checkFile := statefile.New(s.state, s.lineage, s.serial) - if err := statemgr.CheckValidImport(f, checkFile); err != nil { - return err - } - } - - // The remote backend needs to pass the `force` flag through to its client. - // For backends that support such operations, inform the client - // that a force push has been requested - c, isForcePusher := s.Client.(ClientForcePusher) - if force && isForcePusher { - c.EnableForcePush() - } - - // We create a deep copy of the state here, because the caller also has - // a reference to the given object and can potentially go on to mutate - // it after we return, but we want the snapshot at this point in time. - s.state = f.State.DeepCopy() - s.lineage = f.Lineage - s.serial = f.Serial - - return nil -} - -// statemgr.Refresher impl. -func (s *State) RefreshState() error { - s.mu.Lock() - defer s.mu.Unlock() - return s.refreshState() -} - -// refreshState is the main implementation of RefreshState, but split out so -// that we can make internal calls to it from methods that are already holding -// the s.mu lock. -func (s *State) refreshState() error { - payload, err := s.Client.Get() - if err != nil { - return err - } - - // no remote state is OK - if payload == nil { - s.readState = nil - s.lineage = "" - s.serial = 0 - return nil - } - - stateFile, err := statefile.Read(bytes.NewReader(payload.Data)) - if err != nil { - return err - } - - s.lineage = stateFile.Lineage - s.serial = stateFile.Serial - s.state = stateFile.State - - // Properties from the remote must be separate so we can - // track changes as lineage, serial and/or state are mutated - s.readLineage = stateFile.Lineage - s.readSerial = stateFile.Serial - s.readState = s.state.DeepCopy() - return nil -} - -// statemgr.Persister impl. 
-func (s *State) PersistState(schemas *terraform.Schemas) error { - s.mu.Lock() - defer s.mu.Unlock() - - log.Printf("[DEBUG] states/remote: state read serial is: %d; serial is: %d", s.readSerial, s.serial) - log.Printf("[DEBUG] states/remote: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) - - if s.readState != nil { - lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage - serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial - stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) - if stateUnchanged && lineageUnchanged && serialUnchanged { - // If the state, lineage or serial haven't changed at all then we have nothing to do. - return nil - } - s.serial++ - } else { - // We might be writing a new state altogether, but before we do that - // we'll check to make sure there isn't already a snapshot present - // that we ought to be updating. - err := s.refreshState() - if err != nil { - return fmt.Errorf("failed checking for existing remote state: %s", err) - } - log.Printf("[DEBUG] states/remote: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) - log.Printf("[DEBUG] states/remote: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) - if s.lineage == "" { // indicates that no state snapshot is present yet - lineage, err := uuid.GenerateUUID() - if err != nil { - return fmt.Errorf("failed to generate initial lineage: %v", err) - } - s.lineage = lineage - s.serial++ - } - } - - f := statefile.New(s.state, s.lineage, s.serial) - - var buf bytes.Buffer - err := statefile.Write(f, &buf) - if err != nil { - return err - } - - err = s.Client.Put(buf.Bytes()) - if err != nil { - return err - } - - // After we've successfully persisted, what we just wrote is our new - // reference state until someone calls RefreshState again. 
- // We've potentially overwritten (via force) the state, lineage - // and / or serial (and serial was incremented) so we copy over all - // three fields so everything matches the new state and a subsequent - // operation would correctly detect no changes to the lineage, serial or state. - s.readState = s.state.DeepCopy() - s.readLineage = s.lineage - s.readSerial = s.serial - return nil -} - -// Lock calls the Client's Lock method if it's implemented. -func (s *State) Lock(info *statemgr.LockInfo) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.disableLocks { - return "", nil - } - - if c, ok := s.Client.(ClientLocker); ok { - return c.Lock(info) - } - return "", nil -} - -// Unlock calls the Client's Unlock method if it's implemented. -func (s *State) Unlock(id string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.disableLocks { - return nil - } - - if c, ok := s.Client.(ClientLocker); ok { - return c.Unlock(id) - } - return nil -} - -// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended -// to be called during initialization of a state manager and should not be -// called after any of the statemgr.Full interface methods have been called. -func (s *State) DisableLocks() { - s.disableLocks = true -} - -// StateSnapshotMeta returns the metadata from the most recently persisted -// or refreshed persistent state snapshot. -// -// This is an implementation of statemgr.PersistentMeta. 
-func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { - return statemgr.SnapshotMeta{ - Lineage: s.lineage, - Serial: s.serial, - } -} diff --git a/internal/states/remote/state_test.go b/internal/states/remote/state_test.go deleted file mode 100644 index 71e86c65703c..000000000000 --- a/internal/states/remote/state_test.go +++ /dev/null @@ -1,740 +0,0 @@ -package remote - -import ( - "log" - "sync" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/zclconf/go-cty/cty" - - tfaddr "github.com/hashicorp/terraform-registry-address" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" - "github.com/hashicorp/terraform/version" -) - -func TestState_impl(t *testing.T) { - var _ statemgr.Reader = new(State) - var _ statemgr.Writer = new(State) - var _ statemgr.Persister = new(State) - var _ statemgr.Refresher = new(State) - var _ statemgr.OutputReader = new(State) - var _ statemgr.Locker = new(State) -} - -func TestStateRace(t *testing.T) { - s := &State{ - Client: nilClient{}, - } - - current := states.NewState() - - var wg sync.WaitGroup - - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - s.WriteState(current) - s.PersistState(nil) - s.RefreshState() - }() - } - wg.Wait() -} - -// testCase encapsulates a test state test -type testCase struct { - name string - // A function to mutate state and return a cleanup function - mutationFunc func(*State) (*states.State, func()) - // The expected requests to have taken place - expectedRequests []mockClientRequest - // Mark this case as not having a request - noRequest bool -} - -// isRequested ensures a test that is specified as not having -// a request doesn't have one by checking if a method exists -// on the expectedRequest. 
-func (tc testCase) isRequested(t *testing.T) bool { - for _, expectedMethod := range tc.expectedRequests { - hasMethod := expectedMethod.Method != "" - if tc.noRequest && hasMethod { - t.Fatalf("expected no content for %q but got: %v", tc.name, expectedMethod) - } - } - return !tc.noRequest -} - -func TestStatePersist(t *testing.T) { - testCases := []testCase{ - { - name: "first state persistence", - mutationFunc: func(mgr *State) (*states.State, func()) { - mgr.state = &states.State{ - Modules: map[string]*states.Module{"": {}}, - } - s := mgr.State() - s.RootModule().SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Name: "myfile", - Type: "local_file", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "filename": "file.txt", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: tfaddr.Provider{Namespace: "local"}, - }, - ) - return s, func() {} - }, - expectedRequests: []mockClientRequest{ - // Expect an initial refresh, which returns nothing since there is no remote state. 
- { - Method: "Get", - Content: nil, - }, - // Expect a second refresh, since the read state is nil - { - Method: "Get", - Content: nil, - }, - // Expect an initial push with values and a serial of 1 - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "some meaningless value", - "serial": 1.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{}, - "resources": []interface{}{ - map[string]interface{}{ - "instances": []interface{}{ - map[string]interface{}{ - "attributes_flat": map[string]interface{}{ - "filename": "file.txt", - }, - "schema_version": 0.0, - "sensitive_attributes": []interface{}{}, - }, - }, - "mode": "managed", - "name": "myfile", - "provider": `provider["/local/"]`, - "type": "local_file", - }, - }, - "check_results": nil, - }, - }, - }, - }, - // If lineage changes, expect the serial to increment - { - name: "change lineage", - mutationFunc: func(mgr *State) (*states.State, func()) { - mgr.lineage = "mock-lineage" - return mgr.State(), func() {} - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 2.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{}, - "resources": []interface{}{ - map[string]interface{}{ - "instances": []interface{}{ - map[string]interface{}{ - "attributes_flat": map[string]interface{}{ - "filename": "file.txt", - }, - "schema_version": 0.0, - "sensitive_attributes": []interface{}{}, - }, - }, - "mode": "managed", - "name": "myfile", - "provider": `provider["/local/"]`, - "type": "local_file", - }, - }, - "check_results": nil, - }, - }, - }, - }, - // removing resources should increment the serial - { - name: "remove resources", - 
mutationFunc: func(mgr *State) (*states.State, func()) { - mgr.state.RootModule().Resources = map[string]*states.Resource{} - return mgr.State(), func() {} - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 3.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - }, - }, - // If the remote serial is incremented, then we increment it once more. - { - name: "change serial", - mutationFunc: func(mgr *State) (*states.State, func()) { - originalSerial := mgr.serial - mgr.serial++ - return mgr.State(), func() { - mgr.serial = originalSerial - } - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 5.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - }, - }, - // Adding an output should cause the serial to increment as well. 
- { - name: "add output to state", - mutationFunc: func(mgr *State) (*states.State, func()) { - s := mgr.State() - s.RootModule().SetOutputValue("foo", cty.StringVal("bar"), false) - return s, func() {} - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 4.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{ - "foo": map[string]interface{}{ - "type": "string", - "value": "bar", - }, - }, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - }, - }, - // ...as should changing an output - { - name: "mutate state bar -> baz", - mutationFunc: func(mgr *State) (*states.State, func()) { - s := mgr.State() - s.RootModule().SetOutputValue("foo", cty.StringVal("baz"), false) - return s, func() {} - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 5.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{ - "foo": map[string]interface{}{ - "type": "string", - "value": "baz", - }, - }, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - }, - }, - { - name: "nothing changed", - mutationFunc: func(mgr *State) (*states.State, func()) { - s := mgr.State() - return s, func() {} - }, - noRequest: true, - }, - // If the remote state's serial is less (force push), then we - // increment it once from there. 
- { - name: "reset serial (force push style)", - mutationFunc: func(mgr *State) (*states.State, func()) { - mgr.serial = 2 - return mgr.State(), func() {} - }, - expectedRequests: []mockClientRequest{ - { - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, // encoding/json decodes this as float64 by default - "lineage": "mock-lineage", - "serial": 3.0, // encoding/json decodes this as float64 by default - "terraform_version": version.Version, - "outputs": map[string]interface{}{ - "foo": map[string]interface{}{ - "type": "string", - "value": "baz", - }, - }, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - }, - }, - } - - // Initial setup of state just to give us a fixed starting point for our - // test assertions below, or else we'd need to deal with - // random lineage. - mgr := &State{ - Client: &mockClient{}, - } - - // In normal use (during a Terraform operation) we always refresh and read - // before any writes would happen, so we'll mimic that here for realism. - // NB This causes a GET to be logged so the first item in the test cases - // must account for this - if err := mgr.RefreshState(); err != nil { - t.Fatalf("failed to RefreshState: %s", err) - } - - // Our client is a mockClient which has a log we - // use to check that operations generate expected requests - mockClient := mgr.Client.(*mockClient) - - // logIdx tracks the current index of the log separate from - // the loop iteration so we can check operations that don't - // cause any requests to be generated - logIdx := 0 - - // Run tests in order. 
- for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - s, cleanup := tc.mutationFunc(mgr) - - if err := mgr.WriteState(s); err != nil { - t.Fatalf("failed to WriteState for %q: %s", tc.name, err) - } - if err := mgr.PersistState(nil); err != nil { - t.Fatalf("failed to PersistState for %q: %s", tc.name, err) - } - - if tc.isRequested(t) { - // Get captured request from the mock client log - // based on the index of the current test - if logIdx >= len(mockClient.log) { - t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) - } - for expectedRequestIdx := 0; expectedRequestIdx < len(tc.expectedRequests); expectedRequestIdx++ { - loggedRequest := mockClient.log[logIdx] - logIdx++ - if diff := cmp.Diff(tc.expectedRequests[expectedRequestIdx], loggedRequest, cmpopts.IgnoreMapEntries(func(key string, value interface{}) bool { - // This is required since the initial state creation causes the lineage to be a UUID that is not known at test time. - return tc.name == "first state persistence" && key == "lineage" - })); len(diff) > 0 { - t.Logf("incorrect client requests for %q:\n%s", tc.name, diff) - t.Fail() - } - } - } - cleanup() - }) - } - logCnt := len(mockClient.log) - if logIdx != logCnt { - t.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) - } -} - -func TestState_GetRootOutputValues(t *testing.T) { - // Initial setup of state with outputs already defined - mgr := &State{ - Client: &mockClient{ - current: []byte(` - { - "version": 4, - "lineage": "mock-lineage", - "serial": 1, - "terraform_version":"0.0.0", - "outputs": {"foo": {"value":"bar", "type": "string"}}, - "resources": [] - } - `), - }, - } - - outputs, err := mgr.GetRootOutputValues() - if err != nil { - t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) - } - - if len(outputs) != 1 { - t.Errorf("Expected %d outputs, but received %d", 1, len(outputs)) - } -} - -type migrationTestCase struct { - name string - // A function to generate a statefile - stateFile func(*State) *statefile.File - // The expected request to have taken place - expectedRequest mockClientRequest - // Mark this case as not having a request - expectedError string - // force flag passed to client - force bool -} - -func TestWriteStateForMigration(t *testing.T) { - mgr := &State{ - Client: &mockClient{ - current: []byte(` - { - "version": 4, - "lineage": "mock-lineage", - "serial": 3, - "terraform_version":"0.0.0", - "outputs": {"foo": {"value":"bar", "type": "string"}}, - "resources": [] - } - `), - }, - } - - testCases := []migrationTestCase{ - // Refreshing state before we run the test loop causes a GET - { - name: "refresh state", - stateFile: func(mgr *State) *statefile.File { - return mgr.StateForMigration() - }, - expectedRequest: mockClientRequest{ - Method: "Get", - Content: map[string]interface{}{ - "version": 4.0, - "lineage": "mock-lineage", - "serial": 3.0, - "terraform_version": "0.0.0", - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - }, - }, - }, - { - name: "cannot import lesser serial without force", - stateFile: func(mgr *State) *statefile.File { - return 
statefile.New(mgr.state, mgr.lineage, 1) - }, - expectedError: "cannot import state with serial 1 over newer state with serial 3", - }, - { - name: "cannot import differing lineage without force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, "different-lineage", mgr.serial) - }, - expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, - }, - { - name: "can import lesser serial with force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, mgr.lineage, 1) - }, - expectedRequest: mockClientRequest{ - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, - "lineage": "mock-lineage", - "serial": 2.0, - "terraform_version": version.Version, - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - force: true, - }, - { - name: "cannot import differing lineage without force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, "different-lineage", mgr.serial) - }, - expectedRequest: mockClientRequest{ - Method: "Put", - Content: map[string]interface{}{ - "version": 4.0, - "lineage": "different-lineage", - "serial": 3.0, - "terraform_version": version.Version, - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - force: true, - }, - } - - // In normal use (during a Terraform operation) we always refresh and read - // before any writes would happen, so we'll mimic that here for realism. 
- // NB This causes a GET to be logged so the first item in the test cases - // must account for this - if err := mgr.RefreshState(); err != nil { - t.Fatalf("failed to RefreshState: %s", err) - } - - if err := mgr.WriteState(mgr.State()); err != nil { - t.Fatalf("failed to write initial state: %s", err) - } - - // Our client is a mockClient which has a log we - // use to check that operations generate expected requests - mockClient := mgr.Client.(*mockClient) - - // logIdx tracks the current index of the log separate from - // the loop iteration so we can check operations that don't - // cause any requests to be generated - logIdx := 0 - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - sf := tc.stateFile(mgr) - err := mgr.WriteStateForMigration(sf, tc.force) - shouldError := tc.expectedError != "" - - // If we are expecting and error check it and move on - if shouldError { - if err == nil { - t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) - } else if err.Error() != tc.expectedError { - t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) - } - return - } - - if err != nil { - t.Fatalf("test case %q failed: %v", tc.name, err) - } - - // At this point we should just do a normal write and persist - // as would happen from the CLI - mgr.WriteState(mgr.State()) - mgr.PersistState(nil) - - if logIdx >= len(mockClient.log) { - t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) - } - loggedRequest := mockClient.log[logIdx] - logIdx++ - if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { - t.Fatalf("incorrect client requests for %q:\n%s", tc.name, diff) - } - }) - } - - logCnt := len(mockClient.log) - if logIdx != logCnt { - log.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) - } -} - -// This test runs the same test cases as above, but with -// a client that implements EnableForcePush -- this allows -// us to test that -force continues to work for backends without -// this interface, but that this interface works for those that do. -func TestWriteStateForMigrationWithForcePushClient(t *testing.T) { - mgr := &State{ - Client: &mockClientForcePusher{ - current: []byte(` - { - "version": 4, - "lineage": "mock-lineage", - "serial": 3, - "terraform_version":"0.0.0", - "outputs": {"foo": {"value":"bar", "type": "string"}}, - "resources": [] - } - `), - }, - } - - testCases := []migrationTestCase{ - // Refreshing state before we run the test loop causes a GET - { - name: "refresh state", - stateFile: func(mgr *State) *statefile.File { - return mgr.StateForMigration() - }, - expectedRequest: mockClientRequest{ - Method: "Get", - Content: map[string]interface{}{ - "version": 4.0, - "lineage": "mock-lineage", - "serial": 3.0, - "terraform_version": "0.0.0", - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - }, - }, - }, - { - name: "cannot import lesser serial without force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, mgr.lineage, 1) - }, - expectedError: "cannot import state with serial 1 over newer state with serial 3", - }, - { - name: "cannot import differing lineage without force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, "different-lineage", mgr.serial) - }, - expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, - }, - { - name: "can import lesser serial with force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, mgr.lineage, 1) - }, - expectedRequest: mockClientRequest{ - Method: "Force Put", - Content: 
map[string]interface{}{ - "version": 4.0, - "lineage": "mock-lineage", - "serial": 2.0, - "terraform_version": version.Version, - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - force: true, - }, - { - name: "cannot import differing lineage without force", - stateFile: func(mgr *State) *statefile.File { - return statefile.New(mgr.state, "different-lineage", mgr.serial) - }, - expectedRequest: mockClientRequest{ - Method: "Force Put", - Content: map[string]interface{}{ - "version": 4.0, - "lineage": "different-lineage", - "serial": 3.0, - "terraform_version": version.Version, - "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, - "resources": []interface{}{}, - "check_results": nil, - }, - }, - force: true, - }, - } - - // In normal use (during a Terraform operation) we always refresh and read - // before any writes would happen, so we'll mimic that here for realism. 
- // NB This causes a GET to be logged so the first item in the test cases - // must account for this - if err := mgr.RefreshState(); err != nil { - t.Fatalf("failed to RefreshState: %s", err) - } - - if err := mgr.WriteState(mgr.State()); err != nil { - t.Fatalf("failed to write initial state: %s", err) - } - - // Our client is a mockClientForcePusher which has a log we - // use to check that operations generate expected requests - mockClient := mgr.Client.(*mockClientForcePusher) - - if mockClient.force { - t.Fatalf("client should not default to force") - } - - // logIdx tracks the current index of the log separate from - // the loop iteration so we can check operations that don't - // cause any requests to be generated - logIdx := 0 - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Always reset client to not be force pushing - mockClient.force = false - sf := tc.stateFile(mgr) - err := mgr.WriteStateForMigration(sf, tc.force) - shouldError := tc.expectedError != "" - - // If we are expecting and error check it and move on - if shouldError { - if err == nil { - t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) - } else if err.Error() != tc.expectedError { - t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) - } - return - } - - if err != nil { - t.Fatalf("test case %q failed: %v", tc.name, err) - } - - if tc.force && !mockClient.force { - t.Fatalf("test case %q should have enabled force push", tc.name) - } - - // At this point we should just do a normal write and persist - // as would happen from the CLI - mgr.WriteState(mgr.State()) - mgr.PersistState(nil) - - if logIdx >= len(mockClient.log) { - t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) - } - loggedRequest := mockClient.log[logIdx] - logIdx++ - if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { - t.Fatalf("incorrect client 
requests for %q:\n%s", tc.name, diff) - } - }) - } - - logCnt := len(mockClient.log) - if logIdx != logCnt { - log.Fatalf("not all requests were read. Expected logIdx to be %d but got %d", logCnt, logIdx) - } -} diff --git a/internal/states/remote/testing.go b/internal/states/remote/testing.go deleted file mode 100644 index 197f87ac8c12..000000000000 --- a/internal/states/remote/testing.go +++ /dev/null @@ -1,102 +0,0 @@ -package remote - -import ( - "bytes" - "testing" - - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/states/statemgr" -) - -// TestClient is a generic function to test any client. -func TestClient(t *testing.T, c Client) { - var buf bytes.Buffer - s := statemgr.TestFullInitialState() - sf := statefile.New(s, "stub-lineage", 2) - err := statefile.Write(sf, &buf) - if err != nil { - t.Fatalf("err: %s", err) - } - data := buf.Bytes() - - if err := c.Put(data); err != nil { - t.Fatalf("put: %s", err) - } - - p, err := c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if !bytes.Equal(p.Data, data) { - t.Fatalf("expected full state %q\n\ngot: %q", string(p.Data), string(data)) - } - - if err := c.Delete(); err != nil { - t.Fatalf("delete: %s", err) - } - - p, err = c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if p != nil { - t.Fatalf("expected empty state, got: %q", string(p.Data)) - } -} - -// Test the lock implementation for a remote.Client. -// This test requires 2 client instances, in oder to have multiple remote -// clients since some implementations may tie the client to the lock, or may -// have reentrant locks. 
-func TestRemoteLocks(t *testing.T, a, b Client) { - lockerA, ok := a.(statemgr.Locker) - if !ok { - t.Fatal("client A not a statemgr.Locker") - } - - lockerB, ok := b.(statemgr.Locker) - if !ok { - t.Fatal("client B not a statemgr.Locker") - } - - infoA := statemgr.NewLockInfo() - infoA.Operation = "test" - infoA.Who = "clientA" - - infoB := statemgr.NewLockInfo() - infoB.Operation = "test" - infoB.Who = "clientB" - - lockIDA, err := lockerA.Lock(infoA) - if err != nil { - t.Fatal("unable to get initial lock:", err) - } - - _, err = lockerB.Lock(infoB) - if err == nil { - lockerA.Unlock(lockIDA) - t.Fatal("client B obtained lock while held by client A") - } - if _, ok := err.(*statemgr.LockError); !ok { - t.Errorf("expected a LockError, but was %t: %s", err, err) - } - - if err := lockerA.Unlock(lockIDA); err != nil { - t.Fatal("error unlocking client A", err) - } - - lockIDB, err := lockerB.Lock(infoB) - if err != nil { - t.Fatal("unable to obtain lock from client B") - } - - if lockIDB == lockIDA { - t.Fatalf("duplicate lock IDs: %q", lockIDB) - } - - if err = lockerB.Unlock(lockIDB); err != nil { - t.Fatal("error unlocking client B:", err) - } - - // TODO: Should we enforce that Unlock requires the correct ID? -} diff --git a/internal/states/resource.go b/internal/states/resource.go deleted file mode 100644 index 1c1f65bedef4..000000000000 --- a/internal/states/resource.go +++ /dev/null @@ -1,215 +0,0 @@ -package states - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform/internal/addrs" -) - -// Resource represents the state of a resource. -type Resource struct { - // Addr is the absolute address for the resource this state object - // belongs to. - Addr addrs.AbsResource - - // Instances contains the potentially-multiple instances associated with - // this resource. This map can contain a mixture of different key types, - // but only the ones of InstanceKeyType are considered current. 
- Instances map[addrs.InstanceKey]*ResourceInstance - - // ProviderConfig is the absolute address for the provider configuration that - // most recently managed this resource. This is used to connect a resource - // with a provider configuration when the resource configuration block is - // not available, such as if it has been removed from configuration - // altogether. - ProviderConfig addrs.AbsProviderConfig -} - -// Instance returns the state for the instance with the given key, or nil -// if no such instance is tracked within the state. -func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { - return rs.Instances[key] -} - -// CreateInstance creates an instance and adds it to the resource -func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance { - is := NewResourceInstance() - rs.Instances[key] = is - return is -} - -// EnsureInstance returns the state for the instance with the given key, -// creating a new empty state for it if one doesn't already exist. -// -// Because this may create and save a new state, it is considered to be -// a write operation. -func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { - ret := rs.Instance(key) - if ret == nil { - ret = NewResourceInstance() - rs.Instances[key] = ret - } - return ret -} - -// ResourceInstance represents the state of a particular instance of a resource. -type ResourceInstance struct { - // Current, if non-nil, is the remote object that is currently represented - // by the corresponding resource instance. - Current *ResourceInstanceObjectSrc - - // Deposed, if len > 0, contains any remote objects that were previously - // represented by the corresponding resource instance but have been - // replaced and are pending destruction due to the create_before_destroy - // lifecycle mode. - Deposed map[DeposedKey]*ResourceInstanceObjectSrc -} - -// NewResourceInstance constructs and returns a new ResourceInstance, ready to -// use. 
-func NewResourceInstance() *ResourceInstance { - return &ResourceInstance{ - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - } -} - -// HasCurrent returns true if this resource instance has a "current"-generation -// object. Most instances do, but this can briefly be false during a -// create-before-destroy replace operation when the current has been deposed -// but its replacement has not yet been created. -func (i *ResourceInstance) HasCurrent() bool { - return i != nil && i.Current != nil -} - -// HasDeposed returns true if this resource instance has a deposed object -// with the given key. -func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { - return i != nil && i.Deposed[key] != nil -} - -// HasAnyDeposed returns true if this resource instance has one or more -// deposed objects. -func (i *ResourceInstance) HasAnyDeposed() bool { - return i != nil && len(i.Deposed) > 0 -} - -// HasObjects returns true if this resource has any objects at all, whether -// current or deposed. -func (i *ResourceInstance) HasObjects() bool { - return i.Current != nil || len(i.Deposed) != 0 -} - -// deposeCurrentObject is part of the real implementation of -// SyncState.DeposeResourceInstanceObject. The exported method uses a lock -// to ensure that we can safely allocate an unused deposed key without -// collision. -func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { - if !i.HasCurrent() { - return NotDeposed - } - - key := forceKey - if key == NotDeposed { - key = i.findUnusedDeposedKey() - } else { - if _, exists := i.Deposed[key]; exists { - panic(fmt.Sprintf("forced key %s is already in use", forceKey)) - } - } - i.Deposed[key] = i.Current - i.Current = nil - return key -} - -// GetGeneration retrieves the object of the given generation from the -// ResourceInstance, or returns nil if there is no such object. -// -// If the given generation is nil or invalid, this method will panic. 
-func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { - if gen == CurrentGen { - return i.Current - } - if dk, ok := gen.(DeposedKey); ok { - return i.Deposed[dk] - } - if gen == nil { - panic("get with nil Generation") - } - // Should never fall out here, since the above covers all possible - // Generation values. - panic(fmt.Sprintf("get invalid Generation %#v", gen)) -} - -// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance at the time of the call. -// -// Note that the validity of this result may change if new deposed keys are -// allocated before it is used. To avoid this risk, instead use the -// DeposeResourceInstanceObject method on the SyncState wrapper type, which -// allocates a key and uses it atomically. -func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { - return i.findUnusedDeposedKey() -} - -// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance. -func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { - for { - key := NewDeposedKey() - if _, exists := i.Deposed[key]; !exists { - return key - } - // Spin until we find a unique one. This shouldn't take long, because - // we have a 32-bit keyspace and there's rarely more than one deposed - // instance. - } -} - -// DeposedKey is a 8-character hex string used to uniquely identify deposed -// instance objects in the state. -type DeposedKey string - -// NotDeposed is a special invalid value of DeposedKey that is used to represent -// the absense of a deposed key. It must not be used as an actual deposed key. -const NotDeposed = DeposedKey("") - -var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) - -// NewDeposedKey generates a pseudo-random deposed key. 
Because of the short -// length of these keys, uniqueness is not a natural consequence and so the -// caller should test to see if the generated key is already in use and generate -// another if so, until a unique key is found. -func NewDeposedKey() DeposedKey { - v := deposedKeyRand.Uint32() - return DeposedKey(fmt.Sprintf("%08x", v)) -} - -func (k DeposedKey) String() string { - return string(k) -} - -func (k DeposedKey) GoString() string { - ks := string(k) - switch { - case ks == "": - return "states.NotDeposed" - default: - return fmt.Sprintf("states.DeposedKey(%s)", ks) - } -} - -// Generation is a helper method to convert a DeposedKey into a Generation. -// If the reciever is anything other than NotDeposed then the result is -// just the same value as a Generation. If the receiver is NotDeposed then -// the result is CurrentGen. -func (k DeposedKey) Generation() Generation { - if k == NotDeposed { - return CurrentGen - } - return k -} - -// generation is an implementation of Generation. -func (k DeposedKey) generation() {} diff --git a/internal/states/state.go b/internal/states/state.go deleted file mode 100644 index 9992f93caf7e..000000000000 --- a/internal/states/state.go +++ /dev/null @@ -1,634 +0,0 @@ -package states - -import ( - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" -) - -// State is the top-level type of a Terraform state. -// -// A state should be mutated only via its accessor methods, to ensure that -// invariants are preserved. -// -// Access to State and the nested values within it is not concurrency-safe, -// so when accessing a State object concurrently it is the caller's -// responsibility to ensure that only one write is in progress at a time -// and that reads only occur when no write is in progress. 
The most common -// way to achieve this is to wrap the State in a SyncState and use the -// higher-level atomic operations supported by that type. -type State struct { - // Modules contains the state for each module. The keys in this map are - // an implementation detail and must not be used by outside callers. - Modules map[string]*Module - - // CheckResults contains a snapshot of the statuses of checks at the - // end of the most recent update to the state. Callers might compare - // checks between runs to see if e.g. a previously-failing check has - // been fixed since the last run, or similar. - // - // CheckResults can be nil to indicate that there are no check results - // from the previous run at all, which is subtly different than the - // previous run having affirmatively recorded that there are no checks - // to run. For example, if this object was created from a state snapshot - // created by a version of Terraform that didn't yet support checks - // then this field will be nil. - CheckResults *CheckResults -} - -// NewState constructs a minimal empty state, containing an empty root module. -func NewState() *State { - modules := map[string]*Module{} - modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) - return &State{ - Modules: modules, - } -} - -// BuildState is a helper -- primarily intended for tests -- to build a state -// using imperative code against the StateSync type while still acting as -// an expression of type *State to assign into a containing struct. -func BuildState(cb func(*SyncState)) *State { - s := NewState() - cb(s.SyncWrapper()) - return s -} - -// Empty returns true if there are no resources or populated output values -// in the receiver. In other words, if this state could be safely replaced -// with the return value of NewState and be functionally equivalent. 
-func (s *State) Empty() bool { - if s == nil { - return true - } - for _, ms := range s.Modules { - if len(ms.Resources) != 0 { - return false - } - if len(ms.OutputValues) != 0 { - return false - } - } - return true -} - -// Module returns the state for the module with the given address, or nil if -// the requested module is not tracked in the state. -func (s *State) Module(addr addrs.ModuleInstance) *Module { - if s == nil { - panic("State.Module on nil *State") - } - return s.Modules[addr.String()] -} - -// ModuleInstances returns the set of Module states that matches the given path. -func (s *State) ModuleInstances(addr addrs.Module) []*Module { - var ms []*Module - for _, m := range s.Modules { - if m.Addr.Module().Equal(addr) { - ms = append(ms, m) - } - } - return ms -} - -// ModuleOutputs returns all outputs for the given module call under the -// parentAddr instance. -func (s *State) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue { - var os []*OutputValue - for _, m := range s.Modules { - // can't get outputs from the root module - if m.Addr.IsRoot() { - continue - } - - parent, call := m.Addr.Call() - // make sure this is a descendent in the correct path - if !parentAddr.Equal(parent) { - continue - } - - // and check if this is the correct child - if call.Name != module.Name { - continue - } - - for _, o := range m.OutputValues { - os = append(os, o) - } - } - - return os -} - -// RemoveModule removes the module with the given address from the state, -// unless it is the root module. The root module cannot be deleted, and so -// this method will panic if that is attempted. -// -// Removing a module implicitly discards all of the resources, outputs and -// local values within it, and so this should usually be done only for empty -// modules. For callers accessing the state through a SyncState wrapper, modules -// are automatically pruned if they are empty after one of their contained -// elements is removed. 
-func (s *State) RemoveModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - panic("attempted to remove root module") - } - - delete(s.Modules, addr.String()) -} - -// RootModule is a convenient alias for Module(addrs.RootModuleInstance). -func (s *State) RootModule() *Module { - if s == nil { - panic("RootModule called on nil State") - } - return s.Modules[addrs.RootModuleInstance.String()] -} - -// EnsureModule returns the state for the module with the given address, -// creating and adding a new one if necessary. -// -// Since this might modify the state to add a new instance, it is considered -// to be a write operation. -func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { - ms := s.Module(addr) - if ms == nil { - ms = NewModule(addr) - s.Modules[addr.String()] = ms - } - return ms -} - -// HasManagedResourceInstanceObjects returns true if there is at least one -// resource instance object (current or deposed) associated with a managed -// resource in the receiving state. -// -// A true result would suggest that just discarding this state without first -// destroying these objects could leave "dangling" objects in remote systems, -// no longer tracked by any Terraform state. -func (s *State) HasManagedResourceInstanceObjects() bool { - if s == nil { - return false - } - for _, ms := range s.Modules { - for _, rs := range ms.Resources { - if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { - continue - } - for _, is := range rs.Instances { - if is.Current != nil || len(is.Deposed) != 0 { - return true - } - } - } - } - return false -} - -// Resource returns the state for the resource with the given address, or nil -// if no such resource is tracked in the state. -func (s *State) Resource(addr addrs.AbsResource) *Resource { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.Resource(addr.Resource) -} - -// Resources returns the set of resources that match the given configuration path. 
-func (s *State) Resources(addr addrs.ConfigResource) []*Resource { - var ret []*Resource - for _, m := range s.ModuleInstances(addr.Module) { - r := m.Resource(addr.Resource) - if r != nil { - ret = append(ret, r) - } - } - return ret -} - -// AllManagedResourceInstanceObjectAddrs returns a set of addresses for all of -// the leaf resource instance objects associated with managed resources that -// are tracked in this state. -// -// This result is the set of objects that would be effectively "forgotten" -// (like "terraform state rm") if this state were totally discarded, such as -// by deleting a workspace. This function is intended only for reporting -// context in error messages, such as when we reject deleting a "non-empty" -// workspace as detected by s.HasManagedResourceInstanceObjects. -// -// The ordering of the result is meaningless but consistent. DeposedKey will -// be NotDeposed (the zero value of DeposedKey) for any "current" objects. -// This method is guaranteed to return at least one item if -// s.HasManagedResourceInstanceObjects returns true for the same state, and -// to return a zero-length slice if it returns false. -func (s *State) AllResourceInstanceObjectAddrs() []struct { - Instance addrs.AbsResourceInstance - DeposedKey DeposedKey -} { - if s == nil { - return nil - } - - // We use an unnamed return type here just because we currently have no - // general need to return pairs of instance address and deposed key aside - // from this method, and this method itself is only of marginal value - // when producing some error messages. - // - // If that need ends up arising more in future then it might make sense to - // name this as addrs.AbsResourceInstanceObject, although that would require - // moving DeposedKey into the addrs package too. 
- type ResourceInstanceObject = struct { - Instance addrs.AbsResourceInstance - DeposedKey DeposedKey - } - var ret []ResourceInstanceObject - - for _, ms := range s.Modules { - for _, rs := range ms.Resources { - if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { - continue - } - - for instKey, is := range rs.Instances { - instAddr := rs.Addr.Instance(instKey) - if is.Current != nil { - ret = append(ret, ResourceInstanceObject{instAddr, NotDeposed}) - } - for deposedKey := range is.Deposed { - ret = append(ret, ResourceInstanceObject{instAddr, deposedKey}) - } - } - } - } - - sort.SliceStable(ret, func(i, j int) bool { - objI, objJ := ret[i], ret[j] - switch { - case !objI.Instance.Equal(objJ.Instance): - return objI.Instance.Less(objJ.Instance) - default: - return objI.DeposedKey < objJ.DeposedKey - } - }) - - return ret -} - -// ResourceInstance returns the state for the resource instance with the given -// address, or nil if no such resource is tracked in the state. -func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - if s == nil { - panic("State.ResourceInstance on nil *State") - } - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.ResourceInstance(addr.Resource) -} - -// OutputValue returns the state for the output value with the given address, -// or nil if no such output value is tracked in the state. -func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.OutputValues[addr.OutputValue.Name] -} - -// LocalValue returns the value of the named local value with the given address, -// or cty.NilVal if no such value is tracked in the state. 
-func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { - ms := s.Module(addr.Module) - if ms == nil { - return cty.NilVal - } - return ms.LocalValues[addr.LocalValue.Name] -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving state. -// -// The result is de-duplicated so that each distinct address appears only once. -func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { - if s == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, ms := range s.Modules { - for _, rc := range ms.Resources { - m[rc.ProviderConfig.String()] = rc.ProviderConfig - } - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} - -// ProviderRequirements returns a description of all of the providers that -// are required to work with the receiving state. -// -// Because the state does not track specific version information for providers, -// the requirements returned by this method will always be unconstrained. -// The result should usually be merged with a Requirements derived from the -// current configuration in order to apply some constraints. -func (s *State) ProviderRequirements() getproviders.Requirements { - configAddrs := s.ProviderAddrs() - ret := make(getproviders.Requirements, len(configAddrs)) - for _, configAddr := range configAddrs { - ret[configAddr.Provider] = nil // unconstrained dependency - } - return ret -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. 
-// -// This should generally be used only after a "terraform destroy" operation, -// to finalize the cleanup of the state. It is not correct to use this after -// other operations because if a resource has "count = 0" or "for_each" over -// an empty collection then we want to retain it in the state so that references -// to it, particularly in "strange" contexts like "terraform console", can be -// properly resolved. -// -// This method MUST NOT be called concurrently with other readers and writers -// of the receiving state. -func (s *State) PruneResourceHusks() { - for _, m := range s.Modules { - m.PruneResourceHusks() - if len(m.Resources) == 0 && !m.Addr.IsRoot() { - s.RemoveModule(m.Addr) - } - } -} - -// SyncWrapper returns a SyncState object wrapping the receiver. -func (s *State) SyncWrapper() *SyncState { - return &SyncState{ - state: s, - } -} - -// MoveAbsResource moves the given src AbsResource's current state to the new -// dst address. This will panic if the src AbsResource does not exist in state, -// or if there is already a resource at the dst address. It is the caller's -// responsibility to verify the validity of the move (for example, that the src -// and dst are compatible types). -func (s *State) MoveAbsResource(src, dst addrs.AbsResource) { - // verify that the src address exists and the dst address does not - rs := s.Resource(src) - if rs == nil { - panic(fmt.Sprintf("no state for src address %s", src.String())) - } - - ds := s.Resource(dst) - if ds != nil { - panic(fmt.Sprintf("dst resource %s already exists", dst.String())) - } - - ms := s.Module(src.Module) - ms.RemoveResource(src.Resource) - - // Remove the module if it is empty (and not root) after removing the - // resource. 
- if !ms.Addr.IsRoot() && ms.empty() { - s.RemoveModule(src.Module) - } - - // Update the address before adding it to the state - rs.Addr = dst - s.EnsureModule(dst.Module).Resources[dst.Resource.String()] = rs -} - -// MaybeMoveAbsResource moves the given src AbsResource's current state to the -// new dst address. This function will succeed if both the src address does not -// exist in state and the dst address does; the return value indicates whether -// or not the move occurred. This function will panic if either the src does not -// exist or the dst does exist (but not both). -func (s *State) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool { - // Get the source and destinatation addresses from state. - rs := s.Resource(src) - ds := s.Resource(dst) - - // Normal case: the src exists in state, dst does not - if rs != nil && ds == nil { - s.MoveAbsResource(src, dst) - return true - } - - if rs == nil && ds != nil { - // The source is not in state, the destination is. This is not - // guaranteed to be idempotent since we aren't tracking exact moves, but - // it's useful information for the caller. - return false - } else { - panic("invalid move") - } -} - -// MoveAbsResourceInstance moves the given src AbsResourceInstance's current state to -// the new dst address. This will panic if the src AbsResourceInstance does not -// exist in state, or if there is already a resource at the dst address. It is -// the caller's responsibility to verify the validity of the move (for example, -// that the src and dst are compatible types). 
-func (s *State) MoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) { - srcInstanceState := s.ResourceInstance(src) - if srcInstanceState == nil { - panic(fmt.Sprintf("no state for src address %s", src.String())) - } - - dstInstanceState := s.ResourceInstance(dst) - if dstInstanceState != nil { - panic(fmt.Sprintf("dst resource %s already exists", dst.String())) - } - - srcResourceState := s.Resource(src.ContainingResource()) - srcProviderAddr := srcResourceState.ProviderConfig - dstResourceAddr := dst.ContainingResource() - - // Remove the source resource instance from the module's state, and then the - // module if empty. - ms := s.Module(src.Module) - ms.ForgetResourceInstanceAll(src.Resource) - if !ms.Addr.IsRoot() && ms.empty() { - s.RemoveModule(src.Module) - } - - dstModule := s.EnsureModule(dst.Module) - - // See if there is already a resource we can add this instance to. - dstResourceState := s.Resource(dstResourceAddr) - if dstResourceState == nil { - // If we're moving to an address without an index then that - // suggests the user's intent is to establish both the - // resource and the instance at the same time (since the - // address covers both). If there's an index in the - // target then allow creating the new instance here. - dstModule.SetResourceProvider( - dstResourceAddr.Resource, - srcProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource - ) - dstResourceState = dstModule.Resource(dstResourceAddr.Resource) - } - - dstResourceState.Instances[dst.Resource.Key] = srcInstanceState -} - -// MaybeMoveAbsResourceInstance moves the given src AbsResourceInstance's -// current state to the new dst address. This function will succeed if both the -// src address does not exist in state and the dst address does; the return -// value indicates whether or not the move occured. This function will panic if -// either the src does not exist or the dst does exist (but not both). 
-func (s *State) MaybeMoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) bool { - // get the src and dst resource instances from state - rs := s.ResourceInstance(src) - ds := s.ResourceInstance(dst) - - // Normal case: the src exists in state, dst does not - if rs != nil && ds == nil { - s.MoveAbsResourceInstance(src, dst) - return true - } - - if rs == nil && ds != nil { - // The source is not in state, the destination is. This is not - // guaranteed to be idempotent since we aren't tracking exact moves, but - // it's useful information. - return false - } else { - panic("invalid move") - } -} - -// MoveModuleInstance moves the given src ModuleInstance's current state to the -// new dst address. This will panic if the src ModuleInstance does not -// exist in state, or if there is already a resource at the dst address. It is -// the caller's responsibility to verify the validity of the move. -func (s *State) MoveModuleInstance(src, dst addrs.ModuleInstance) { - if src.IsRoot() || dst.IsRoot() { - panic("cannot move to or from root module") - } - - srcMod := s.Module(src) - if srcMod == nil { - panic(fmt.Sprintf("no state for src module %s", src.String())) - } - - dstMod := s.Module(dst) - if dstMod != nil { - panic(fmt.Sprintf("dst module %s already exists in state", dst.String())) - } - - s.RemoveModule(src) - - srcMod.Addr = dst - s.EnsureModule(dst) - s.Modules[dst.String()] = srcMod - - // Update any Resource's addresses. - if srcMod.Resources != nil { - for _, r := range srcMod.Resources { - r.Addr.Module = dst - } - } - - // Update any OutputValues's addresses. - if srcMod.OutputValues != nil { - for _, ov := range srcMod.OutputValues { - ov.Addr.Module = dst - } - } -} - -// MaybeMoveModuleInstance moves the given src ModuleInstance's current state to -// the new dst address. This function will succeed if both the src address does -// not exist in state and the dst address does; the return value indicates -// whether or not the move occured. 
This function will panic if either the src -// does not exist or the dst does exist (but not both). -func (s *State) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool { - if src.IsRoot() || dst.IsRoot() { - panic("cannot move to or from root module") - } - - srcMod := s.Module(src) - dstMod := s.Module(dst) - - // Normal case: the src exists in state, dst does not - if srcMod != nil && dstMod == nil { - s.MoveModuleInstance(src, dst) - return true - } - - if srcMod == nil || src.IsRoot() && dstMod != nil { - // The source is not in state, the destination is. This is not - // guaranteed to be idempotent since we aren't tracking exact moves, but - // it's useful information. - return false - } else { - panic("invalid move") - } -} - -// MoveModule takes a source and destination addrs.Module address, and moves all -// state Modules which are contained by the src address to the new address. -func (s *State) MoveModule(src, dst addrs.AbsModuleCall) { - if src.Module.IsRoot() || dst.Module.IsRoot() { - panic("cannot move to or from root module") - } - - // Modules only exist as ModuleInstances in state, so we need to check each - // state Module and see if it is contained by the src address to get a full - // list of modules to move. - var srcMIs []*Module - for _, module := range s.Modules { - if !module.Addr.IsRoot() { - if src.Module.TargetContains(module.Addr) { - srcMIs = append(srcMIs, module) - } - } - } - - if len(srcMIs) == 0 { - panic(fmt.Sprintf("no matching module instances found for src module %s", src.String())) - } - - for _, ms := range srcMIs { - newInst := make(addrs.ModuleInstance, len(ms.Addr)) - copy(newInst, ms.Addr) - if ms.Addr.IsDeclaredByCall(src) { - // Easy case: we just need to update the last step with the new name - newInst[len(newInst)-1].Name = dst.Call.Name - } else { - // Trickier: this Module is a submodule. 
we need to find and update - // only that appropriate step - for s := range newInst { - if newInst[s].Name == src.Call.Name { - newInst[s].Name = dst.Call.Name - } - } - } - s.MoveModuleInstance(ms.Addr, newInst) - } -} diff --git a/internal/states/state_test.go b/internal/states/state_test.go deleted file mode 100644 index 768772aebe44..000000000000 --- a/internal/states/state_test.go +++ /dev/null @@ -1,1008 +0,0 @@ -package states - -import ( - "fmt" - "reflect" - "testing" - - "github.com/go-test/deep" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang/marks" -) - -func TestState(t *testing.T) { - // This basic tests exercises the main mutation methods to construct - // a state. It is not fully comprehensive, so other tests should visit - // more esoteric codepaths. - - state := NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetLocalValue("foo", cty.StringVal("foo value")) - rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) - rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) - multiModA := state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("a"))) - multiModA.SetOutputValue("pizza", cty.StringVal("cheese"), false) - multiModB := 
state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("b"))) - multiModB.SetOutputValue("pizza", cty.StringVal("sausage"), false) - - want := &State{ - Modules: map[string]*Module{ - "": { - Addr: addrs.RootModuleInstance, - LocalValues: map[string]cty.Value{ - "foo": cty.StringVal("foo value"), - }, - OutputValues: map[string]*OutputValue{ - "bar": { - Addr: addrs.AbsOutputValue{ - OutputValue: addrs.OutputValue{ - Name: "bar", - }, - }, - Value: cty.StringVal("bar value"), - Sensitive: false, - }, - "secret": { - Addr: addrs.AbsOutputValue{ - OutputValue: addrs.OutputValue{ - Name: "secret", - }, - }, - Value: cty.StringVal("secret value"), - Sensitive: true, - }, - }, - Resources: map[string]*Resource{ - "test_thing.baz": { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Absolute(addrs.RootModuleInstance), - - Instances: map[addrs.InstanceKey]*ResourceInstance{ - addrs.IntKey(0): { - Current: &ResourceInstanceObjectSrc{ - SchemaVersion: 1, - Status: ObjectReady, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - }, - }, - ProviderConfig: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - }, - }, - "module.child": { - Addr: addrs.RootModuleInstance.Child("child", addrs.NoKey), - LocalValues: map[string]cty.Value{}, - OutputValues: map[string]*OutputValue{ - "pizza": { - Addr: addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), - OutputValue: addrs.OutputValue{ - Name: "pizza", - }, - }, - Value: cty.StringVal("hawaiian"), - Sensitive: false, - }, - }, - Resources: map[string]*Resource{}, - }, - `module.multi["a"]`: { - Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), - LocalValues: map[string]cty.Value{}, - OutputValues: map[string]*OutputValue{ - "pizza": { - Addr: addrs.AbsOutputValue{ - Module: 
addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), - OutputValue: addrs.OutputValue{ - Name: "pizza", - }, - }, - Value: cty.StringVal("cheese"), - Sensitive: false, - }, - }, - Resources: map[string]*Resource{}, - }, - `module.multi["b"]`: { - Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), - LocalValues: map[string]cty.Value{}, - OutputValues: map[string]*OutputValue{ - "pizza": { - Addr: addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), - OutputValue: addrs.OutputValue{ - Name: "pizza", - }, - }, - Value: cty.StringVal("sausage"), - Sensitive: false, - }, - }, - Resources: map[string]*Resource{}, - }, - }, - } - - { - // Our structure goes deep, so we need to temporarily override the - // deep package settings to ensure that we visit the full structure. - oldDeepDepth := deep.MaxDepth - oldDeepCompareUnexp := deep.CompareUnexportedFields - deep.MaxDepth = 50 - deep.CompareUnexportedFields = true - defer func() { - deep.MaxDepth = oldDeepDepth - deep.CompareUnexportedFields = oldDeepCompareUnexp - }() - } - - for _, problem := range deep.Equal(state, want) { - t.Error(problem) - } - - expectedOutputs := map[string]string{ - `module.multi["a"].output.pizza`: "cheese", - `module.multi["b"].output.pizza`: "sausage", - } - - for _, o := range state.ModuleOutputs(addrs.RootModuleInstance, addrs.ModuleCall{Name: "multi"}) { - addr := o.Addr.String() - expected := expectedOutputs[addr] - delete(expectedOutputs, addr) - - if expected != o.Value.AsString() { - t.Fatalf("expected %q:%q, got %q", addr, expected, o.Value.AsString()) - } - } - - for addr, o := range expectedOutputs { - t.Fatalf("missing output %q:%q", addr, o) - } -} - -func TestStateDeepCopyObject(t *testing.T) { - obj := &ResourceInstanceObject{ - Value: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("id"), - }), - Private: []byte("private"), - Status: ObjectReady, - Dependencies: []addrs.ConfigResource{ - { - Module: 
addrs.RootModule, - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "bar", - }, - }, - }, - CreateBeforeDestroy: true, - } - - objCopy := obj.DeepCopy() - if !reflect.DeepEqual(obj, objCopy) { - t.Fatalf("not equal\n%#v\n%#v", obj, objCopy) - } -} - -func TestStateDeepCopy(t *testing.T) { - state := NewState() - - rootModule := state.RootModule() - if rootModule == nil { - t.Errorf("root module is nil; want valid object") - } - - rootModule.SetLocalValue("foo", cty.StringVal("foo value")) - rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) - rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.IntKey(0)), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - Private: []byte("private data"), - Dependencies: []addrs.ConfigResource{}, - CreateBeforeDestroy: true, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }.Instance(addrs.IntKey(0)), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - // Sensitive path at "woozles" - AttrSensitivePaths: []cty.PathValueMarks{ - { - Path: cty.Path{cty.GetAttrStep{Name: "woozles"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - Private: []byte("private data"), - Dependencies: []addrs.ConfigResource{ - { - Module: addrs.RootModule, - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - 
- childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) - - stateCopy := state.DeepCopy() - if !state.Equal(stateCopy) { - t.Fatalf("\nexpected:\n%q\ngot:\n%q\n", state, stateCopy) - } -} - -func TestStateHasResourceInstanceObjects(t *testing.T) { - providerConfig := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.MustParseProviderSourceString("test/test"), - } - childModuleProviderConfig := addrs.AbsProviderConfig{ - Module: addrs.RootModule.Child("child"), - Provider: addrs.MustParseProviderSourceString("test/test"), - } - - tests := map[string]struct { - Setup func(ss *SyncState) - Want bool - }{ - "empty": { - func(ss *SyncState) {}, - false, - }, - "one current, ready object in root module": { - func(ss *SyncState) { - ss.SetResourceInstanceCurrent( - mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectReady, - }, - providerConfig, - ) - }, - true, - }, - "one current, ready object in child module": { - func(ss *SyncState) { - ss.SetResourceInstanceCurrent( - mustAbsResourceAddr("module.child.test.foo").Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectReady, - }, - childModuleProviderConfig, - ) - }, - true, - }, - "one current, tainted object in root module": { - func(ss *SyncState) { - ss.SetResourceInstanceCurrent( - mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectTainted, - }, - providerConfig, - ) - }, - true, - }, - "one deposed, ready object in root module": { - func(ss *SyncState) { - ss.SetResourceInstanceDeposed( - mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), - DeposedKey("uhoh"), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectTainted, - }, - providerConfig, - ) - }, - true, - }, - "one 
empty resource husk in root module": { - func(ss *SyncState) { - // Current Terraform doesn't actually create resource husks - // as part of its everyday work, so this is a "should never - // happen" case but we'll test to make sure we're robust to - // it anyway, because this was a historical bug blocking - // "terraform workspace delete" and similar. - ss.SetResourceInstanceCurrent( - mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectTainted, - }, - providerConfig, - ) - s := ss.Lock() - delete(s.Modules[""].Resources["test.foo"].Instances, addrs.NoKey) - ss.Unlock() - }, - false, - }, - "one current data resource object in root module": { - func(ss *SyncState) { - ss.SetResourceInstanceCurrent( - mustAbsResourceAddr("data.test.foo").Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: ObjectReady, - }, - providerConfig, - ) - }, - false, // data resources aren't managed resources, so they don't count - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - state := BuildState(test.Setup) - got := state.HasManagedResourceInstanceObjects() - if got != test.Want { - t.Errorf("wrong result\nstate content: (using legacy state string format; might not be comprehensive)\n%s\n\ngot: %t\nwant: %t", state, got, test.Want) - } - }) - } - -} - -func TestState_MoveAbsResource(t *testing.T) { - // Set up a starter state for the embedded tests, which should start from a copy of this state. 
- state := NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.IntKey(0)), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) - - t.Run("basic move", func(t *testing.T) { - s := state.DeepCopy() - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) - - s.MoveAbsResource(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - if len(s.RootModule().Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) - } - - got := s.Resource(dst) - if got.Addr.Resource != dst.Resource { - t.Fatalf("dst resource not in state") - } - }) - - t.Run("move to new module", func(t *testing.T) { - s := state.DeepCopy() - dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("one")) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(dstModule) - - s.MoveAbsResource(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - if s.Module(dstModule) == nil { - t.Fatalf("child module %s not in state", dstModule.String()) - } - - if len(s.Module(dstModule).Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) - } - - got := s.Resource(dst) - if got.Addr.Resource != dst.Resource { - t.Fatalf("dst resource not in state") - } - }) - - t.Run("from a child module to root", func(t *testing.T) { - s := state.DeepCopy() - srcModule := 
addrs.RootModuleInstance.Child("kinder", addrs.NoKey) - cm := s.EnsureModule(srcModule) - cm.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "child", - }.Instance(addrs.IntKey(0)), // Moving the AbsResouce moves all instances - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - cm.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "child", - }.Instance(addrs.IntKey(1)), // Moving the AbsResouce moves all instances - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(addrs.RootModuleInstance) - s.MoveAbsResource(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - // The child module should have been removed after removing its only resource - if s.Module(srcModule) != nil { - t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) - } - - if len(s.RootModule().Resources) != 2 { - t.Fatalf("wrong number of resources in state; expected 2, found %d", len(s.RootModule().Resources)) - } - - if len(s.Resource(dst).Instances) != 2 { - t.Fatalf("wrong number of resource instances for dst, got %d expected 2", len(s.Resource(dst).Instances)) - } - - got := s.Resource(dst) - if got.Addr.Resource != dst.Resource { - t.Fatalf("dst resource not in state") - } - }) - - t.Run("module to new module", func(t *testing.T) { - s := NewState() - 
srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) - dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) - cm := s.EnsureModule(srcModule) - cm.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "child", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) - s.MoveAbsResource(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - // The child module should have been removed after removing its only resource - if s.Module(srcModule) != nil { - t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) - } - - gotMod := s.Module(dstModule) - if len(gotMod.Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) - } - - got := s.Resource(dst) - if got.Addr.Resource != dst.Resource { - t.Fatalf("dst resource not in state") - } - }) - - t.Run("module to new module", func(t *testing.T) { - s := NewState() - srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) - dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) - cm := s.EnsureModule(srcModule) - cm.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "child", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: 
addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) - s.MoveAbsResource(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - // The child module should have been removed after removing its only resource - if s.Module(srcModule) != nil { - t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) - } - - gotMod := s.Module(dstModule) - if len(gotMod.Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) - } - - got := s.Resource(dst) - if got.Addr.Resource != dst.Resource { - t.Fatalf("dst resource not in state") - } - }) -} - -func TestState_MaybeMoveAbsResource(t *testing.T) { - state := NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.IntKey(0)), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) - - // First move, success - t.Run("first move", func(t *testing.T) { - moved := state.MaybeMoveAbsResource(src, dst) - if !moved { - t.Fatal("wrong result") - } - }) - - // Trying to move a resource that doesn't exist in state to a resource which does exist should be a noop. 
- t.Run("noop", func(t *testing.T) { - moved := state.MaybeMoveAbsResource(src, dst) - if moved { - t.Fatal("wrong result") - } - }) -} - -func TestState_MoveAbsResourceInstance(t *testing.T) { - state := NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - // src resource from the state above - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - t.Run("resource to resource instance", func(t *testing.T) { - s := state.DeepCopy() - // For a little extra fun, move a resource to a resource instance: test_thing.foo to test_thing.foo[1] - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) - - s.MoveAbsResourceInstance(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - if len(s.RootModule().Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) - } - - got := s.ResourceInstance(dst) - if got == nil { - t.Fatalf("dst resource not in state") - } - }) - - t.Run("move to new module", func(t *testing.T) { - s := state.DeepCopy() - // test_thing.foo to module.kinder.test_thing.foo["baz"] - dstModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(dstModule) - - s.MoveAbsResourceInstance(src, dst) - - if s.Empty() { - t.Fatal("unexpected empty state") - } - - if s.Module(dstModule) == 
nil { - t.Fatalf("child module %s not in state", dstModule.String()) - } - - if len(s.Module(dstModule).Resources) != 1 { - t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) - } - - got := s.ResourceInstance(dst) - if got == nil { - t.Fatalf("dst resource not in state") - } - }) -} - -func TestState_MaybeMoveAbsResourceInstance(t *testing.T) { - state := NewState() - rootModule := state.RootModule() - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - // For a little extra fun, let's go from a resource to a resource instance: test_thing.foo to test_thing.bar[1] - src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) - - // First move, success - t.Run("first move", func(t *testing.T) { - moved := state.MaybeMoveAbsResourceInstance(src, dst) - if !moved { - t.Fatal("wrong result") - } - got := state.ResourceInstance(dst) - if got == nil { - t.Fatal("destination resource instance not in state") - } - }) - - // Moving a resource instance that doesn't exist in state to a resource which does exist should be a noop. 
- t.Run("noop", func(t *testing.T) { - moved := state.MaybeMoveAbsResourceInstance(src, dst) - if moved { - t.Fatal("wrong result") - } - }) -} - -func TestState_MoveModuleInstance(t *testing.T) { - state := NewState() - srcModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) - m := state.EnsureModule(srcModule) - m.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - dstModule := addrs.RootModuleInstance.Child("child", addrs.IntKey(3)) - state.MoveModuleInstance(srcModule, dstModule) - - // srcModule should have been removed, dstModule should exist and have one resource - if len(state.Modules) != 2 { // kinder[3] and root - t.Fatalf("wrong number of modules in state. 
Expected 2, got %d", len(state.Modules)) - } - - got := state.Module(dstModule) - if got == nil { - t.Fatal("dstModule not found") - } - - gone := state.Module(srcModule) - if gone != nil { - t.Fatal("srcModule not removed from state") - } - - r := got.Resource(mustAbsResourceAddr("test_thing.foo").Resource) - if r.Addr.Module.String() != dstModule.String() { - fmt.Println(r.Addr.Module.String()) - t.Fatal("resource address was not updated") - } - -} - -func TestState_MaybeMoveModuleInstance(t *testing.T) { - state := NewState() - src := addrs.RootModuleInstance.Child("child", addrs.StringKey("a")) - cm := state.EnsureModule(src) - cm.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - dst := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("b")) - - // First move, success - t.Run("first move", func(t *testing.T) { - moved := state.MaybeMoveModuleInstance(src, dst) - if !moved { - t.Fatal("wrong result") - } - }) - - // Second move, should be a noop - t.Run("noop", func(t *testing.T) { - moved := state.MaybeMoveModuleInstance(src, dst) - if moved { - t.Fatal("wrong result") - } - }) -} - -func TestState_MoveModule(t *testing.T) { - // For this test, add two module instances (kinder and kinder["a"]). - // MoveModule(kinder) should move both instances. - state := NewState() // starter state, should be copied by the subtests. 
- srcModule := addrs.RootModule.Child("kinder") - m := state.EnsureModule(srcModule.UnkeyedInstanceShim()) - m.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - moduleInstance := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("a")) - mi := state.EnsureModule(moduleInstance) - mi.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - _, mc := srcModule.Call() - src := mc.Absolute(addrs.RootModuleInstance.Child("kinder", addrs.NoKey)) - - t.Run("basic", func(t *testing.T) { - s := state.DeepCopy() - _, dstMC := addrs.RootModule.Child("child").Call() - dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - s.MoveModule(src, dst) - - // srcModule should have been removed, dstModule should exist and have one resource - if len(s.Modules) != 3 { // child, child["a"] and root - t.Fatalf("wrong number of modules in state. 
Expected 3, got %d", len(s.Modules)) - } - - got := s.Module(dst.Module) - if got == nil { - t.Fatal("dstModule not found") - } - - got = s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) - if got == nil { - t.Fatal("dstModule instance \"a\" not found") - } - - gone := s.Module(srcModule.UnkeyedInstanceShim()) - if gone != nil { - t.Fatal("srcModule not removed from state") - } - }) - - t.Run("nested modules", func(t *testing.T) { - s := state.DeepCopy() - - // add a child module to module.kinder - mi := mustParseModuleInstanceStr(`module.kinder.module.grand[1]`) - m := s.EnsureModule(mi) - m.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &ResourceInstanceObjectSrc{ - Status: ObjectReady, - SchemaVersion: 1, - AttrsJSON: []byte(`{"woozles":"confuzles"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - _, dstMC := addrs.RootModule.Child("child").Call() - dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - s.MoveModule(src, dst) - - moved := s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) - if moved == nil { - t.Fatal("dstModule not found") - } - - // The nested module's relative address should also have been updated - nested := s.Module(mustParseModuleInstanceStr(`module.child.module.grand[1]`)) - if nested == nil { - t.Fatal("nested child module of src wasn't moved") - } - }) -} - -func mustParseModuleInstanceStr(str string) addrs.ModuleInstance { - addr, diags := addrs.ParseModuleInstanceStr(str) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} - -func mustAbsResourceAddr(s string) addrs.AbsResource { - addr, diags := addrs.ParseAbsResourceStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} diff --git a/internal/states/statefile/diagnostics.go b/internal/states/statefile/diagnostics.go 
deleted file mode 100644 index b45b05ee0b3e..000000000000 --- a/internal/states/statefile/diagnostics.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/internal/tfdiags" -) - -const invalidFormat = "Invalid state file format" - -// jsonUnmarshalDiags is a helper that translates errors returned from -// json.Unmarshal into hopefully-more-helpful diagnostics messages. -func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if err == nil { - return diags - } - - switch tErr := err.(type) { - case *json.SyntaxError: - // We've usually already successfully parsed a source file as JSON at - // least once before we'd use jsonUnmarshalDiags with it (to sniff - // the version number) so this particular error should not appear much - // in practice. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - // This is likely to be the most common area, describing a - // non-conformance between the file and the expected file format - // at a semantic level. - if tErr.Field != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), - )) - break - } else { - // Without a field name, we can't really say anything helpful. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - "The state file does not conform to the expected JSON data structure.", - )) - } - default: - // Fallback for all other types of errors. This can happen only for - // custom UnmarshalJSON implementations, so should be encountered - // only rarely. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), - )) - } - - return diags -} diff --git a/internal/states/statemgr/filesystem_test.go b/internal/states/statemgr/filesystem_test.go deleted file mode 100644 index 2f6285fbd1ee..000000000000 --- a/internal/states/statemgr/filesystem_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package statemgr - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/go-test/deep" - version "github.com/hashicorp/go-version" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - tfversion "github.com/hashicorp/terraform/version" -) - -func TestFilesystem(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - ls := testFilesystem(t) - defer os.Remove(ls.readPath) - TestFull(t, ls) -} - -func TestFilesystemRace(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - ls := testFilesystem(t) - defer os.Remove(ls.readPath) - - current := TestFullInitialState() - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - ls.WriteState(current) - }() - } - wg.Wait() -} - -func TestFilesystemLocks(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - s := testFilesystem(t) - defer os.Remove(s.readPath) - - // lock first - info := NewLockInfo() - info.Operation = "test" - lockID, err := s.Lock(info) - if err != nil { - t.Fatal(err) - } - - out, err := exec.Command("go", "run", "testdata/lockstate.go", s.path).CombinedOutput() - if err != nil { - t.Fatal("unexpected lock failure", err, string(out)) - } - - if !strings.Contains(string(out), "lock failed") { - t.Fatal("expected 'locked failed', got", string(out)) - } - - // check our lock info - lockInfo, err := 
s.lockInfo() - if err != nil { - t.Fatal(err) - } - - if lockInfo.Operation != "test" { - t.Fatalf("invalid lock info %#v\n", lockInfo) - } - - // a noop, since we unlock on exit - if err := s.Unlock(lockID); err != nil { - t.Fatal(err) - } - - // local locks can re-lock - lockID, err = s.Lock(info) - if err != nil { - t.Fatal(err) - } - - if err := s.Unlock(lockID); err != nil { - t.Fatal(err) - } - - // we should not be able to unlock the same lock twice - if err := s.Unlock(lockID); err == nil { - t.Fatal("unlocking an unlocked state should fail") - } - - // make sure lock info is gone - lockInfoPath := s.lockInfoPath() - if _, err := os.Stat(lockInfoPath); !os.IsNotExist(err) { - t.Fatal("lock info not removed") - } -} - -// Verify that we can write to the state file, as Windows' mandatory locking -// will prevent writing to a handle different than the one that hold the lock. -func TestFilesystem_writeWhileLocked(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - s := testFilesystem(t) - defer os.Remove(s.readPath) - - // lock first - info := NewLockInfo() - info.Operation = "test" - lockID, err := s.Lock(info) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := s.Unlock(lockID); err != nil { - t.Fatal(err) - } - }() - - if err := s.WriteState(TestFullInitialState()); err != nil { - t.Fatal(err) - } -} - -func TestFilesystem_pathOut(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - f, err := ioutil.TempFile("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - f.Close() - defer os.Remove(f.Name()) - - ls := testFilesystem(t) - ls.path = f.Name() - defer os.Remove(ls.path) - - TestFull(t, ls) -} - -func TestFilesystem_backup(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - f, err := ioutil.TempFile("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - f.Close() - defer os.Remove(f.Name()) - - ls := testFilesystem(t) - backupPath := f.Name() - ls.SetBackupPath(backupPath) - - TestFull(t, ls) - - // The 
backup functionality should've saved a copy of the original state - // prior to all of the modifications that TestFull does. - bfh, err := os.Open(backupPath) - if err != nil { - t.Fatal(err) - } - bf, err := statefile.Read(bfh) - if err != nil { - t.Fatal(err) - } - origState := TestFullInitialState() - if !bf.State.Equal(origState) { - for _, problem := range deep.Equal(origState, bf.State) { - t.Error(problem) - } - } -} - -// This test verifies a particularly tricky behavior where the input file -// is overridden and backups are enabled at the same time. This combination -// requires special care because we must ensure that when we create a backup -// it is of the original contents of the output file (which we're overwriting), -// not the contents of the input file (which is left unchanged). -func TestFilesystem_backupAndReadPath(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - - workDir := t.TempDir() - - markerOutput := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) - - outState := states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue( - markerOutput, - cty.StringVal("from-output-state"), - false, // not sensitive - ) - }) - outFile, err := os.Create(filepath.Join(workDir, "output.tfstate")) - if err != nil { - t.Fatalf("failed to create temporary outFile %s", err) - } - defer outFile.Close() - err = statefile.Write(&statefile.File{ - Lineage: "-", - Serial: 0, - TerraformVersion: version.Must(version.NewVersion("1.2.3")), - State: outState, - }, outFile) - if err != nil { - t.Fatalf("failed to write initial outfile state to %s: %s", outFile.Name(), err) - } - - inState := states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue( - markerOutput, - cty.StringVal("from-input-state"), - false, // not sensitive - ) - }) - inFile, err := os.Create(filepath.Join(workDir, "input.tfstate")) - if err != nil { - t.Fatalf("failed to create temporary inFile %s", err) - } - defer inFile.Close() - err = 
statefile.Write(&statefile.File{ - Lineage: "-", - Serial: 0, - TerraformVersion: version.Must(version.NewVersion("1.2.3")), - State: inState, - }, inFile) - if err != nil { - t.Fatalf("failed to write initial infile state to %s: %s", inFile.Name(), err) - } - - backupPath := outFile.Name() + ".backup" - - ls := NewFilesystemBetweenPaths(inFile.Name(), outFile.Name()) - ls.SetBackupPath(backupPath) - - newState := states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue( - markerOutput, - cty.StringVal("from-new-state"), - false, // not sensitive - ) - }) - err = ls.WriteState(newState) - if err != nil { - t.Fatalf("failed to write new state: %s", err) - } - - // The backup functionality should've saved a copy of the original contents - // of the _output_ file, even though the first snapshot was read from - // the _input_ file. - t.Run("backup file", func(t *testing.T) { - bfh, err := os.Open(backupPath) - if err != nil { - t.Fatal(err) - } - bf, err := statefile.Read(bfh) - if err != nil { - t.Fatal(err) - } - os := bf.State.OutputValue(markerOutput) - if got, want := os.Value, cty.StringVal("from-output-state"); !want.RawEquals(got) { - t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) - } - }) - t.Run("output file", func(t *testing.T) { - ofh, err := os.Open(outFile.Name()) - if err != nil { - t.Fatal(err) - } - of, err := statefile.Read(ofh) - if err != nil { - t.Fatal(err) - } - os := of.State.OutputValue(markerOutput) - if got, want := os.Value, cty.StringVal("from-new-state"); !want.RawEquals(got) { - t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) - } - }) -} - -func TestFilesystem_nonExist(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - ls := NewFilesystem("ishouldntexist") - if err := ls.RefreshState(); err != nil { - t.Fatalf("err: %s", err) - } - - if state := ls.State(); state != nil { - t.Fatalf("bad: %#v", state) - } -} - -func 
TestFilesystem_lockUnlockWithoutWrite(t *testing.T) { - info := NewLockInfo() - info.Operation = "test" - - ls := testFilesystem(t) - - // Delete the just-created tempfile so that Lock recreates it - os.Remove(ls.path) - - // Lock the state, and in doing so recreate the tempfile - lockID, err := ls.Lock(info) - if err != nil { - t.Fatal(err) - } - - if !ls.created { - t.Fatal("should have marked state as created") - } - - if err := ls.Unlock(lockID); err != nil { - t.Fatal(err) - } - - _, err = os.Stat(ls.path) - if os.IsNotExist(err) { - // Success! Unlocking the state successfully deleted the tempfile - return - } else if err != nil { - t.Fatalf("unexpected error from os.Stat: %s", err) - } else { - os.Remove(ls.readPath) - t.Fatal("should have removed path, but exists") - } -} - -func TestFilesystem_impl(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - var _ Reader = new(Filesystem) - var _ Writer = new(Filesystem) - var _ Persister = new(Filesystem) - var _ Refresher = new(Filesystem) - var _ OutputReader = new(Filesystem) - var _ Locker = new(Filesystem) -} - -func testFilesystem(t *testing.T) *Filesystem { - f, err := ioutil.TempFile("", "tf") - if err != nil { - t.Fatalf("failed to create temporary file %s", err) - } - t.Logf("temporary state file at %s", f.Name()) - - err = statefile.Write(&statefile.File{ - Lineage: "test-lineage", - Serial: 0, - TerraformVersion: version.Must(version.NewVersion("1.2.3")), - State: TestFullInitialState(), - }, f) - if err != nil { - t.Fatalf("failed to write initial state to %s: %s", f.Name(), err) - } - f.Close() - - ls := NewFilesystem(f.Name()) - if err := ls.RefreshState(); err != nil { - t.Fatalf("initial refresh failed: %s", err) - } - - return ls -} - -// Make sure we can refresh while the state is locked -func TestFilesystem_refreshWhileLocked(t *testing.T) { - defer testOverrideVersion(t, "1.2.3")() - f, err := ioutil.TempFile("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - - err = 
statefile.Write(&statefile.File{ - Lineage: "test-lineage", - Serial: 0, - TerraformVersion: version.Must(version.NewVersion("1.2.3")), - State: TestFullInitialState(), - }, f) - if err != nil { - t.Fatalf("err: %s", err) - } - f.Close() - - s := NewFilesystem(f.Name()) - defer os.Remove(s.path) - - // lock first - info := NewLockInfo() - info.Operation = "test" - lockID, err := s.Lock(info) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := s.Unlock(lockID); err != nil { - t.Fatal(err) - } - }() - - if err := s.RefreshState(); err != nil { - t.Fatal(err) - } - - readState := s.State() - if readState == nil { - t.Fatal("missing state") - } -} - -func TestFilesystem_GetRootOutputValues(t *testing.T) { - fs := testFilesystem(t) - - outputs, err := fs.GetRootOutputValues() - if err != nil { - t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) - } - - if len(outputs) != 2 { - t.Errorf("Expected %d outputs, but received %d", 2, len(outputs)) - } -} - -func testOverrideVersion(t *testing.T, v string) func() { - oldVersionStr := tfversion.Version - oldPrereleaseStr := tfversion.Prerelease - oldSemVer := tfversion.SemVer - - var newPrereleaseStr string - if dash := strings.Index(v, "-"); dash != -1 { - newPrereleaseStr = v[dash+1:] - v = v[:dash] - } - - newSemVer, err := version.NewVersion(v) - if err != nil { - t.Errorf("invalid override version %q: %s", v, err) - } - newVersionStr := newSemVer.String() - - tfversion.Version = newVersionStr - tfversion.Prerelease = newPrereleaseStr - tfversion.SemVer = newSemVer - - return func() { // reset function - tfversion.Version = oldVersionStr - tfversion.Prerelease = oldPrereleaseStr - tfversion.SemVer = oldSemVer - } -} diff --git a/internal/states/statemgr/helper.go b/internal/states/statemgr/helper.go deleted file mode 100644 index 6cae85702ea9..000000000000 --- a/internal/states/statemgr/helper.go +++ /dev/null @@ -1,54 +0,0 @@ -package statemgr - -// The functions in this 
file are helper wrappers for common sequences of -// operations done against full state managers. - -import ( - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" - "github.com/hashicorp/terraform/version" -) - -// NewStateFile creates a new statefile.File object, with a newly-minted -// lineage identifier and serial 0, and returns a pointer to it. -func NewStateFile() *statefile.File { - return &statefile.File{ - Lineage: NewLineage(), - TerraformVersion: version.SemVer, - State: states.NewState(), - } -} - -// RefreshAndRead refreshes the persistent snapshot in the given state manager -// and then returns it. -// -// This is a wrapper around calling RefreshState and then State on the given -// manager. -func RefreshAndRead(mgr Storage) (*states.State, error) { - err := mgr.RefreshState() - if err != nil { - return nil, err - } - return mgr.State(), nil -} - -// WriteAndPersist writes a snapshot of the given state to the given state -// manager's transient store and then immediately persists it. -// -// The caller must ensure that the given state is not concurrently modified -// while this function is running, but it is safe to modify it after this -// function has returned. -// -// If an error is returned, it is undefined whether the state has been saved -// to the transient store or not, and so the only safe response is to bail -// out quickly with a user-facing error. In situations where more control -// is required, call WriteState and PersistState on the state manager directly -// and handle their errors. 
-func WriteAndPersist(mgr Storage, state *states.State, schemas *terraform.Schemas) error { - err := mgr.WriteState(state) - if err != nil { - return err - } - return mgr.PersistState(schemas) -} diff --git a/internal/states/statemgr/plan.go b/internal/states/statemgr/plan.go deleted file mode 100644 index fb42df31295f..000000000000 --- a/internal/states/statemgr/plan.go +++ /dev/null @@ -1,71 +0,0 @@ -package statemgr - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" -) - -// PlannedStateUpdate is a special helper to obtain a statefile representation -// of a not-yet-written state snapshot that can be written later by a call -// to the companion function WritePlannedStateUpdate. -// -// The statefile object returned here has an unusual interpretation of its -// metadata that is understood only by WritePlannedStateUpdate, and so the -// returned object should not be used for any other purpose. -// -// If the state manager implements Locker then it is the caller's -// responsibility to hold the lock at least for the duration of this call. -// It is not safe to modify the given state concurrently while -// PlannedStateUpdate is running. -func PlannedStateUpdate(mgr Transient, planned *states.State) *statefile.File { - ret := &statefile.File{ - State: planned.DeepCopy(), - } - - // If the given manager uses snapshot metadata then we'll save that - // in our file so we can check it again during WritePlannedStateUpdate. - if mr, ok := mgr.(PersistentMeta); ok { - m := mr.StateSnapshotMeta() - ret.Lineage = m.Lineage - ret.Serial = m.Serial - } - - return ret -} - -// WritePlannedStateUpdate is a companion to PlannedStateUpdate that attempts -// to apply a state update that was planned earlier to the given state -// manager. 
-// -// An error is returned if this function detects that a new state snapshot -// has been written to the backend since the update was planned, since that -// invalidates the plan. An error is returned also if the manager itself -// rejects the given state when asked to store it. -// -// If the returned error is nil, the given manager's transient state snapshot -// is updated to match what was planned. It is the caller's responsibility -// to then persist that state if the manager also implements Persistent and -// the snapshot should be written to the persistent store. -// -// If the state manager implements Locker then it is the caller's -// responsibility to hold the lock at least for the duration of this call. -func WritePlannedStateUpdate(mgr Transient, planned *statefile.File) error { - // If the given manager uses snapshot metadata then we'll check to make - // sure no new snapshots have been created since we planned to write - // the given state file. - if mr, ok := mgr.(PersistentMeta); ok { - m := mr.StateSnapshotMeta() - if planned.Lineage != "" { - if planned.Lineage != m.Lineage { - return fmt.Errorf("planned state update is from an unrelated state lineage than the current state") - } - if planned.Serial != m.Serial { - return fmt.Errorf("stored state has been changed by another operation since the given update was planned") - } - } - } - - return mgr.WriteState(planned.State) -} diff --git a/internal/states/statemgr/testing.go b/internal/states/statemgr/testing.go deleted file mode 100644 index eabf46dc0e40..000000000000 --- a/internal/states/statemgr/testing.go +++ /dev/null @@ -1,163 +0,0 @@ -package statemgr - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" -) - -// TestFull is a helper for testing full state manager 
implementations. It -// expects that the given implementation is pre-loaded with a snapshot of the -// result from TestFullInitialState. -// -// If the given state manager also implements PersistentMeta, this function -// will test that the snapshot metadata changes as expected between calls -// to the methods of Persistent. -func TestFull(t *testing.T, s Full) { - t.Helper() - - if err := s.RefreshState(); err != nil { - t.Fatalf("err: %s", err) - } - - // Check that the initial state is correct. - // These do have different Lineages, but we will replace current below. - initial := TestFullInitialState() - if state := s.State(); !state.Equal(initial) { - t.Fatalf("state does not match expected initial state\n\ngot:\n%s\nwant:\n%s", spew.Sdump(state), spew.Sdump(initial)) - } - - var initialMeta SnapshotMeta - if sm, ok := s.(PersistentMeta); ok { - initialMeta = sm.StateSnapshotMeta() - } - - // Now we've proven that the state we're starting with is an initial - // state, we'll complete our work here with that state, since otherwise - // further writes would violate the invariant that we only try to write - // states that share the same lineage as what was initially written. 
- current := s.State() - - // Write a new state and verify that we have it - current.RootModule().SetOutputValue("bar", cty.StringVal("baz"), false) - - if err := s.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - - if actual := s.State(); !actual.Equal(current) { - t.Fatalf("bad:\n%#v\n\n%#v", actual, current) - } - - // Test persistence - if err := s.PersistState(nil); err != nil { - t.Fatalf("err: %s", err) - } - - // Refresh if we got it - if err := s.RefreshState(); err != nil { - t.Fatalf("err: %s", err) - } - - var newMeta SnapshotMeta - if sm, ok := s.(PersistentMeta); ok { - newMeta = sm.StateSnapshotMeta() - if got, want := newMeta.Lineage, initialMeta.Lineage; got != want { - t.Errorf("Lineage changed from %q to %q", want, got) - } - if after, before := newMeta.Serial, initialMeta.Serial; after == before { - t.Errorf("Serial didn't change from %d after new module added", before) - } - } - - // Same serial - serial := newMeta.Serial - if err := s.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - if err := s.PersistState(nil); err != nil { - t.Fatalf("err: %s", err) - } - - if sm, ok := s.(PersistentMeta); ok { - newMeta = sm.StateSnapshotMeta() - if newMeta.Serial != serial { - t.Fatalf("serial changed after persisting with no changes: got %d, want %d", newMeta.Serial, serial) - } - } - - if sm, ok := s.(PersistentMeta); ok { - newMeta = sm.StateSnapshotMeta() - } - - // Change the serial - current = current.DeepCopy() - current.EnsureModule(addrs.RootModuleInstance).SetOutputValue( - "serialCheck", cty.StringVal("true"), false, - ) - if err := s.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - if err := s.PersistState(nil); err != nil { - t.Fatalf("err: %s", err) - } - - if sm, ok := s.(PersistentMeta); ok { - oldMeta := newMeta - newMeta = sm.StateSnapshotMeta() - - if newMeta.Serial <= serial { - t.Fatalf("serial incorrect after persisting with changes: got %d, want > %d", newMeta.Serial, serial) - } 
- - if newMeta.TerraformVersion != oldMeta.TerraformVersion { - t.Fatalf("TFVersion changed from %s to %s", oldMeta.TerraformVersion, newMeta.TerraformVersion) - } - - // verify that Lineage doesn't change along with Serial, or during copying. - if newMeta.Lineage != oldMeta.Lineage { - t.Fatalf("Lineage changed from %q to %q", oldMeta.Lineage, newMeta.Lineage) - } - } - - // Check that State() returns a copy by modifying the copy and comparing - // to the current state. - stateCopy := s.State() - stateCopy.EnsureModule(addrs.RootModuleInstance.Child("another", addrs.NoKey)) - if reflect.DeepEqual(stateCopy, s.State()) { - t.Fatal("State() should return a copy") - } - - // our current expected state should also marshal identically to the persisted state - if !statefile.StatesMarshalEqual(current, s.State()) { - t.Fatalf("Persisted state altered unexpectedly.\n\ngot:\n%s\nwant:\n%s", spew.Sdump(s.State()), spew.Sdump(current)) - } -} - -// TestFullInitialState is a state that should be snapshotted into a -// full state manager before passing it into TestFull. 
-func TestFullInitialState() *states.State { - state := states.NewState() - childMod := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - rAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "foo", - } - providerAddr := addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider(rAddr.ImpliedProvider()), - Module: addrs.RootModule, - } - childMod.SetResourceProvider(rAddr, providerAddr) - - state.RootModule().SetOutputValue("sensitive_output", cty.StringVal("it's a secret"), true) - state.RootModule().SetOutputValue("nonsensitive_output", cty.StringVal("hello, world!"), false) - - return state -} diff --git a/internal/terraform/context.go b/internal/terraform/context.go deleted file mode 100644 index 18b3f4b37b54..000000000000 --- a/internal/terraform/context.go +++ /dev/null @@ -1,431 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sort" - "sync" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeProvider asks for provider variables - InputModeProvider InputMode = 1 << iota - - // InputModeStd is the standard operating mode and asks for both variables - // and providers. - InputModeStd = InputModeProvider -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. 
-type ContextOpts struct { - Meta *ContextMeta - Hooks []Hook - Parallelism int - Providers map[addrs.Provider]providers.Factory - Provisioners map[string]provisioners.Factory - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information -// that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment - - // OriginalWorkingDir is the working directory where the Terraform CLI - // was run from, which may no longer actually be the current working - // directory if the user included the -chdir=... option. - // - // If this string is empty then the original working directory is the same - // as the current working directory. - // - // In most cases we should respect the user's override by ignoring this - // path and just using the current working directory, but this is here - // for some exceptional cases where the original working directory is - // needed. - OriginalWorkingDir string -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. -type Context struct { - // meta captures some misc. information about the working directory where - // we're taking these actions, and thus which should remain steady between - // operations. - meta *ContextMeta - - plugins *contextPlugins - - hooks []Hook - sh *stopHook - uiInput UIInput - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]cty.Value - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc -} - -// (additional methods on Context can be found in context_*.go files.) - -// NewContext creates a new Context structure. 
-// -// Once a Context is created, the caller must not access or mutate any of -// the objects referenced (directly or indirectly) by the ContextOpts fields. -// -// If the returned diagnostics contains errors then the resulting context is -// invalid and must not be used. -func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - log.Printf("[TRACE] terraform.NewContext: starting") - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - // We throw an error in case of negative parallelism - par := opts.Parallelism - if par < 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid parallelism value", - fmt.Sprintf("The parallelism must be a positive value. 
Not %d.", par), - )) - return nil, diags - } - - if par == 0 { - par = 10 - } - - plugins := newContextPlugins(opts.Providers, opts.Provisioners) - - log.Printf("[TRACE] terraform.NewContext: complete") - - return &Context{ - hooks: hooks, - meta: opts.Meta, - uiInput: opts.UIInput, - - plugins: plugins, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]cty.Value), - sh: sh, - }, diags -} - -func (c *Context) Schemas(config *configs.Config, state *states.State) (*Schemas, tfdiags.Diagnostics) { - // TODO: This method gets called multiple times on the same context with - // the same inputs by different parts of Terraform that all need the - // schemas, and it's typically quite expensive because it has to spin up - // plugins to gather their schemas, so it'd be good to have some caching - // here to remember plugin schemas we already loaded since the plugin - // selections can't change during the life of a *Context object. - - var diags tfdiags.Diagnostics - - ret, err := loadSchemas(config, state, c.plugins) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to load plugin schemas", - fmt.Sprintf("Error while loading schemas for plugin components: %s.", err), - )) - return nil, diags - } - return ret, diags -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Stop stops the running task. -// -// Stop will block until the task completes. 
-func (c *Context) Stop() { - log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") - - c.l.Lock() - defer c.l.Unlock() - - // If we're running, then stop - if c.runContextCancel != nil { - log.Printf("[WARN] terraform: run context exists, stopping") - - // Tell the hook we want to stop - c.sh.Stop() - - // Stop the context - c.runContextCancel() - c.runContextCancel = nil - } - - // Grab the condition var before we exit - if cond := c.runCond; cond != nil { - log.Printf("[INFO] terraform: waiting for graceful stop to complete") - cond.Wait() - } - - log.Printf("[WARN] terraform: stop complete") -} - -func (c *Context) acquireRun(phase string) func() { - // With the run lock held, grab the context lock to make changes - // to the run context. - c.l.Lock() - defer c.l.Unlock() - - // Wait until we're no longer running - for c.runCond != nil { - c.runCond.Wait() - } - - // Build our lock - c.runCond = sync.NewCond(&c.l) - - // Create a new run context - c.runContext, c.runContextCancel = context.WithCancel(context.Background()) - - // Reset the stop hook so we're not stopped - c.sh.Reset() - - return c.releaseRun -} - -func (c *Context) releaseRun() { - // Grab the context lock so that we can make modifications to fields - c.l.Lock() - defer c.l.Unlock() - - // End our run. We check if runContext is non-nil because it can be - // set to nil if it was cancelled via Stop() - if c.runContextCancel != nil { - c.runContextCancel() - } - - // Unlock all waiting our condition - cond := c.runCond - c.runCond = nil - cond.Broadcast() - - // Unset the context - c.runContext = nil -} - -// watchStop immediately returns a `stop` and a `wait` chan after dispatching -// the watchStop goroutine. This will watch the runContext for cancellation and -// stop the providers accordingly. When the watch is no longer needed, the -// `stop` chan should be closed before waiting on the `wait` chan. 
-// The `wait` chan is important, because without synchronizing with the end of -// the watchStop goroutine, the runContext may also be closed during the select -// incorrectly causing providers to be stopped. Even if the graph walk is done -// at that point, stopping a provider permanently cancels its StopContext which -// can cause later actions to fail. -func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { - stop := make(chan struct{}) - wait := make(chan struct{}) - - // get the runContext cancellation channel now, because releaseRun will - // write to the runContext field. - done := c.runContext.Done() - - go func() { - defer logging.PanicHandler() - - defer close(wait) - // Wait for a stop or completion - select { - case <-done: - // done means the context was canceled, so we need to try and stop - // providers. - case <-stop: - // our own stop channel was closed. - return - } - - // If we're here, we're stopped, trigger the call. - log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") - - { - // Copy the providers so that a misbehaved blocking Stop doesn't - // completely hang Terraform. - walker.providerLock.Lock() - ps := make([]providers.Interface, 0, len(walker.providerCache)) - for _, p := range walker.providerCache { - ps = append(ps, p) - } - defer walker.providerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. 
- p.Stop() - } - } - - { - // Call stop on all the provisioners - walker.provisionerLock.Lock() - ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) - for _, p := range walker.provisionerCache { - ps = append(ps, p) - } - defer walker.provisionerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - }() - - return stop, wait -} - -// checkConfigDependencies checks whether the recieving context is able to -// support the given configuration, returning error diagnostics if not. -// -// Currently this function checks whether the current Terraform CLI version -// matches the version requirements of all of the modules, and whether our -// plugin library contains all of the plugin names/addresses needed. -// -// This function does *not* check that external modules are installed (that's -// the responsibility of the configuration loader) and doesn't check that the -// plugins are of suitable versions to match any version constraints (which is -// the responsibility of the code which installed the plugins and then -// constructed the Providers/Provisioners maps passed in to NewContext). -// -// In most cases we should typically catch the problems this function detects -// before we reach this point, but this function can come into play in some -// unusual cases outside of the main workflow, and can avoid some -// potentially-more-confusing errors from later operations. -func (c *Context) checkConfigDependencies(config *configs.Config) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // This checks the Terraform CLI version constraints specified in all of - // the modules. 
- diags = diags.Append(CheckCoreVersionRequirements(config)) - - // We only check that we have a factory for each required provider, and - // assume the caller already assured that any separately-installed - // plugins are of a suitable version, match expected checksums, etc. - providerReqs, hclDiags := config.ProviderRequirements() - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return diags - } - for providerAddr := range providerReqs { - if !c.plugins.HasProvider(providerAddr) { - if !providerAddr.IsBuiltIn() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - fmt.Sprintf( - "This configuration requires provider %s, but that provider isn't available. You may be able to install it automatically by running:\n terraform init", - providerAddr, - ), - )) - } else { - // Built-in providers can never be installed by "terraform init", - // so no point in confusing the user by suggesting that. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - fmt.Sprintf( - "This configuration requires built-in provider %s, but that provider isn't available in this Terraform version.", - providerAddr, - ), - )) - } - } - } - - // Our handling of provisioners is much less sophisticated than providers - // because they are in many ways a legacy system. We need to go hunting - // for them more directly in the configuration. - config.DeepEach(func(modCfg *configs.Config) { - if modCfg == nil || modCfg.Module == nil { - return // should not happen, but we'll be robust - } - for _, rc := range modCfg.Module.ManagedResources { - if rc.Managed == nil { - continue // should not happen, but we'll be robust - } - for _, pc := range rc.Managed.Provisioners { - if !c.plugins.HasProvisioner(pc.Type) { - // This is not a very high-quality error, because really - // the caller of terraform.NewContext should've already - // done equivalent checks when doing plugin discovery. 
- // This is just to make sure we return a predictable - // error in a central place, rather than failing somewhere - // later in the non-deterministically-ordered graph walk. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Missing required provisioner plugin", - fmt.Sprintf( - "This configuration requires provisioner plugin %q, which isn't available. If you're intending to use an external provisioner plugin, you must install it manually into one of the plugin search directories before running Terraform.", - pc.Type, - ), - )) - } - } - } - }) - - // Because we were doing a lot of map iteration above, and we're only - // generating sourceless diagnostics anyway, our diagnostics will not be - // in a deterministic order. To ensure stable output when there are - // multiple errors to report, we'll sort these particular diagnostics - // so they are at least always consistent alone. This ordering is - // arbitrary and not a compatibility constraint. - sort.Slice(diags, func(i, j int) bool { - // Because these are sourcelss diagnostics and we know they are all - // errors, we know they'll only differ in their description fields. 
- descI := diags[i].Description() - descJ := diags[j].Description() - switch { - case descI.Summary != descJ.Summary: - return descI.Summary < descJ.Summary - default: - return descI.Detail < descJ.Detail - } - }) - - return diags -} diff --git a/internal/terraform/context_apply.go b/internal/terraform/context_apply.go deleted file mode 100644 index ada934e3f5c6..000000000000 --- a/internal/terraform/context_apply.go +++ /dev/null @@ -1,187 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Apply performs the actions described by the given Plan object and returns -// the resulting updated state. -// -// The given configuration *must* be the same configuration that was passed -// earlier to Context.Plan in order to create this plan. -// -// Even if the returned diagnostics contains errors, Apply always returns the -// resulting state which is likely to have been partially-updated. 
-func (c *Context) Apply(plan *plans.Plan, config *configs.Config) (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("apply")() - - log.Printf("[DEBUG] Building and walking apply graph for %s plan", plan.UIMode) - - if plan.Errored { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot apply failed plan", - `The given plan is incomplete due to errors during planning, and so it cannot be applied.`, - )) - return nil, diags - } - - graph, operation, diags := c.applyGraph(plan, config, true) - if diags.HasErrors() { - return nil, diags - } - - workingState := plan.PriorState.DeepCopy() - walker, walkDiags := c.walk(graph, operation, &graphWalkOpts{ - Config: config, - InputState: workingState, - Changes: plan.Changes, - - // We need to propagate the check results from the plan phase, - // because that will tell us which checkable objects we're expecting - // to see updated results from during the apply step. - PlanTimeCheckResults: plan.Checks, - }) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - - // After the walk is finished, we capture a simplified snapshot of the - // check result data as part of the new state. - walker.State.RecordCheckResults(walker.Checks) - - newState := walker.State.Close() - if plan.UIMode == plans.DestroyMode && !diags.HasErrors() { - // NOTE: This is a vestigial violation of the rule that we mustn't - // use plan.UIMode to affect apply-time behavior. - // We ideally ought to just call newState.PruneResourceHusks - // unconditionally here, but we historically didn't and haven't yet - // verified that it'd be safe to do so. 
- newState.PruneResourceHusks() - } - - if len(plan.TargetAddrs) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Applied changes may be incomplete", - `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: - terraform plan - -Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - // FIXME: we cannot check for an empty plan for refresh-only, because root - // outputs are always stored as changes. The final condition of the state - // also depends on some cleanup which happens during the apply walk. It - // would probably make more sense if applying a refresh-only plan were - // simply just returning the planned state and checks, but some extra - // cleanup is going to be needed to make the plan state match what apply - // would do. For now we can copy the checks over which were overwritten - // during the apply walk. - // Despite the intent of UIMode, it must still be used for apply-time - // differences in destroy plans too, so we can make use of that here as - // well. 
- if plan.UIMode == plans.RefreshOnlyMode { - newState.CheckResults = plan.Checks.DeepCopy() - } - - return newState, diags -} - -func (c *Context) applyGraph(plan *plans.Plan, config *configs.Config, validate bool) (*Graph, walkOperation, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - variables := InputValues{} - for name, dyVal := range plan.VariableValues { - val, err := dyVal.Decode(cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid variable value in plan", - fmt.Sprintf("Invalid value for variable %q recorded in plan file: %s.", name, err), - )) - continue - } - - variables[name] = &InputValue{ - Value: val, - SourceType: ValueFromPlan, - } - } - if diags.HasErrors() { - return nil, walkApply, diags - } - - // The plan.VariableValues field only records variables that were actually - // set by the caller in the PlanOpts, so we may need to provide - // placeholders for any other variables that the user didn't set, in - // which case Terraform will once again use the default value from the - // configuration when we visit these variables during the graph walk. - for name := range config.Module.Variables { - if _, ok := variables[name]; ok { - continue - } - variables[name] = &InputValue{ - Value: cty.NilVal, - SourceType: ValueFromPlan, - } - } - - operation := walkApply - if plan.UIMode == plans.DestroyMode { - // FIXME: Due to differences in how objects must be handled in the - // graph and evaluated during a complete destroy, we must continue to - // use plans.DestroyMode to switch on this behavior. If all objects - // which require special destroy handling can be tracked in the plan, - // then this switch will no longer be needed and we can remove the - // walkDestroy operation mode. - // TODO: Audit that and remove walkDestroy as an operation mode. 
- operation = walkDestroy - } - - graph, moreDiags := (&ApplyGraphBuilder{ - Config: config, - Changes: plan.Changes, - State: plan.PriorState, - RootVariableValues: variables, - Plugins: c.plugins, - Targets: plan.TargetAddrs, - ForceReplace: plan.ForceReplaceAddrs, - Operation: operation, - }).Build(addrs.RootModuleInstance) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, walkApply, diags - } - - return graph, operation, diags -} - -// ApplyGraphForUI is a last vestage of graphs in the public interface of -// Context (as opposed to graphs as an implementation detail) intended only for -// use by the "terraform graph" command when asked to render an apply-time -// graph. -// -// The result of this is intended only for rendering ot the user as a dot -// graph, and so may change in future in order to make the result more useful -// in that context, even if drifts away from the physical graph that Terraform -// Core currently uses as an implementation detail of planning. -func (c *Context) ApplyGraphForUI(plan *plans.Plan, config *configs.Config) (*Graph, tfdiags.Diagnostics) { - // For now though, this really is just the internal graph, confusing - // implementation details and all. 
- - var diags tfdiags.Diagnostics - - graph, _, moreDiags := c.applyGraph(plan, config, false) - diags = diags.Append(moreDiags) - return graph, diags -} diff --git a/internal/terraform/context_apply2_test.go b/internal/terraform/context_apply2_test.go deleted file mode 100644 index 97769176dd3b..000000000000 --- a/internal/terraform/context_apply2_test.go +++ /dev/null @@ -1,1956 +0,0 @@ -package terraform - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Test that the PreApply hook is called with the correct deposed key -func TestContext2Apply_createBeforeDestroy_deposedKeyPreApply(t *testing.T) { - m := testModule(t, "apply-cbd-deposed-only") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - deposedKey := states.NewDeposedKey() - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("aws_instance.bar").Resource, - deposedKey, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - hook := new(MockHook) - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{hook}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Verify PreApply was called correctly - if !hook.PreApplyCalled { - t.Fatalf("PreApply hook not called") - } - if addr, wantAddr := hook.PreApplyAddr, mustResourceInstanceAddr("aws_instance.bar"); !addr.Equal(wantAddr) { - t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) - } - if gen := hook.PreApplyGen; gen != deposedKey { - t.Errorf("expected gen to be %q, but was %q", deposedKey, gen) - } -} - -func TestContext2Apply_destroyWithDataSourceExpansion(t *testing.T) { - // While managed resources store their destroy-time dependencies, data - // sources do not. This means that if a provider were only included in a - // destroy graph because of data sources, it could have dependencies which - // are not correctly ordered. Here we verify that the provider is not - // included in the destroy operation, and all dependency evaluations - // succeed. - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" -} - -provider "other" { - foo = module.mod.data -} - -# this should not require the provider be present during destroy -data "other_data_source" "a" { -} -`, - - "mod/main.tf": ` -data "test_data_source" "a" { - count = 1 -} - -data "test_data_source" "b" { - count = data.test_data_source.a[0].foo == "ok" ? 1 : 0 -} - -output "data" { - value = data.test_data_source.a[0].foo == "ok" ? 
data.test_data_source.b[0].foo : "nope" -} -`, - }) - - testP := testProvider("test") - otherP := testProvider("other") - - readData := func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_source"), - "foo": cty.StringVal("ok"), - }), - } - } - - testP.ReadDataSourceFn = readData - otherP.ReadDataSourceFn = readData - - ps := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), - addrs.NewDefaultProvider("other"): testProviderFuncFixed(otherP), - } - - otherP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - foo := req.Config.GetAttr("foo") - if foo.IsNull() || foo.AsString() != "ok" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect config val: %#v\n", foo)) - } - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: ps, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - // now destroy the whole thing - ctx = testContext2(t, &ContextOpts{ - Providers: ps, - }) - - plan, diags = ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - otherP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - // should not be used to destroy data sources - resp.Diagnostics = resp.Diagnostics.Append(errors.New("provider should not be used")) - return resp - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } -} - -func TestContext2Apply_destroyThenUpdate(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { - value = 
"udpated" -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - var orderMu sync.Mutex - var order []string - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - id := req.PriorState.GetAttr("id").AsString() - if id == "b" { - // slow down the b destroy, since a should wait for it - time.Sleep(100 * time.Millisecond) - } - - orderMu.Lock() - order = append(order, id) - orderMu.Unlock() - - resp.NewState = req.PlannedState - return resp - } - - addrA := mustResourceInstanceAddr(`test_instance.a`) - addrB := mustResourceInstanceAddr(`test_instance.b`) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"a","value":"old","type":"test"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - - // test_instance.b depended on test_instance.a, and therefor should be - // destroyed before any changes to test_instance.a - s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"b"}`), - Status: states.ObjectReady, - Dependencies: []addrs.ConfigResource{addrA.ContainingResource().Config()}, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if order[0] != "b" { - t.Fatalf("expected apply order [b, a], got: %v\n", order) - } -} - -// verify that dependencies are updated in the state during refresh and apply -func TestApply_updateDependencies(t *testing.T) { - state := states.NewState() - root := 
state.EnsureModule(addrs.RootModuleInstance) - - fooAddr := mustResourceInstanceAddr("aws_instance.foo") - barAddr := mustResourceInstanceAddr("aws_instance.bar") - bazAddr := mustResourceInstanceAddr("aws_instance.baz") - bamAddr := mustResourceInstanceAddr("aws_instance.bam") - binAddr := mustResourceInstanceAddr("aws_instance.bin") - root.SetResourceInstanceCurrent( - fooAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Dependencies: []addrs.ConfigResource{ - bazAddr.ContainingResource().Config(), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - binAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bin","type":"aws_instance","unknown":"ok"}`), - Dependencies: []addrs.ConfigResource{ - bazAddr.ContainingResource().Config(), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - bazAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz"}`), - Dependencies: []addrs.ConfigResource{ - // Existing dependencies should not be removed from orphaned instances - bamAddr.ContainingResource().Config(), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - barAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "bar" { - foo = aws_instance.foo.id -} - -resource "aws_instance" "foo" { -} - -resource "aws_instance" "bin" { -} -`, - }) - - p := testProvider("aws") - - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - bar := plan.PriorState.ResourceInstance(barAddr) - if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) { - t.Fatalf("bar should depend on foo after refresh, but got %s", bar.Current.Dependencies) - } - - foo := plan.PriorState.ResourceInstance(fooAddr) - if len(foo.Current.Dependencies) == 0 || !foo.Current.Dependencies[0].Equal(bazAddr.ContainingResource().Config()) { - t.Fatalf("foo should depend on baz after refresh because of the update, but got %s", foo.Current.Dependencies) - } - - bin := plan.PriorState.ResourceInstance(binAddr) - if len(bin.Current.Dependencies) != 0 { - t.Fatalf("bin should depend on nothing after refresh because there is no change, but got %s", bin.Current.Dependencies) - } - - baz := plan.PriorState.ResourceInstance(bazAddr) - if len(baz.Current.Dependencies) == 0 || !baz.Current.Dependencies[0].Equal(bamAddr.ContainingResource().Config()) { - t.Fatalf("baz should depend on bam after refresh, but got %s", baz.Current.Dependencies) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - bar = state.ResourceInstance(barAddr) - if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) { - t.Fatalf("bar should still depend on foo after apply, but got %s", bar.Current.Dependencies) - } - - foo = state.ResourceInstance(fooAddr) - if len(foo.Current.Dependencies) != 0 { - t.Fatalf("foo should have no deps after apply, but got %s", foo.Current.Dependencies) - } - -} - -func TestContext2Apply_additionalSensitiveFromState(t *testing.T) { - // Ensure we're not trying to double-mark values decoded from state - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "secret" { - 
sensitive = true - default = ["secret"] -} - -resource "test_resource" "a" { - sensitive_attr = var.secret -} - -resource "test_resource" "b" { - value = test_resource.a.id -} -`, - }) - - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - }, - "sensitive_attr": { - Type: cty.List(cty.String), - Optional: true, - Sensitive: true, - }, - }, - }, - }, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`test_resource.a`), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"a","sensitive_attr":["secret"]}`), - AttrSensitivePaths: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("sensitive_attr"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Apply_sensitiveOutputPassthrough(t *testing.T) { - // Ensure we're not trying to double-mark values decoded from state - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" -} - -resource "test_object" "a" { - test_string = module.mod.out -} -`, - - "mod/main.tf": ` -variable "in" { - sensitive = true - default = "foo" -} -output "out" { - value = var.in -} -`, - }) - - p := 
simpleMockProvider() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - obj := state.ResourceInstance(mustResourceInstanceAddr("test_object.a")) - if len(obj.Current.AttrSensitivePaths) != 1 { - t.Fatalf("Expected 1 sensitive mark for test_object.a, got %#v\n", obj.Current.AttrSensitivePaths) - } - - plan, diags = ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - // make sure the same marks are compared in the next plan as well - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Errorf("Unexpcetd %s change for %s", c.Action, c.Addr) - } - } -} - -func TestContext2Apply_ignoreImpureFunctionChanges(t *testing.T) { - // The impure function call should not cause a planned change with - // ignore_changes - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "pw" { - sensitive = true - default = "foo" -} - -resource "test_object" "x" { - test_map = { - string = "X${bcrypt(var.pw)}" - } - lifecycle { - ignore_changes = [ test_map["string"] ] - } -} - -resource "test_object" "y" { - test_map = { - string = "X${bcrypt(var.pw)}" - } - lifecycle { - ignore_changes = [ test_map ] - } -} - -`, - }) - - p := simpleMockProvider() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // FINAL PLAN: - plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, 
testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - // make sure the same marks are compared in the next plan as well - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Logf("marks before: %#v", c.BeforeValMarks) - t.Logf("marks after: %#v", c.AfterValMarks) - t.Errorf("Unexpcetd %s change for %s", c.Action, c.Addr) - } - } -} - -func TestContext2Apply_destroyWithDeposed(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "x" { - test_string = "ok" - lifecycle { - create_before_destroy = true - } -}`, - }) - - p := simpleMockProvider() - - deposedKey := states.NewDeposedKey() - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("test_object.x").Resource, - deposedKey, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"deposed"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("plan: %s", diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply: %s", diags.Err()) - } - -} - -func TestContext2Apply_nullableVariables(t *testing.T) { - m := testModule(t, "apply-nullable-variables") - state := states.NewState() - ctx := testContext2(t, &ContextOpts{}) - plan, diags := ctx.Plan(m, state, &PlanOpts{}) - if diags.HasErrors() { - t.Fatalf("plan: %s", diags.Err()) - } - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply: %s", diags.Err()) - } - - outputs := state.Module(addrs.RootModuleInstance).OutputValues - // we check for null outputs be seeing that 
they don't exists - if _, ok := outputs["nullable_null_default"]; ok { - t.Error("nullable_null_default: expected no output value") - } - if _, ok := outputs["nullable_non_null_default"]; ok { - t.Error("nullable_non_null_default: expected no output value") - } - if _, ok := outputs["nullable_no_default"]; ok { - t.Error("nullable_no_default: expected no output value") - } - - if v := outputs["non_nullable_default"].Value; v.AsString() != "ok" { - t.Fatalf("incorrect 'non_nullable_default' output value: %#v\n", v) - } - if v := outputs["non_nullable_no_default"].Value; v.AsString() != "ok" { - t.Fatalf("incorrect 'non_nullable_no_default' output value: %#v\n", v) - } -} - -func TestContext2Apply_targetedDestroyWithMoved(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "modb" { - source = "./mod" - for_each = toset(["a", "b"]) -} -`, - "./mod/main.tf": ` -resource "test_object" "a" { -} - -module "sub" { - for_each = toset(["a", "b"]) - source = "./sub" -} - -moved { - from = module.old - to = module.sub -} -`, - "./mod/sub/main.tf": ` -resource "test_object" "s" { -} -`}) - - p := simpleMockProvider() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // destroy only a single instance not included in the moved statements - _, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{mustResourceInstanceAddr(`module.modb["a"].test_object.a`)}, - }) - assertNoErrors(t, diags) -} - -func TestContext2Apply_graphError(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { - test_string = "ok" -} - -resource "test_object" "b" { - test_string = test_object.a.test_string -} -`, 
- }) - - p := simpleMockProvider() - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"ok"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"ok"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("plan: %s", diags.Err()) - } - - // We're going to corrupt the stored state so that the dependencies will - // cause a cycle when building the apply graph. - testObjA := plan.PriorState.Modules[""].Resources["test_object.a"].Instances[addrs.NoKey].Current - testObjA.Dependencies = append(testObjA.Dependencies, mustResourceInstanceAddr("test_object.b").ContainingResource().Config()) - - _, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("expected cycle error from apply") - } -} - -func TestContext2Apply_resourcePostcondition(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "boop" { - type = string -} - -resource "test_resource" "a" { - value = var.boop -} - -resource "test_resource" "b" { - value = test_resource.a.output - lifecycle { - postcondition { - condition = self.output != "" - error_message = "Output must not be blank." 
- } - } -} - -resource "test_resource" "c" { - value = test_resource.b.output -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - "output": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - m := req.ProposedNewState.AsValueMap() - m["output"] = cty.UnknownVal(cty.String) - - resp.PlannedState = cty.ObjectVal(m) - resp.LegacyTypeSystem = true - return resp - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - t.Run("condition pass", func(t *testing.T) { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(plan.Changes.Resources) != 3 { - t.Fatalf("unexpected plan changes: %#v", plan.Changes) - } - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - m := req.PlannedState.AsValueMap() - m["output"] = cty.StringVal(fmt.Sprintf("new-%s", m["value"].AsString())) - - resp.NewState = cty.ObjectVal(m) - return resp - } - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - wantResourceAttrs := map[string]struct{ value, output string }{ - "a": {"boop", "new-boop"}, - "b": {"new-boop", "new-new-boop"}, - "c": {"new-new-boop", "new-new-new-boop"}, - } - for name, attrs := range wantResourceAttrs { - addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) - r := 
state.ResourceInstance(addr) - rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ - "value": cty.String, - "output": cty.String, - })) - if err != nil { - t.Fatalf("error decoding test_resource.a: %s", err) - } - want := cty.ObjectVal(map[string]cty.Value{ - "value": cty.StringVal(attrs.value), - "output": cty.StringVal(attrs.output), - }) - if !cmp.Equal(want, rd.Value, valueComparer) { - t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) - } - } - }) - t.Run("condition fail", func(t *testing.T) { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(plan.Changes.Resources) != 3 { - t.Fatalf("unexpected plan changes: %#v", plan.Changes) - } - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - m := req.PlannedState.AsValueMap() - - // For the resource with a constraint, fudge the output to make the - // condition fail. 
- if value := m["value"].AsString(); value == "new-boop" { - m["output"] = cty.StringVal("") - } else { - m["output"] = cty.StringVal(fmt.Sprintf("new-%s", value)) - } - - resp.NewState = cty.ObjectVal(m) - return resp - } - state, diags := ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - - // Resources a and b should still be recorded in state - wantResourceAttrs := map[string]struct{ value, output string }{ - "a": {"boop", "new-boop"}, - "b": {"new-boop", ""}, - } - for name, attrs := range wantResourceAttrs { - addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) - r := state.ResourceInstance(addr) - rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ - "value": cty.String, - "output": cty.String, - })) - if err != nil { - t.Fatalf("error decoding test_resource.a: %s", err) - } - want := cty.ObjectVal(map[string]cty.Value{ - "value": cty.StringVal(attrs.value), - "output": cty.StringVal(attrs.output), - }) - if !cmp.Equal(want, rd.Value, valueComparer) { - t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) - } - } - - // Resource c should not be in state - if state.ResourceInstance(mustResourceInstanceAddr("test_resource.c")) != nil { - t.Error("test_resource.c should not exist in state, but is") - } - }) -} - -func TestContext2Apply_outputValuePrecondition(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` - variable "input" { - type = string - } - - module "child" { - source = "./child" - - input = var.input - } - - output "result" { - value = module.child.result - - precondition { - condition = var.input != "" - error_message = "Input must not be empty." 
- } - } - `, - "child/main.tf": ` - variable "input" { - type = string - } - - output "result" { - value = var.input - - precondition { - condition = var.input != "" - error_message = "Input must not be empty." - } - } - `, - }) - - checkableObjects := []addrs.Checkable{ - addrs.OutputValue{Name: "result"}.Absolute(addrs.RootModuleInstance), - addrs.OutputValue{Name: "result"}.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), - } - - t.Run("pass", func(t *testing.T) { - ctx := testContext2(t, &ContextOpts{}) - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "input": &InputValue{ - Value: cty.StringVal("beep"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoDiagnostics(t, diags) - - for _, addr := range checkableObjects { - result := plan.Checks.GetObjectResult(addr) - if result == nil { - t.Fatalf("no check result for %s in the plan", addr) - } - if got, want := result.Status, checks.StatusPass; got != want { - t.Fatalf("wrong check status for %s during planning\ngot: %s\nwant: %s", addr, got, want) - } - } - - state, diags := ctx.Apply(plan, m) - assertNoDiagnostics(t, diags) - for _, addr := range checkableObjects { - result := state.CheckResults.GetObjectResult(addr) - if result == nil { - t.Fatalf("no check result for %s in the final state", addr) - } - if got, want := result.Status, checks.StatusPass; got != want { - t.Errorf("wrong check status for %s after apply\ngot: %s\nwant: %s", addr, got, want) - } - } - }) - - t.Run("fail", func(t *testing.T) { - // NOTE: This test actually catches a failure during planning and so - // cannot proceed to apply, so it's really more of a plan test - // than an apply test but better to keep all of these - // thematically-related test cases together. 
- ctx := testContext2(t, &ContextOpts{}) - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "input": &InputValue{ - Value: cty.StringVal(""), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } - - const wantSummary = "Module output value precondition failed" - found := false - for _, diag := range diags { - if diag.Severity() == tfdiags.Error && diag.Description().Summary == wantSummary { - found = true - break - } - } - - if !found { - t.Fatalf("missing expected error\nwant summary: %s\ngot: %s", wantSummary, diags.Err().Error()) - } - }) -} - -func TestContext2Apply_resourceConditionApplyTimeFail(t *testing.T) { - // This tests the less common situation where a condition fails due to - // a change in a resource other than the one the condition is attached to, - // and the condition result is unknown during planning. - // - // This edge case is a tricky one because it relies on Terraform still - // visiting test_resource.b (in the configuration below) to evaluate - // its conditions even though there aren't any changes directly planned - // for it, so that we can consider whether changes to test_resource.a - // have changed the outcome. - - m := testModuleInline(t, map[string]string{ - "main.tf": ` - variable "input" { - type = string - } - - resource "test_resource" "a" { - value = var.input - } - - resource "test_resource" "b" { - value = "beep" - - lifecycle { - postcondition { - condition = test_resource.a.output == self.output - error_message = "Outputs must match." 
- } - } - } - `, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - "output": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // Whenever "value" changes, "output" follows it during the apply step, - // but is initially unknown during the plan step. - - m := req.ProposedNewState.AsValueMap() - priorVal := cty.NullVal(cty.String) - if !req.PriorState.IsNull() { - priorVal = req.PriorState.GetAttr("value") - } - if m["output"].IsNull() || !priorVal.RawEquals(m["value"]) { - m["output"] = cty.UnknownVal(cty.String) - } - - resp.PlannedState = cty.ObjectVal(m) - resp.LegacyTypeSystem = true - return resp - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - m := req.PlannedState.AsValueMap() - m["output"] = m["value"] - resp.NewState = cty.ObjectVal(m) - return resp - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - instA := mustResourceInstanceAddr("test_resource.a") - instB := mustResourceInstanceAddr("test_resource.b") - - // Preparation: an initial plan and apply with a correct input variable - // should succeed and give us a valid and complete state to use for the - // subsequent plan and apply that we'll expect to fail. 
- var prevRunState *states.State - { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "input": &InputValue{ - Value: cty.StringVal("beep"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - planA := plan.Changes.ResourceInstance(instA) - if planA == nil || planA.Action != plans.Create { - t.Fatalf("incorrect initial plan for instance A\nwant a 'create' change\ngot: %s", spew.Sdump(planA)) - } - planB := plan.Changes.ResourceInstance(instB) - if planB == nil || planB.Action != plans.Create { - t.Fatalf("incorrect initial plan for instance B\nwant a 'create' change\ngot: %s", spew.Sdump(planB)) - } - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - stateA := state.ResourceInstance(instA) - if stateA == nil || stateA.Current == nil || !bytes.Contains(stateA.Current.AttrsJSON, []byte(`"beep"`)) { - t.Fatalf("incorrect initial state for instance A\ngot: %s", spew.Sdump(stateA)) - } - stateB := state.ResourceInstance(instB) - if stateB == nil || stateB.Current == nil || !bytes.Contains(stateB.Current.AttrsJSON, []byte(`"beep"`)) { - t.Fatalf("incorrect initial state for instance B\ngot: %s", spew.Sdump(stateB)) - } - prevRunState = state - } - - // Now we'll run another plan and apply with a different value for - // var.input that should cause the test_resource.b condition to be unknown - // during planning and then fail during apply. 
- { - plan, diags := ctx.Plan(m, prevRunState, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "input": &InputValue{ - Value: cty.StringVal("boop"), // NOTE: This has changed - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - planA := plan.Changes.ResourceInstance(instA) - if planA == nil || planA.Action != plans.Update { - t.Fatalf("incorrect initial plan for instance A\nwant an 'update' change\ngot: %s", spew.Sdump(planA)) - } - planB := plan.Changes.ResourceInstance(instB) - if planB == nil || planB.Action != plans.NoOp { - t.Fatalf("incorrect initial plan for instance B\nwant a 'no-op' change\ngot: %s", spew.Sdump(planB)) - } - - _, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("final apply succeeded, but should've failed with a postcondition error") - } - if len(diags) != 1 { - t.Fatalf("expected exactly one diagnostic, but got: %s", diags.Err().Error()) - } - if got, want := diags[0].Description().Summary, "Resource postcondition failed"; got != want { - t.Fatalf("wrong diagnostic summary\ngot: %s\nwant: %s", got, want) - } - } -} - -// pass an input through some expanded values, and back to a provider to make -// sure we can fully evaluate a provider configuration during a destroy plan. 
-func TestContext2Apply_destroyWithConfiguredProvider(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "in" { - type = map(string) - default = { - "a" = "first" - "b" = "second" - } -} - -module "mod" { - source = "./mod" - for_each = var.in - in = each.value -} - -locals { - config = [for each in module.mod : each.out] -} - -provider "other" { - output = [for each in module.mod : each.out] - local = local.config - var = var.in -} - -resource "other_object" "other" { -} -`, - "./mod/main.tf": ` -variable "in" { - type = string -} - -data "test_object" "d" { - test_string = var.in -} - -resource "test_object" "a" { - test_string = var.in -} - -output "out" { - value = data.test_object.d.output -} -`}) - - testProvider := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": providers.Schema{Block: simpleTestSchema()}, - }, - DataSources: map[string]providers.Schema{ - "test_object": providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "test_string": { - Type: cty.String, - Optional: true, - }, - "output": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }, - }, - } - - testProvider.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - cfg := req.Config.AsValueMap() - s := cfg["test_string"].AsString() - if !strings.Contains("firstsecond", s) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("expected 'first' or 'second', got %s", s)) - return resp - } - - cfg["output"] = cty.StringVal(s + "-ok") - resp.State = cty.ObjectVal(cfg) - return resp - } - - otherProvider := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "output": 
{ - Type: cty.List(cty.String), - Optional: true, - }, - "local": { - Type: cty.List(cty.String), - Optional: true, - }, - "var": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "other_object": providers.Schema{Block: simpleTestSchema()}, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), - addrs.NewDefaultProvider("other"): testProviderFuncFixed(otherProvider), - }, - }) - - opts := SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)) - plan, diags := ctx.Plan(m, states.NewState(), opts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // Resource changes which have dependencies across providers which - // themselves depend on resources can result in cycles. - // Because other_object transitively depends on the module resources - // through its provider, we trigger changes on both sides of this boundary - // to ensure we can create a valid plan. - // - // Taint the object to make sure a replacement works in the plan. - otherObjAddr := mustResourceInstanceAddr("other_object.other") - otherObj := state.ResourceInstance(otherObjAddr) - otherObj.Current.Status = states.ObjectTainted - // Force a change which needs to be reverted. 
- testObjAddr := mustResourceInstanceAddr(`module.mod["a"].test_object.a`) - testObjA := state.ResourceInstance(testObjAddr) - testObjA.Current.AttrsJSON = []byte(`{"test_bool":null,"test_list":null,"test_map":null,"test_number":null,"test_string":"changed"}`) - - _, diags = ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - return - - otherProvider.ConfigureProviderCalled = false - otherProvider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - // check that our config is complete, even during a destroy plan - expected := cty.ObjectVal(map[string]cty.Value{ - "local": cty.ListVal([]cty.Value{cty.StringVal("first-ok"), cty.StringVal("second-ok")}), - "output": cty.ListVal([]cty.Value{cty.StringVal("first-ok"), cty.StringVal("second-ok")}), - "var": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("first"), - "b": cty.StringVal("second"), - }), - }) - - if !req.Config.RawEquals(expected) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf( - `incorrect provider config: -expected: %#v -got: %#v`, - expected, req.Config)) - } - - return resp - } - - opts.Mode = plans.DestroyMode - // skip refresh so that we don't configure the provider before the destroy plan - opts.SkipRefresh = true - - // destroy only a single instance not included in the moved statements - _, diags = ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - if !otherProvider.ConfigureProviderCalled { - t.Fatal("failed to configure provider during destroy plan") - } -} - -// check that a provider can verify a planned destroy -func TestContext2Apply_plannedDestroy(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "x" { - test_string = "ok" -}`, - }) - - p := simpleMockProvider() - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - if !req.ProposedNewState.IsNull() { - // we should only be destroying in this 
test - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unexpected plan with %#v", req.ProposedNewState)) - return resp - } - - resp.PlannedState = req.ProposedNewState - // we're going to verify the destroy plan by inserting private data required for destroy - resp.PlannedPrivate = append(resp.PlannedPrivate, []byte("planned")...) - return resp - } - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - // if the value is nil, we return that directly to correspond to a delete - if !req.PlannedState.IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unexpected apply with %#v", req.PlannedState)) - return resp - } - - resp.NewState = req.PlannedState - - // make sure we get our private data from the plan - private := string(req.PlannedPrivate) - if private != "planned" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("missing private data from plan, got %q", private)) - } - return resp - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.x").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"test_string":"ok"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - // we don't want to refresh, because that actually runs a normal plan - SkipRefresh: true, - }) - if diags.HasErrors() { - t.Fatalf("plan: %s", diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply: %s", diags.Err()) - } -} - -func TestContext2Apply_missingOrphanedResource(t *testing.T) { - m := testModuleInline(t, map[string]string{ - 
"main.tf": ` -# changed resource address to create a new object -resource "test_object" "y" { - test_string = "y" -} -`, - }) - - p := simpleMockProvider() - - // report the prior value is missing - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - resp.NewState = cty.NullVal(req.PriorState.Type()) - return resp - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.x").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"test_string":"x"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - opts := SimplePlanOpts(plans.NormalMode, nil) - plan, diags := ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -// Outputs should not cause evaluation errors during destroy -// Check eval from both root level outputs and module outputs, which are -// handled differently during apply. -func TestContext2Apply_outputsNotToEvaluate(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" - cond = false -} - -output "from_resource" { - value = module.mod.from_resource -} - -output "from_data" { - value = module.mod.from_data -} -`, - - "./mod/main.tf": ` -variable "cond" { - type = bool -} - -module "mod" { - source = "../mod2/" - cond = var.cond -} - -output "from_resource" { - value = module.mod.resource -} - -output "from_data" { - value = module.mod.data -} -`, - - "./mod2/main.tf": ` -variable "cond" { - type = bool -} - -resource "test_object" "x" { - count = var.cond ? 0:1 -} - -data "test_object" "d" { - count = var.cond ? 
0:1 -} - -output "resource" { - value = var.cond ? null : test_object.x.*.test_string[0] -} - -output "data" { - value = one(data.test_object.d[*].test_string) -} -`}) - - p := simpleMockProvider() - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = req.Config - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - // apply the state - opts := SimplePlanOpts(plans.NormalMode, nil) - plan, diags := ctx.Plan(m, states.NewState(), opts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // and destroy - opts = SimplePlanOpts(plans.DestroyMode, nil) - plan, diags = ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // and destroy again with no state - if !state.Empty() { - t.Fatal("expected empty state, got", state) - } - - opts = SimplePlanOpts(plans.DestroyMode, nil) - plan, diags = ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -// don't evaluate conditions on outputs when destroying -func TestContext2Apply_noOutputChecksOnDestroy(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" -} - -output "from_resource" { - value = module.mod.from_resource -} -`, - - "./mod/main.tf": ` -resource "test_object" "x" { - test_string = "wrong val" -} - -output "from_resource" { - value = test_object.x.test_string - precondition { - condition = test_object.x.test_string == "ok" - error_message = "resource error" - } -} -`}) - - p := simpleMockProvider() - - state := states.NewState() - mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey)) - mod.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("test_object.x").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"test_string":"wrong_val"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - opts := SimplePlanOpts(plans.DestroyMode, nil) - plan, diags := ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -// -refresh-only should update checks -func TestContext2Apply_refreshApplyUpdatesChecks(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "x" { - test_string = "ok" - lifecycle { - postcondition { - condition = self.test_string == "ok" - error_message = "wrong val" - } - } -} - -output "from_resource" { - value = test_object.x.test_string - precondition { - condition = test_object.x.test_string == "ok" - error_message = "wrong val" - } -} -`}) - - p := simpleMockProvider() - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "test_string": cty.StringVal("ok"), - }), - } - - state := states.NewState() - mod := state.EnsureModule(addrs.RootModuleInstance) - mod.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.x").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"test_string":"wrong val"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - mod.SetOutputValue("from_resource", cty.StringVal("wrong val"), false) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - opts := SimplePlanOpts(plans.RefreshOnlyMode, nil) - plan, diags := ctx.Plan(m, state, opts) 
- assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) - - resCheck := state.CheckResults.GetObjectResult(mustResourceInstanceAddr("test_object.x")) - if resCheck.Status != checks.StatusPass { - t.Fatalf("unexpected check %s: %s\n", resCheck.Status, resCheck.FailureMessages) - } - - outAddr := addrs.AbsOutputValue{ - Module: addrs.RootModuleInstance, - OutputValue: addrs.OutputValue{ - Name: "from_resource", - }, - } - outCheck := state.CheckResults.GetObjectResult(outAddr) - if outCheck.Status != checks.StatusPass { - t.Fatalf("unexpected check %s: %s\n", outCheck.Status, outCheck.FailureMessages) - } -} - -// NoOp changes may have conditions to evaluate, but should not re-plan and -// apply the entire resource. -func TestContext2Apply_noRePlanNoOp(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "x" { -} - -resource "test_object" "y" { - # test_object.w is being re-created, so this precondition must be evaluated - # during apply, however this resource should otherwise be a NoOp. 
- lifecycle { - precondition { - condition = test_object.x.test_string == null - error_message = "test_object.x.test_string should be null" - } - } -} -`}) - - p := simpleMockProvider() - // make sure we can compute the attr - testString := p.GetProviderSchemaResponse.ResourceTypes["test_object"].Block.Attributes["test_string"] - testString.Computed = true - testString.Optional = false - - yAddr := mustResourceInstanceAddr("test_object.y") - - state := states.NewState() - mod := state.RootModule() - mod.SetResourceInstanceCurrent( - yAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"test_string":"y"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - opts := SimplePlanOpts(plans.NormalMode, nil) - plan, diags := ctx.Plan(m, state, opts) - assertNoErrors(t, diags) - - for _, c := range plan.Changes.Resources { - if c.Addr.Equal(yAddr) && c.Action != plans.NoOp { - t.Fatalf("unexpected %s change for test_object.y", c.Action) - } - } - - // test_object.y is a NoOp change from the plan, but is included in the - // graph due to the conditions which must be evaluated. This however should - // not cause the resource to be re-planned. - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - testString := req.ProposedNewState.GetAttr("test_string") - if !testString.IsNull() && testString.AsString() == "y" { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("Unexpected apply-time plan for test_object.y. 
Original plan was a NoOp")) - } - resp.PlannedState = req.ProposedNewState - return resp - } - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -// ensure all references from preconditions are tracked through plan and apply -func TestContext2Apply_preconditionErrorMessageRef(t *testing.T) { - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "nested" { - source = "./mod" -} - -output "nested_a" { - value = module.nested.a -} -`, - - "mod/main.tf": ` -variable "boop" { - default = "boop" -} - -variable "msg" { - default = "Incorrect boop." -} - -output "a" { - value = "x" - - precondition { - condition = var.boop == "boop" - error_message = var.msg - } -} -`, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - }) - assertNoErrors(t, diags) - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -func TestContext2Apply_destroyNullModuleOutput(t *testing.T) { - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "null_module" { - source = "./mod" -} - -locals { - module_output = module.null_module.null_module_test -} - -output "test_root" { - value = module.null_module.test_output -} - -output "root_module" { - value = local.module_output #fails -} -`, - - "mod/main.tf": ` -output "test_output" { - value = "test" -} - -output "null_module_test" { - value = null -} -`, - }) - - // verify plan and apply - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - }) - assertNoErrors(t, diags) - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // now 
destroy - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -func TestContext2Apply_moduleOutputWithSensitiveAttrs(t *testing.T) { - // Ensure that nested sensitive marks are stored when accessing non-root - // module outputs, and that they do not cause the entire output value to - // become sensitive. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" -} - -resource "test_resource" "b" { - // if the module output were wholly sensitive it would not be valid to use in - // for_each - for_each = module.mod.resources - value = each.value.output -} - -output "root_output" { - // The root output cannot contain any sensitive marks at all. - // Applying nonsensitive would fail here if the nested sensitive mark were - // not maintained through the output. - value = [ for k, v in module.mod.resources : nonsensitive(v.output) ] -} -`, - "./mod/main.tf": ` -resource "test_resource" "a" { - for_each = {"key": "value"} - value = each.key -} - -output "resources" { - value = test_resource.a -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - "output": { - Type: cty.String, - Sensitive: true, - Computed: true, - }, - }, - }, - }, - }) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - }) - assertNoErrors(t, diags) - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} diff --git a/internal/terraform/context_apply_test.go b/internal/terraform/context_apply_test.go 
deleted file mode 100644 index 4f16453310ee..000000000000 --- a/internal/terraform/context_apply_test.go +++ /dev/null @@ -1,12632 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "log" - "reflect" - "runtime" - "sort" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/go-test/deep" - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestContext2Apply_basic(t *testing.T) { - m := testModule(t, "apply-good") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) < 2 { - t.Fatalf("bad: %#v", mod.Resources) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_unstable(t *testing.T) { - // This tests behavior when the configuration 
contains an unstable value, - // such as the result of uuid() or timestamp(), where each call produces - // a different result. - // - // This is an important case to test because we need to ensure that - // we don't re-call the function during the apply phase: the value should - // be fixed during plan - - m := testModule(t, "apply-unstable") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected error during Plan: %s", diags.Err()) - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block - rds := plan.Changes.ResourceInstance(addr) - rd, err := rds.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - if rd.After.GetAttr("random").IsKnown() { - t.Fatalf("Attribute 'random' has known value %#v; should be unknown in plan", rd.After.GetAttr("random")) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("unexpected error during Apply: %s", diags.Err()) - } - - mod := state.Module(addr.Module) - rss := state.ResourceInstance(addr) - - if len(mod.Resources) != 1 { - t.Fatalf("wrong number of resources %d; want 1", len(mod.Resources)) - } - - rs, err := rss.Current.Decode(schema.ImpliedType()) - if err != nil { - t.Fatalf("decode error: %v", err) - } - got := rs.Value.GetAttr("random") - if !got.IsKnown() { - t.Fatalf("random is still unknown after apply") - } - if got, want := len(got.AsString()), 36; got != want { - t.Fatalf("random string has wrong length %d; want %d", got, want) - } -} - -func TestContext2Apply_escape(t *testing.T) { - m := testModule(t, 
"apply-escape") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = "bar" - type = aws_instance -`) -} - -func TestContext2Apply_resourceCountOneList(t *testing.T) { - m := testModule(t, "apply-resource-count-one-list") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoDiagnostics(t, diags) - - got := strings.TrimSpace(state.String()) - want := strings.TrimSpace(`null_resource.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/null"] - -Outputs: - -test = [foo]`) - if got != want { - t.Fatalf("got:\n%s\n\nwant:\n%s\n", got, want) - } -} -func TestContext2Apply_resourceCountZeroList(t *testing.T) { - m := testModule(t, "apply-resource-count-zero-list") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", 
diags.Err()) - } - - got := strings.TrimSpace(state.String()) - want := strings.TrimSpace(` -Outputs: - -test = []`) - if got != want { - t.Fatalf("wrong state\n\ngot:\n%s\n\nwant:\n%s\n", got, want) - } -} - -func TestContext2Apply_resourceDependsOnModule(t *testing.T) { - m := testModule(t, "apply-resource-depends-on-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // verify the apply happens in the correct order - var mu sync.Mutex - var order []string - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - ami := req.PlannedState.GetAttr("ami").AsString() - switch ami { - case "child": - - // make the child slower than the parent - time.Sleep(50 * time.Millisecond) - - mu.Lock() - order = append(order, "child") - mu.Unlock() - case "parent": - mu.Lock() - order = append(order, "parent") - mu.Unlock() - } - - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if !reflect.DeepEqual(order, []string{"child", "parent"}) { - t.Fatal("resources applied out of order") - } - - checkStateString(t, state, testTerraformApplyResourceDependsOnModuleStr) -} - -// Test that without a config, the Dependencies in the state are enough -// to maintain proper ordering. 
-func TestContext2Apply_resourceDependsOnModuleStateOnly(t *testing.T) { - m := testModule(t, "apply-resource-depends-on-module-empty") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"parent"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.aws_instance.child")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.child").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"child"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - { - // verify the apply happens in the correct order - var mu sync.Mutex - var order []string - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - id := req.PriorState.GetAttr("id") - if id.IsKnown() && id.AsString() == "parent" { - // make the dep slower than the parent - time.Sleep(50 * time.Millisecond) - - mu.Lock() - order = append(order, "child") - mu.Unlock() - } else { - mu.Lock() - order = append(order, "parent") - mu.Unlock() - } - - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - if !reflect.DeepEqual(order, []string{"child", "parent"}) { - t.Fatal("resources applied out of 
order") - } - - checkStateString(t, state, "") - } -} - -func TestContext2Apply_resourceDependsOnModuleDestroy(t *testing.T) { - m := testModule(t, "apply-resource-depends-on-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - var globalState *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - globalState = state - } - - { - // Wait for the dependency, sleep, and verify the graph never - // called a child. - var called int32 - var checked bool - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - ami := req.PriorState.GetAttr("ami").AsString() - if ami == "parent" { - checked = true - - // Sleep to allow parallel execution - time.Sleep(50 * time.Millisecond) - - // Verify that called is 0 (dep not called) - if atomic.LoadInt32(&called) != 0 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("module child should not be called")) - return resp - } - } - - atomic.AddInt32(&called, 1) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, globalState, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if !checked { - t.Fatal("should check") - } - - checkStateString(t, state, ``) - } -} - -func TestContext2Apply_resourceDependsOnModuleGrandchild(t *testing.T) { - m := testModule(t, "apply-resource-depends-on-module-deep") - p := testProvider("aws") - 
p.PlanResourceChangeFn = testDiffFn - - { - // Wait for the dependency, sleep, and verify the graph never - // called a child. - var called int32 - var checked bool - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - planned := req.PlannedState.AsValueMap() - if ami, ok := planned["ami"]; ok && ami.AsString() == "grandchild" { - checked = true - - // Sleep to allow parallel execution - time.Sleep(50 * time.Millisecond) - - // Verify that called is 0 (dep not called) - if atomic.LoadInt32(&called) != 0 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("aws_instance.a should not be called")) - return resp - } - } - - atomic.AddInt32(&called, 1) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if !checked { - t.Fatal("should check") - } - - checkStateString(t, state, testTerraformApplyResourceDependsOnModuleDeepStr) - } -} - -func TestContext2Apply_resourceDependsOnModuleInModule(t *testing.T) { - m := testModule(t, "apply-resource-depends-on-module-in-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - { - // Wait for the dependency, sleep, and verify the graph never - // called a child. 
- var called int32 - var checked bool - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - planned := req.PlannedState.AsValueMap() - if ami, ok := planned["ami"]; ok && ami.AsString() == "grandchild" { - checked = true - - // Sleep to allow parallel execution - time.Sleep(50 * time.Millisecond) - - // Verify that called is 0 (dep not called) - if atomic.LoadInt32(&called) != 0 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("something else was applied before grandchild; grandchild should be first")) - return resp - } - } - - atomic.AddInt32(&called, 1) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if !checked { - t.Fatal("should check") - } - - checkStateString(t, state, testTerraformApplyResourceDependsOnModuleInModuleStr) - } -} - -func TestContext2Apply_mapVarBetweenModules(t *testing.T) { - m := testModule(t, "apply-map-var-through-module") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -Outputs: - -amis_from_module = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 
us-west-2:ami-456789 } - -module.test: - null_resource.noop: - ID = foo - provider = provider["registry.terraform.io/hashicorp/null"] - - Outputs: - - amis_out = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 us-west-2:ami-456789 }`) - if actual != expected { - t.Fatalf("expected: \n%s\n\ngot: \n%s\n", expected, actual) - } -} - -func TestContext2Apply_refCount(t *testing.T) { - m := testModule(t, "apply-ref-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) < 2 { - t.Fatalf("bad: %#v", mod.Resources) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyRefCountStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_providerAlias(t *testing.T) { - m := testModule(t, "apply-provider-alias") - - // Each provider instance must be completely independent to ensure that we - // are verifying the correct state of each. 
- p := func() (providers.Interface, error) { - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - return p, nil - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): p, - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) < 2 { - t.Fatalf("bad: %#v", mod.Resources) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProviderAliasStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// Two providers that are configured should both be configured prior to apply -func TestContext2Apply_providerAliasConfigure(t *testing.T) { - m := testModule(t, "apply-provider-alias-configure") - - // Each provider instance must be completely independent to ensure that we - // are verifying the correct state of each. 
- p := func() (providers.Interface, error) { - p := testProvider("another") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - return p, nil - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("another"): p, - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - // Configure to record calls AFTER Plan above - var configCount int32 - p = func() (providers.Interface, error) { - p := testProvider("another") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - atomic.AddInt32(&configCount, 1) - - foo := req.Config.GetAttr("foo").AsString() - if foo != "bar" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("foo: %#v", foo)) - } - - return - } - return p, nil - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("another"): p, - }, - }) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if configCount != 2 { - t.Fatalf("provider config expected 2 calls, got: %d", configCount) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProviderAliasConfigStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// GH-2870 -func TestContext2Apply_providerWarning(t *testing.T) { - m := testModule(t, "apply-provider-warning") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - 
resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("just a warning")) - return - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - `) - if actual != expected { - t.Fatalf("got: \n%s\n\nexpected:\n%s", actual, expected) - } - - if !p.ConfigureProviderCalled { - t.Fatalf("provider Configure() was never called!") - } -} - -func TestContext2Apply_emptyModule(t *testing.T) { - // A module with only outputs (no resources) - m := testModule(t, "apply-empty-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - actual = strings.Replace(actual, " ", "", -1) - expected := strings.TrimSpace(testTerraformApplyEmptyModuleStr) - if actual != expected { - t.Fatalf("bad: \n%s\nexpect:\n%s", actual, expected) - } -} - -func TestContext2Apply_createBeforeDestroy(t *testing.T) { - m := testModule(t, "apply-good-create-before") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if got, want := len(mod.Resources), 1; got != want { - t.Logf("state:\n%s", state) - t.Fatalf("wrong number of resources %d; want %d", got, want) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyCreateBeforeStr) - if actual != expected { - t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_createBeforeDestroyUpdate(t *testing.T) { - m := testModule(t, "apply-good-create-before-update") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // signal that resource foo has started applying - fooChan := make(chan struct{}) - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - id := req.PriorState.GetAttr("id").AsString() - switch id { - case "bar": - select { - case <-fooChan: - resp.Diagnostics = resp.Diagnostics.Append(errors.New("bar must be updated before foo is destroyed")) - return resp - case <-time.After(100 * time.Millisecond): - // wait a moment to ensure that foo is not going to be destroyed first - } - case "foo": - close(fooChan) - } - - return testApplyFn(req) - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
fooAddr := mustResourceInstanceAddr("aws_instance.foo") - root.SetResourceInstanceCurrent( - fooAddr.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`), - CreateBeforeDestroy: true, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), - CreateBeforeDestroy: true, - Dependencies: []addrs.ConfigResource{fooAddr.ContainingResource().Config()}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) != 1 { - t.Fatalf("bad: %s", state) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyCreateBeforeUpdateStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// This tests that when a CBD resource depends on a non-CBD resource, -// we can still properly apply changes that require new for both. 
-func TestContext2Apply_createBeforeDestroy_dependsNonCBD(t *testing.T) { - m := testModule(t, "apply-cbd-depends-non-cbd") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo", "require_new": "abc"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = yes - type = aws_instance - value = foo - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = yes - type = aws_instance - `) -} - -func TestContext2Apply_createBeforeDestroy_hook(t *testing.T) { - h := new(MockHook) - m := testModule(t, "apply-good-create-before") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - state := states.NewState() - 
root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - var actual []cty.Value - var actualLock sync.Mutex - h.PostApplyFn = func(addr addrs.AbsResourceInstance, gen states.Generation, sv cty.Value, e error) (HookAction, error) { - actualLock.Lock() - - defer actualLock.Unlock() - actual = append(actual, sv) - return HookActionContinue, nil - } - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - expected := []cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "require_new": cty.StringVal("xyz"), - "type": cty.StringVal("aws_instance"), - }), - cty.NullVal(cty.DynamicPseudoType), - } - - cmpOpt := cmp.Transformer("ctyshim", hcl2shim.ConfigValueFromHCL2) - if !cmp.Equal(actual, expected, cmpOpt) { - t.Fatalf("wrong state snapshot sequence\n%s", cmp.Diff(expected, actual, cmpOpt)) - } -} - -// Test that we can perform an apply with CBD in a count with deposed instances. 
-func TestContext2Apply_createBeforeDestroy_deposedCount(t *testing.T) { - m := testModule(t, "apply-cbd-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("aws_instance.bar[0]").Resource, - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("aws_instance.bar[1]").Resource, - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar.0: - ID = 
foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance - `) -} - -// Test that when we have a deposed instance but a good primary, we still -// destroy the deposed instance. -func TestContext2Apply_createBeforeDestroy_deposedOnly(t *testing.T) { - m := testModule(t, "apply-cbd-deposed-only") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("aws_instance.bar").Resource, - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - `) -} - -func TestContext2Apply_destroyComputed(t *testing.T) { - m := testModule(t, "apply-destroy-computed") - p := testProvider("aws") - p.PlanResourceChangeFn 
= testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo", "output": "value"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("plan failed") - } else { - t.Logf("plan:\n\n%s", legacyDiffComparisonString(plan.Changes)) - } - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } -} - -// Test that the destroy operation uses depends_on as a source of ordering. -func TestContext2Apply_destroyDependsOn(t *testing.T) { - // It is possible for this to be racy, so we loop a number of times - // just to check. 
- for i := 0; i < 10; i++ { - testContext2Apply_destroyDependsOn(t) - } -} - -func testContext2Apply_destroyDependsOn(t *testing.T) { - m := testModule(t, "apply-destroy-depends-on") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - // Record the order we see Apply - var actual []string - var actualLock sync.Mutex - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - actualLock.Lock() - defer actualLock.Unlock() - id := req.PriorState.GetAttr("id").AsString() - actual = append(actual, id) - - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Parallelism: 1, // To check ordering - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - expected := []string{"foo", "bar"} - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) - } -} - -// Test that destroy ordering is correct with dependencies only -// in the state. 
-func TestContext2Apply_destroyDependsOnStateOnly(t *testing.T) { - newState := states.NewState() - root := newState.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }, - Module: root.Addr.Module(), - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - // It is possible for this to be racy, so we loop a number of times - // just to check. 
- for i := 0; i < 10; i++ { - t.Run("new", func(t *testing.T) { - testContext2Apply_destroyDependsOnStateOnly(t, newState) - }) - } -} - -func testContext2Apply_destroyDependsOnStateOnly(t *testing.T, state *states.State) { - state = state.DeepCopy() - m := testModule(t, "empty") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - // Record the order we see Apply - var actual []string - var actualLock sync.Mutex - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - actualLock.Lock() - defer actualLock.Unlock() - id := req.PriorState.GetAttr("id").AsString() - actual = append(actual, id) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Parallelism: 1, // To check ordering - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - expected := []string{"bar", "foo"} - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) - } -} - -// Test that destroy ordering is correct with dependencies only -// in the state within a module (GH-11749) -func TestContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T) { - newState := states.NewState() - child := newState.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - 
child.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }, - Module: child.Addr.Module(), - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - // It is possible for this to be racy, so we loop a number of times - // just to check. - for i := 0; i < 10; i++ { - t.Run("new", func(t *testing.T) { - testContext2Apply_destroyDependsOnStateOnlyModule(t, newState) - }) - } -} - -func testContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T, state *states.State) { - state = state.DeepCopy() - m := testModule(t, "empty") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // Record the order we see Apply - var actual []string - var actualLock sync.Mutex - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - actualLock.Lock() - defer actualLock.Unlock() - id := req.PriorState.GetAttr("id").AsString() - actual = append(actual, id) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Parallelism: 1, // To check ordering - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - expected := []string{"bar", "foo"} - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestContext2Apply_dataBasic(t *testing.T) { - m 
:= testModule(t, "apply-data-basic") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yo"), - "foo": cty.NullVal(cty.String), - }), - } - - hook := new(MockHook) - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{hook}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyDataBasicStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - if !hook.PreApplyCalled { - t.Fatal("PreApply not called for data source read") - } - if !hook.PostApplyCalled { - t.Fatal("PostApply not called for data source read") - } -} - -func TestContext2Apply_destroyData(t *testing.T) { - m := testModule(t, "apply-destroy-data-resource") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: req.Config, - } - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("data.null_data_source.testing").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"-"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/null"]`), - ) - - hook := &testHook{} - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - 
addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - Hooks: []Hook{hook}, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - newState, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if got := len(newState.Modules); got != 1 { - t.Fatalf("state has %d modules after destroy; want 1", got) - } - - if got := len(newState.RootModule().Resources); got != 0 { - t.Fatalf("state has %d resources after destroy; want 0", got) - } - - wantHookCalls := []*testHookCall{ - {"PreApply", "data.null_data_source.testing"}, - {"PostApply", "data.null_data_source.testing"}, - {"PostStateUpdate", ""}, - } - if !reflect.DeepEqual(hook.Calls, wantHookCalls) { - t.Errorf("wrong hook calls\ngot: %swant: %s", spew.Sdump(hook.Calls), spew.Sdump(wantHookCalls)) - } -} - -// https://github.com/hashicorp/terraform/pull/5096 -func TestContext2Apply_destroySkipsCBD(t *testing.T) { - // Config contains CBD resource depending on non-CBD resource, which triggers - // a cycle if they are both replaced, but should _not_ trigger a cycle when - // just doing a `terraform destroy`. 
- m := testModule(t, "apply-destroy-cbd") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_destroyModuleVarProviderConfig(t *testing.T) { - m := testModule(t, "apply-destroy-mod-var-provider-config") - p := func() (providers.Interface, error) { - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - return p, nil - } - state := states.NewState() - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - 
addrs.NewDefaultProvider("aws"): p, - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } -} - -func TestContext2Apply_destroyCrossProviders(t *testing.T) { - m := testModule(t, "apply-destroy-cross-providers") - - p_aws := testProvider("aws") - p_aws.ApplyResourceChangeFn = testApplyFn - p_aws.PlanResourceChangeFn = testDiffFn - p_aws.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p_aws), - } - - ctx, m, state := getContextForApply_destroyCrossProviders(t, m, providers) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } -} - -func getContextForApply_destroyCrossProviders(t *testing.T, m *configs.Config, providerFactories map[addrs.Provider]providers.Factory) (*Context, *configs.Config, *states.State) { - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.shared").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"test"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child := 
state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_vpc.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id": "vpc-aaabbb12", "value":"test"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: providerFactories, - }) - - return ctx, m, state -} - -func TestContext2Apply_minimal(t *testing.T) { - m := testModule(t, "apply-minimal") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyMinimalStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_cancel(t *testing.T) { - stopped := false - - m := testModule(t, "apply-cancel") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - if !stopped { - stopped = true - go ctx.Stop() - - for { - if ctx.sh.Stopped() { - break - } - time.Sleep(10 * time.Millisecond) - } - } - return testApplyFn(req) - } - p.PlanResourceChangeFn = testDiffFn - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - // Start the Apply in a goroutine - 
var applyDiags tfdiags.Diagnostics - stateCh := make(chan *states.State) - go func() { - state, diags := ctx.Apply(plan, m) - applyDiags = diags - - stateCh <- state - }() - - state := <-stateCh - // only expecting an early exit error - if !applyDiags.HasErrors() { - t.Fatal("expected early exit error") - } - - for _, d := range applyDiags { - desc := d.Description() - if desc.Summary != "execution halted" { - t.Fatalf("unexpected error: %v", applyDiags.Err()) - } - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyCancelStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - if !p.StopCalled { - t.Fatal("stop should be called") - } -} - -func TestContext2Apply_cancelBlock(t *testing.T) { - m := testModule(t, "apply-cancel-block") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - applyCh := make(chan struct{}) - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - close(applyCh) - - for !ctx.sh.Stopped() { - // Wait for stop to be called. We call Gosched here so that - // the other goroutines can always be scheduled to set Stopped. 
- runtime.Gosched() - } - - // Sleep - time.Sleep(100 * time.Millisecond) - return testApplyFn(req) - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - // Start the Apply in a goroutine - var applyDiags tfdiags.Diagnostics - stateCh := make(chan *states.State) - go func() { - state, diags := ctx.Apply(plan, m) - applyDiags = diags - - stateCh <- state - }() - - stopDone := make(chan struct{}) - go func() { - defer close(stopDone) - <-applyCh - ctx.Stop() - }() - - // Make sure that stop blocks - select { - case <-stopDone: - t.Fatal("stop should block") - case <-time.After(10 * time.Millisecond): - } - - // Wait for stop - select { - case <-stopDone: - case <-time.After(500 * time.Millisecond): - t.Fatal("stop should be done") - } - - // Wait for apply to complete - state := <-stateCh - // only expecting an early exit error - if !applyDiags.HasErrors() { - t.Fatal("expected early exit error") - } - - for _, d := range applyDiags { - desc := d.Description() - if desc.Summary != "execution halted" { - t.Fatalf("unexpected error: %v", applyDiags.Err()) - } - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) -} - -func TestContext2Apply_cancelProvisioner(t *testing.T) { - m := testModule(t, "apply-cancel-provisioner") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - pr := testProvisioner() - pr.GetSchemaResponse = provisioners.GetSchemaResponse{ - Provisioner: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) 
- - prStopped := make(chan struct{}) - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - // Start the stop process - go ctx.Stop() - - <-prStopped - return - } - pr.StopFn = func() error { - close(prStopped) - return nil - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - // Start the Apply in a goroutine - var applyDiags tfdiags.Diagnostics - stateCh := make(chan *states.State) - go func() { - state, diags := ctx.Apply(plan, m) - applyDiags = diags - - stateCh <- state - }() - - // Wait for completion - state := <-stateCh - - // we are expecting only an early exit error - if !applyDiags.HasErrors() { - t.Fatal("expected early exit error") - } - - for _, d := range applyDiags { - desc := d.Description() - if desc.Summary != "execution halted" { - t.Fatalf("unexpected error: %v", applyDiags.Err()) - } - } - - checkStateString(t, state, ` -aws_instance.foo: (tainted) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) - - if !pr.StopCalled { - t.Fatal("stop should be called") - } -} - -func TestContext2Apply_compute(t *testing.T) { - m := testModule(t, "apply-compute") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "num": { - Type: cty.Number, - Optional: true, - }, - "compute": { - Type: cty.String, - Optional: true, - }, - "compute_value": { - Type: cty.String, - Optional: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - "id": { - Type: cty.String, - Computed: true, - }, - "type": { - Type: cty.String, - Computed: true, - }, - "value": { // Populated from compute_value because compute = "value" in the 
config fixture - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - SetVariables: InputValues{ - "value": &InputValue{ - Value: cty.NumberIntVal(1), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyComputeStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_countDecrease(t *testing.T) { - m := testModule(t, "apply-count-dec") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo": "foo","type": "aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo": "foo","type": "aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyCountDecStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_countDecreaseToOneX(t *testing.T) { - m := testModule(t, "apply-count-dec-one") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := 
ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyCountDecToOneStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// https://github.com/PeoplePerHour/terraform/pull/11 -// -// This tests a rare but possible situation where we have both a no-key and -// a zero-key instance of the same resource in the configuration when we -// disable count. -// -// The main way to get here is for a provider to fail to destroy the zero-key -// instance but succeed in creating the no-key instance, since those two -// can typically happen concurrently. There are various other ways to get here -// that might be considered user error, such as using "terraform state mv" -// to create a strange combination of different key types on the same resource. -// -// This test indirectly exercises an intentional interaction between -// refactoring.ImpliedMoveStatements and refactoring.ApplyMoves: we'll first -// generate an implied move statement from aws_instance.foo[0] to -// aws_instance.foo, but then refactoring.ApplyMoves should notice that and -// ignore the statement, in the same way as it would if an explicit move -// statement specified the same situation. 
func TestContext2Apply_countDecreaseToOneCorrupted(t *testing.T) {
	m := testModule(t, "apply-count-dec-one")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	// Prior state deliberately holds BOTH a no-key instance (aws_instance.foo)
	// and a zero-key instance (aws_instance.foo[0]) of the same resource; see
	// the comment above this function for how that state can arise.
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"baz", "type": "aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		got := strings.TrimSpace(legacyPlanComparisonString(state, plan.Changes))
		want := strings.TrimSpace(testTerraformApplyCountDecToOneCorruptedPlanStr)
		if got != want {
			t.Fatalf("wrong plan result\ngot:\n%s\nwant:\n%s", got, want)
		}
	}
	// The zero-key instance must be planned for deletion, with the reason
	// recording that its repetition mode no longer matches the config.
	{
		change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo[0]"))
		if change == nil {
			t.Fatalf("no planned change for instance zero")
		}
		if got, want := change.Action, plans.Delete; got != want {
			t.Errorf("wrong action for instance zero %s; want %s", got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want {
			t.Errorf("wrong action reason for instance zero %s; want %s", got, want)
		}
	}
	// The no-key instance must be left untouched (NoOp).
	{
		change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo"))
		if change == nil {
			t.Fatalf("no planned change for no-key instance")
		}
		if got, want := change.Action, plans.NoOp; got != want {
			t.Errorf("wrong action for no-key instance %s; want %s", got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason for no-key instance %s; want %s", got, want)
		}
	}

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(s.String())
	expected := strings.TrimSpace(testTerraformApplyCountDecToOneCorruptedStr)
	if actual != expected {
		t.Fatalf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_countTainted starts from a tainted aws_instance.foo[0]
// and verifies the plan replaces it (destroy/create) while creating foo[1],
// then checks the applied state.
func TestContext2Apply_countTainted(t *testing.T) {
	m := testModule(t, "apply-count-tainted")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectTainted,
			AttrsJSON: []byte(`{"id":"bar", "type": "aws_instance", "foo": "foo"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		got := strings.TrimSpace(legacyDiffComparisonString(plan.Changes))
		// NOTE(review): interior whitespace of this golden string was
		// reconstructed from a mangled source; confirm exact column alignment
		// against legacyDiffComparisonString output.
		want := strings.TrimSpace(`
DESTROY/CREATE: aws_instance.foo[0]
  foo:  "foo" => "foo"
  id:   "bar" => ""
  type: "aws_instance" => ""
CREATE: aws_instance.foo[1]
  foo:  "" => "foo"
  id:   "" => ""
  type: "" => ""
`)
		if got != want {
			t.Fatalf("wrong plan\n\ngot:\n%s\n\nwant:\n%s", got, want)
		}
	}

	s, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	got := strings.TrimSpace(s.String())
	// NOTE(review): golden state string reconstructed; verify spacing against
	// states.State.String output.
	want := strings.TrimSpace(`
aws_instance.foo.0:
  ID = foo
  provider = provider["registry.terraform.io/hashicorp/aws"]
  foo = foo
  type = aws_instance
aws_instance.foo.1:
  ID = foo
  provider = provider["registry.terraform.io/hashicorp/aws"]
  foo = foo
  type = aws_instance
`)
	if got != want {
		t.Fatalf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}

// TestContext2Apply_countVariable applies the "apply-count-variable" fixture
// with its variables left unset and compares the state against a golden.
func TestContext2Apply_countVariable(t *testing.T) {
	m := testModule(t, "apply-count-variable")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyCountVariableStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_countVariableRef is the same flow for the
// "apply-count-variable-ref" fixture, where count references another value.
func TestContext2Apply_countVariableRef(t *testing.T) {
	m := testModule(t, "apply-count-variable-ref")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyCountVariableRefStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

func TestContext2Apply_provisionerInterpCount(t *testing.T) {
	// This test ensures that a provisioner can interpolate a resource count
	// even though the provisioner expression is evaluated during the plan
	// walk. https://github.com/hashicorp/terraform/issues/16840

	m, snap := testModuleWithSnapshot(t, "apply-provisioner-interp-count")

	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	pr := testProvisioner()

	Providers := map[addrs.Provider]providers.Factory{
		addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
	}

	provisioners := map[string]provisioners.Factory{
		"local-exec": testProvisionerFuncFixed(pr),
	}
	ctx := testContext2(t, &ContextOpts{
		Providers:    Providers,
		Provisioners: provisioners,
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	// We'll marshal and unmarshal the plan here, to ensure that we have
	// a clean new context as would be created if we separately ran
	// terraform plan -out=tfplan && terraform apply tfplan
	ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
	if err != nil {
		t.Fatal(err)
	}
	ctxOpts.Providers = Providers
	ctxOpts.Provisioners = provisioners
	ctx, diags = NewContext(ctxOpts)
	if diags.HasErrors() {
		t.Fatalf("failed to create context for plan: %s", diags.Err())
	}

	// Applying the plan should now succeed
	_, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("apply failed unexpectedly: %s", diags.Err())
	}

	// Verify apply was invoked
	if !pr.ProvisionResourceCalled {
		t.Fatalf("provisioner was not called")
	}
}

// TestContext2Apply_foreachVariable plans the "plan-for-each-unknown-value"
// fixture with var.foo set to a known string and applies it, comparing the
// result against testTerraformApplyForEachVariableStr.
func TestContext2Apply_foreachVariable(t *testing.T) {
	m := testModule(t, "plan-for-each-unknown-value")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"foo": &InputValue{
				Value: cty.StringVal("hello"),
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyForEachVariableStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleBasic applies the "apply-module" fixture from an
// empty state and compares the rendered state with a golden string.
func TestContext2Apply_moduleBasic(t *testing.T) {
	m := testModule(t, "apply-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyModuleStr)
	if actual != expected {
		t.Fatalf("bad, expected:\n%s\n\nactual:\n%s", expected, actual)
	}
}

// TestContext2Apply_moduleDestroyOrder destroys a root resource that depends
// on a child-module resource and asserts the destroy order is b then a.
func TestContext2Apply_moduleDestroyOrder(t *testing.T) {
	m := testModule(t, "apply-module-destroy-order")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// Create a custom apply function to track the order they were destroyed
	var order []string
	var orderLock sync.Mutex
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		id := req.PriorState.GetAttr("id").AsString()

		if id == "b" {
			// Pause briefly to make any race conditions more visible, since
			// missing edges here can cause undeterministic ordering.
			time.Sleep(100 * time.Millisecond)
		}

		orderLock.Lock()
		defer orderLock.Unlock()

		order = append(order, id)
		resp.NewState = req.PlannedState
		return resp
	}

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":    {Type: cty.String, Required: true},
					"blah":  {Type: cty.String, Optional: true},
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	// Prior state: module.child.aws_instance.a exists, and the root
	// aws_instance.b records a dependency on it, which is what should force
	// b to be destroyed before a.
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.a").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"a"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.b").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:       states.ObjectReady,
			AttrsJSON:    []byte(`{"id":"b"}`),
			Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.aws_instance.a")},
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	expected := []string{"b", "a"}
	if !reflect.DeepEqual(order, expected) {
t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected) - } - - { - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyModuleDestroyOrderStr) - if actual != expected { - t.Errorf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - } -} - -func TestContext2Apply_moduleInheritAlias(t *testing.T) { - m := testModule(t, "apply-module-provider-inherit-alias") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - val := req.Config.GetAttr("value") - if val.IsNull() { - return - } - - root := req.Config.GetAttr("root") - if !root.IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("child should not get root")) - } - - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` - -module.child: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"].eu - type = aws_instance - `) -} - -func TestContext2Apply_orphanResource(t *testing.T) { - // This is a two-step test: - // 1. Apply a configuration with resources that have count set. - // This should place the empty resource object in the state to record - // that each exists, and record any instances. - // 2. Apply an empty configuration against the same state, which should - // then clean up both the instances and the containing resource objects. 
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	// Step 1: create the resources and instances
	m := testModule(t, "apply-orphan-resource")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)
	state, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	// At this point both resources should be recorded in the state, along
	// with the single instance associated with test_thing.one.
	want := states.BuildState(func(s *states.SyncState) {
		providerAddr := addrs.AbsProviderConfig{
			Provider: addrs.NewDefaultProvider("test"),
			Module:   addrs.RootModule,
		}
		oneAddr := addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_thing",
			Name: "one",
		}.Absolute(addrs.RootModuleInstance)
		s.SetResourceProvider(oneAddr, providerAddr)
		s.SetResourceInstanceCurrent(oneAddr.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo"}`),
		}, providerAddr)
	})

	if state.String() != want.String() {
		t.Fatalf("wrong state after step 1\n%s", cmp.Diff(want, state))
	}

	// Step 2: update with an empty config, to destroy everything
	m = testModule(t, "empty")
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	plan, diags = ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		addr := mustResourceInstanceAddr("test_thing.one[0]")
		change := plan.Changes.ResourceInstance(addr)
		if change == nil {
			t.Fatalf("no planned change for %s", addr)
		}
		if got, want := change.Action, plans.Delete; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
	}

	state, diags = ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	// The state should now be _totally_ empty, with just an empty root module
	// (since that always exists) and no resources at all.
	want = states.NewState()
	want.CheckResults = &states.CheckResults{}
	if !cmp.Equal(state, want) {
		t.Fatalf("wrong state after step 2\ngot: %swant: %s", spew.Sdump(state), spew.Sdump(want))
	}

}

// TestContext2Apply_moduleOrphanInheritAlias destroys an orphaned child
// module whose resource needs an inherited provider config; it checks both
// the planned delete action/reason and that the provider gets configured.
func TestContext2Apply_moduleOrphanInheritAlias(t *testing.T) {
	m := testModule(t, "apply-module-provider-inherit-alias-orphan")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			return
		}

		root := req.Config.GetAttr("root")
		if !root.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("child should not get root"))
		}

		return
	}

	// Create a state with an orphan module
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		addr := mustResourceInstanceAddr("module.child.aws_instance.bar")
		change := plan.Changes.ResourceInstance(addr)
		if change == nil {
			t.Fatalf("no planned change for %s", addr)
		}
		if got, want := change.Action, plans.Delete; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
		// This should ideally be ResourceInstanceDeleteBecauseNoModule, but
		// the codepath deciding this doesn't currently have enough information
		// to differentiate, and so this is a compromise.
		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
	}

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	if !p.ConfigureProviderCalled {
		t.Fatal("must call configure")
	}

	checkStateString(t, state, "")
}

// TestContext2Apply_moduleOrphanProvider checks that an orphaned child
// module's resources can still use a provider configuration inherited from
// the root (configure fails if "value" is missing).
func TestContext2Apply_moduleOrphanProvider(t *testing.T) {
	m := testModule(t, "apply-module-orphan-provider-inherit")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		return
	}

	// Create a state with an orphan module
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// TestContext2Apply_moduleOrphanGrandchildProvider is the same as above but
// with the orphan module nested two levels deep (grandchild).
func TestContext2Apply_moduleOrphanGrandchildProvider(t *testing.T) {
	m := testModule(t, "apply-module-orphan-provider-inherit")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		return
	}

	// Create a state with an orphan module that is nested (grandchild)
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// TestContext2Apply_moduleGrandchildProvider verifies that a grandchild
// module's provider configuration is actually configured (the flag is
// guarded by a mutex because configure may run concurrently).
func TestContext2Apply_moduleGrandchildProvider(t *testing.T) {
	m := testModule(t, "apply-module-grandchild-provider-inherit")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	var callLock sync.Mutex
	called := false
	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		callLock.Lock()
		called = true
		callLock.Unlock()

		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}

	callLock.Lock()
	defer callLock.Unlock()
	if called != true {
		t.Fatalf("err: configure never called")
	}
}

// This tests an issue where all the providers in a module but not
// in the root weren't being added to the root properly. In this test
// case: aws is explicitly added to root, but "test" should be added to.
// With the bug, it wasn't.
func TestContext2Apply_moduleOnlyProvider(t *testing.T) {
	m := testModule(t, "apply-module-only-provider")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	pTest := testProvider("test")
	pTest.ApplyResourceChangeFn = testApplyFn
	pTest.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"):  testProviderFuncFixed(p),
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyModuleOnlyProviderStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderAlias applies the
// "apply-module-provider-alias" fixture and compares the state rendering
// against testTerraformApplyModuleProviderAliasStr.
func TestContext2Apply_moduleProviderAlias(t *testing.T) {
	m := testModule(t, "apply-module-provider-alias")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyModuleProviderAliasStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderAliasTargets plans the same fixture but
// targets a nonexistent resource, so nothing is created and the final
// state renders empty.
func TestContext2Apply_moduleProviderAliasTargets(t *testing.T) {
	m := testModule(t, "apply-module-provider-alias")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		Targets: []addrs.Targetable{
			addrs.ConfigResource{
				Module: addrs.RootModule,
				Resource: addrs.Resource{
					Mode: addrs.ManagedResourceMode,
					Type: "nonexistent",
					Name: "thing",
				},
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	// The expected string trims down to empty: no resources were targeted.
	expected := strings.TrimSpace(`

	`)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderCloseNested destroys a resource in state
// against the "apply-module-provider-close-nested" fixture, exercising
// provider close behavior for nested module providers.
func TestContext2Apply_moduleProviderCloseNested(t *testing.T) {
	m := testModule(t, "apply-module-provider-close-nested")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// Tests that variables used as module vars that reference data that
// already exists in the state and requires no diff works properly. This
// fixes an issue faced where module variables were pruned because they were
// accessing "non-existent" resources (they existed, just not in the graph
// cause they weren't in the diff).
func TestContext2Apply_moduleVarRefExisting(t *testing.T) {
	m := testModule(t, "apply-ref-existing")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyModuleVarRefExistingStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleVarResourceCount runs a destroy with num=2 and
// then a normal plan/apply with num=5, verifying that a module variable
// driving resource count works across both modes.
func TestContext2Apply_moduleVarResourceCount(t *testing.T) {
	m := testModule(t, "apply-module-var-resource-count")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.DestroyMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(2),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(5),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// GH-819
func TestContext2Apply_moduleBool(t *testing.T) {
	m := testModule(t, "apply-module-bool")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyModuleBoolStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests that a module can be targeted and everything is properly created.
// This adds to the plan test to also just verify that apply works.
func TestContext2Apply_moduleTarget(t *testing.T) {
	m := testModule(t, "plan-targeted-cross-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// NOTE(review): interior whitespace of this golden string was
	// reconstructed from a mangled source; confirm against checkStateString's
	// expected formatting.
	checkStateString(t, state, `

module.A:
  aws_instance.foo:
    ID = foo
    provider = provider["registry.terraform.io/hashicorp/aws"]
    foo = bar
    type = aws_instance

  Outputs:

  value = foo
module.B:
  aws_instance.bar:
    ID = foo
    provider = provider["registry.terraform.io/hashicorp/aws"]
    foo = foo
    type = aws_instance

    Dependencies:
      module.A.aws_instance.foo
	`)
}

// TestContext2Apply_multiProvider applies a configuration spanning two
// providers (aws and do) and checks the combined final state.
func TestContext2Apply_multiProvider(t *testing.T) {
	m := testModule(t, "apply-multi-provider")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn

	pDO := testProvider("do")
	pDO.ApplyResourceChangeFn = testApplyFn
	pDO.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			addrs.NewDefaultProvider("do"):  testProviderFuncFixed(pDO),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Sanity check: resources from both providers must be present.
	mod := state.RootModule()
	if len(mod.Resources) < 2 {
		t.Fatalf("bad: %#v", mod.Resources)
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyMultiProviderStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_multiProviderDestroy creates resources across two
// providers, then destroys them while instrumenting the apply function to
// prove that the aws_instance (the dependency) is destroyed before anything
// else runs.
func TestContext2Apply_multiProviderDestroy(t *testing.T) {
	m := testModule(t, "apply-multi-provider-destroy")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"addr": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	p2 := testProvider("vault")
	p2.ApplyResourceChangeFn = testApplyFn
	p2.PlanResourceChangeFn = testDiffFn
	p2.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"vault_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	var state *states.State

	// First, create the instances
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		assertNoErrors(t, diags)

		state = s
	}

	// Destroy them
	{
		// Verify that aws_instance.bar is destroyed first
		var checked bool
		var called int32
		var lock sync.Mutex
		applyFn := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
			lock.Lock()
			defer lock.Unlock()

			if req.TypeName == "aws_instance" {
				checked = true

				// Sleep to allow parallel execution
				time.Sleep(50 * time.Millisecond)

				// Verify that called is 0 (dep not called)
				if atomic.LoadInt32(&called) != 0 {
					resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called"))
					return resp
				}
			}

			atomic.AddInt32(&called, 1)
			return testApplyFn(req)
		}

		// Set the apply functions
		p.ApplyResourceChangeFn = applyFn
		p2.ApplyResourceChangeFn = applyFn

		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
		})
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		assertNoErrors(t, diags)

		if !checked {
			t.Fatal("should be checked")
		}

		state = s
	}

	checkStateString(t, state, ``)
}

// This is like the multiProviderDestroy test except it tests that
// dependent resources within a child module that inherit provider
// configuration are still destroyed first.
func TestContext2Apply_multiProviderDestroyChild(t *testing.T) {
	m := testModule(t, "apply-multi-provider-destroy-child")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"value": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	p2 := testProvider("vault")
	p2.ApplyResourceChangeFn = testApplyFn
	p2.PlanResourceChangeFn = testDiffFn
	p2.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{},
		ResourceTypes: map[string]*configschema.Block{
			"vault_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	var state *states.State

	// First, create the instances
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		state = s
	}

	// Destroy them
	{
		// Verify that aws_instance.bar is destroyed first
		var checked bool
		var called int32
		var lock sync.Mutex
		applyFn := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
			lock.Lock()
			defer lock.Unlock()

			if req.TypeName == "aws_instance" {
				checked = true

				// Sleep to allow parallel execution
				time.Sleep(50 * time.Millisecond)

// Verify that called is 0 (dep not called) - if atomic.LoadInt32(&called) != 0 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called")) - return resp - } - } - - atomic.AddInt32(&called, 1) - return testApplyFn(req) - } - - // Set the apply functions - p.ApplyResourceChangeFn = applyFn - p2.ApplyResourceChangeFn = applyFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if !checked { - t.Fatal("should be checked") - } - - state = s - } - - checkStateString(t, state, ` - -`) -} - -func TestContext2Apply_multiVar(t *testing.T) { - m := testModule(t, "apply-multi-var") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // First, apply with a count of 3 - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "num": &InputValue{ - Value: cty.NumberIntVal(3), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := state.RootModule().OutputValues["output"] - expected := cty.StringVal("bar0,bar1,bar2") - if actual == nil || actual.Value != expected { - t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) - } - - t.Logf("Initial state: %s", state.String()) - - // Apply again, reduce the count to 1 - { - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.NormalMode,
			SetVariables: InputValues{
				"num": &InputValue{
					Value:      cty.NumberIntVal(1),
					SourceType: ValueFromCaller,
				},
			},
		})
		assertNoErrors(t, diags)

		state, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		t.Logf("End state: %s", state.String())

		actual := state.RootModule().OutputValues["output"]
		if actual == nil {
			t.Fatal("missing output")
		}

		// Only the first instance remains after the count decrease.
		expected := cty.StringVal("bar0")
		if actual.Value != expected {
			t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected)
		}
	}
}

// This is a holistic test of multi-var (aka "splat variable") handling
// across several different Terraform subsystems. This is here because
// historically there were quirky differences in handling across different
// parts of Terraform and so here we want to assert the expected behavior and
// ensure that it remains consistent in future.
func TestContext2Apply_multiVarComprehensive(t *testing.T) {
	m := testModule(t, "apply-multi-var-comprehensive")
	p := testProvider("test")

	// configs records, per resource "key", the proposed new state seen at
	// plan time so the checkConfig assertions below can inspect it.
	configs := map[string]cty.Value{}
	var configsLock sync.Mutex

	p.ApplyResourceChangeFn = testApplyFn
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		proposed := req.ProposedNewState
		configsLock.Lock()
		defer configsLock.Unlock()
		key := proposed.GetAttr("key").AsString()
		// This test was originally written using the legacy p.PlanResourceChangeFn interface,
		// and so the assertions below expect an old-style ResourceConfig, which
		// we'll construct via our shim for now to avoid rewriting all of the
		// assertions.
		configs[key] = req.ProposedNewState

		// Echo the proposed attributes back, marking "id" unknown (it is
		// Computed) and fixing "name" to the resource key.
		retVals := make(map[string]cty.Value)
		for it := proposed.ElementIterator(); it.Next(); {
			idxVal, val := it.Element()
			idx := idxVal.AsString()

			switch idx {
			case "id":
				retVals[idx] = cty.UnknownVal(cty.String)
			case "name":
				retVals[idx] = cty.StringVal(key)
			default:
				retVals[idx] = val
			}
		}

		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(retVals),
		}
	}

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"key": {Type: cty.String, Required: true},

					"source_id":              {Type: cty.String, Optional: true},
					"source_name":            {Type: cty.String, Optional: true},
					"first_source_id":        {Type: cty.String, Optional: true},
					"first_source_name":      {Type: cty.String, Optional: true},
					"source_ids":             {Type: cty.List(cty.String), Optional: true},
					"source_names":           {Type: cty.List(cty.String), Optional: true},
					"source_ids_from_func":   {Type: cty.List(cty.String), Optional: true},
					"source_names_from_func": {Type: cty.List(cty.String), Optional: true},
					"source_ids_wrapped":     {Type: cty.List(cty.List(cty.String)), Optional: true},
					"source_names_wrapped":   {Type: cty.List(cty.List(cty.String)), Optional: true},

					"id":   {Type: cty.String, Computed: true},
					"name": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(3),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	// checkConfig asserts that the recorded plan-time config for key deeply
	// equals want.
	checkConfig := func(key string, want cty.Value) {
		configsLock.Lock()
defer configsLock.Unlock() - - got, ok := configs[key] - if !ok { - t.Errorf("no config recorded for %s; expected a configuration", key) - return - } - - t.Run("config for "+key, func(t *testing.T) { - for _, problem := range deep.Equal(got, want) { - t.Errorf(problem) - } - }) - } - - checkConfig("multi_count_var.0", cty.ObjectVal(map[string]cty.Value{ - "source_id": cty.UnknownVal(cty.String), - "source_name": cty.StringVal("source.0"), - })) - checkConfig("multi_count_var.2", cty.ObjectVal(map[string]cty.Value{ - "source_id": cty.UnknownVal(cty.String), - "source_name": cty.StringVal("source.2"), - })) - checkConfig("multi_count_derived.0", cty.ObjectVal(map[string]cty.Value{ - "source_id": cty.UnknownVal(cty.String), - "source_name": cty.StringVal("source.0"), - })) - checkConfig("multi_count_derived.2", cty.ObjectVal(map[string]cty.Value{ - "source_id": cty.UnknownVal(cty.String), - "source_name": cty.StringVal("source.2"), - })) - checkConfig("whole_splat", cty.ObjectVal(map[string]cty.Value{ - "source_ids": cty.ListVal([]cty.Value{ - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - }), - "source_names": cty.ListVal([]cty.Value{ - cty.StringVal("source.0"), - cty.StringVal("source.1"), - cty.StringVal("source.2"), - }), - "source_ids_from_func": cty.UnknownVal(cty.String), - "source_names_from_func": cty.ListVal([]cty.Value{ - cty.StringVal("source.0"), - cty.StringVal("source.1"), - cty.StringVal("source.2"), - }), - "source_ids_wrapped": cty.ListVal([]cty.Value{ - cty.ListVal([]cty.Value{ - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - }), - }), - "source_names_wrapped": cty.ListVal([]cty.Value{ - cty.ListVal([]cty.Value{ - cty.StringVal("source.0"), - cty.StringVal("source.1"), - cty.StringVal("source.2"), - }), - }), - "first_source_id": cty.UnknownVal(cty.String), - "first_source_name": cty.StringVal("source.0"), - })) - checkConfig("child.whole_splat", 
cty.ObjectVal(map[string]cty.Value{
		"source_ids": cty.ListVal([]cty.Value{
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
		}),
		"source_names": cty.ListVal([]cty.Value{
			cty.StringVal("source.0"),
			cty.StringVal("source.1"),
			cty.StringVal("source.2"),
		}),
		"source_ids_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
			}),
		}),
		"source_names_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.StringVal("source.0"),
				cty.StringVal("source.1"),
				cty.StringVal("source.2"),
			}),
		}),
	}))

	// Finally apply the plan and assert the root outputs resolved from the
	// splat expressions.
	t.Run("apply", func(t *testing.T) {
		state, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("error during apply: %s", diags.Err())
		}

		want := map[string]interface{}{
			"source_ids": []interface{}{"foo", "foo", "foo"},
			"source_names": []interface{}{
				"source.0",
				"source.1",
				"source.2",
			},
		}
		got := map[string]interface{}{}
		for k, s := range state.RootModule().OutputValues {
			// Convert to legacy config values so the comparison below can
			// use plain Go types.
			got[k] = hcl2shim.ConfigValueFromHCL2(s.Value)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf(
				"wrong outputs\ngot: %s\nwant: %s",
				spew.Sdump(got), spew.Sdump(want),
			)
		}
	})
}

// Test that multi-var (splat) access is ordered by count, not by
// value.
-func TestContext2Apply_multiVarOrder(t *testing.T) { - m := testModule(t, "apply-multi-var-order") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // First, apply with a count of 3 - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - t.Logf("State: %s", state.String()) - - actual := state.RootModule().OutputValues["should-be-11"] - expected := cty.StringVal("index-11") - if actual == nil || actual.Value != expected { - t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) - } -} - -// Test that multi-var (splat) access is ordered by count, not by -// value, through interpolations. -func TestContext2Apply_multiVarOrderInterp(t *testing.T) { - m := testModule(t, "apply-multi-var-order-interp") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // First, apply with a count of 3 - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - t.Logf("State: %s", state.String()) - - actual := state.RootModule().OutputValues["should-be-11"] - expected := cty.StringVal("baz-index-11") - if actual == nil || actual.Value != expected { - t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) - } -} - -// Based on GH-10440 where a graph edge wasn't properly being created -// between a modified 
resource and a count instance being destroyed. -func TestContext2Apply_multiVarCountDec(t *testing.T) { - var s *states.State - - // First create resources. Nothing sneaky here. - { - m := testModule(t, "apply-multi-var-count-dec") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - log.Print("\n========\nStep 1 Plan\n========") - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "num": &InputValue{ - Value: cty.NumberIntVal(2), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - log.Print("\n========\nStep 1 Apply\n========") - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - t.Logf("Step 1 state:\n%s", state) - - s = state - } - - // Decrease the count by 1 and verify that everything happens in the - // right order. - m := testModule(t, "apply-multi-var-count-dec") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - // Verify that aws_instance.bar is modified first and nothing - // else happens at the same time. 
- { - var checked bool - var called int32 - var lock sync.Mutex - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - lock.Lock() - defer lock.Unlock() - - if !req.PlannedState.IsNull() { - s := req.PlannedState.AsValueMap() - if ami, ok := s["ami"]; ok && !ami.IsNull() && ami.AsString() == "special" { - checked = true - - // Sleep to allow parallel execution - time.Sleep(50 * time.Millisecond) - - // Verify that called is 0 (dep not called) - if atomic.LoadInt32(&called) != 1 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called")) - return - } - } - } - atomic.AddInt32(&called, 1) - return testApplyFn(req) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - log.Print("\n========\nStep 2 Plan\n========") - plan, diags := ctx.Plan(m, s, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "num": &InputValue{ - Value: cty.NumberIntVal(1), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - t.Logf("Step 2 plan:\n%s", legacyDiffComparisonString(plan.Changes)) - - log.Print("\n========\nStep 2 Apply\n========") - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !checked { - t.Error("apply never called") - } - } -} - -// Test that we can resolve a multi-var (splat) for the first resource -// created in a non-root module, which happens when the module state doesn't -// exist yet. 
// https://github.com/hashicorp/terraform/issues/14438
func TestContext2Apply_multiVarMissingState(t *testing.T) {
	m := testModule(t, "apply-multi-var-missing-state")
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"a_ids": {Type: cty.String, Optional: true},
					"id":    {Type: cty.String, Computed: true},
				},
			},
		},
	})

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	// Before the relevant bug was fixed, Terraform would panic during apply.
	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply failed: %s", diags.Err())
	}

	// If we get here with no errors or panics then our test was successful.
}

// Tests that output values present in state but absent from configuration
// ("orphans") are removed by apply.
func TestContext2Apply_outputOrphan(t *testing.T) {
	m := testModule(t, "apply-output-orphan")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetOutputValue("foo", cty.StringVal("bar"), false)
	root.SetOutputValue("bar", cty.StringVal("baz"), false)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyOutputOrphanStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests that a module's output is removed from state once the module is
// removed from the configuration.
func TestContext2Apply_outputOrphanModule(t *testing.T) {
	m := testModule(t, "apply-output-orphan-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(s.String())
	expected := strings.TrimSpace(testTerraformApplyOutputOrphanModuleStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}

	// now apply with no module in the config, which should remove the
	// remaining output
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	emptyConfig := configs.NewEmptyConfig()

	// NOTE: While updating this test to pass the state in as a Plan argument,
	// rather than into the testContext2 call above, it previously said
	// State: state.DeepCopy(), which is a little weird since we just
	// created "s" above as the result of the previous apply, but I've preserved
	// it to avoid changing the flow of this test in case that's important
	// for some reason.
	plan, diags = ctx.Plan(emptyConfig, state.DeepCopy(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, emptyConfig)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	if !state.Empty() {
		t.Fatalf("wrong final state %s\nwant empty state", spew.Sdump(state))
	}
}

// Tests that a provider block may be configured from a computed resource
// attribute of another provider's resource.
func TestContext2Apply_providerComputedVar(t *testing.T) {
	m := testModule(t, "apply-provider-computed")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	pTest := testProvider("test")
	pTest.ApplyResourceChangeFn = testApplyFn
	pTest.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"):  testProviderFuncFixed(p),
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest),
		},
	})

	// The configure hook fails if "value" hasn't been resolved by the time
	// the provider is configured.
	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
			return
		}
		return
	}

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// Tests that the provider is still configured (with a resolved config)
// during apply.
func TestContext2Apply_providerConfigureDisabled(t *testing.T) {
	m := testModule(t, "apply-provider-configure-disabled")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}

	if !p.ConfigureProviderCalled {
		t.Fatal("configure never called")
	}
}

// Tests that provisioners attached to resources inside a module run during
// apply.
func TestContext2Apply_provisionerModule(t *testing.T) {
	m := testModule(t, "apply-provisioner-module")

	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn

	pr := testProvisioner()
	pr.GetSchemaResponse = provisioners.GetSchemaResponse{
		Provisioner: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"foo": {Type: cty.String, Optional: true},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyProvisionerModuleStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}

	// Verify apply was invoked
	if !pr.ProvisionResourceCalled {
		t.Fatalf("provisioner not invoked")
	}
}

// Tests that a provisioner receives computed values in its configuration
// once they are known at apply time.
func TestContext2Apply_Provisioner_compute(t *testing.T) {
	m := testModule(t, "apply-provisioner-compute")
	p :=
testProvider("aws")
	pr := testProvisioner()
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {

		val := req.Config.GetAttr("command").AsString()
		if val != "computed_value" {
			t.Fatalf("bad value for foo: %q", val)
		}
		req.UIOutput.Output(fmt.Sprintf("Executing: %q", val))

		return
	}
	h := new(MockHook)
	ctx := testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"value": &InputValue{
				Value:      cty.NumberIntVal(1),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyProvisionerStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}

	// Verify apply was invoked
	if !pr.ProvisionResourceCalled {
		t.Fatalf("provisioner not invoked")
	}

	// Verify output was rendered
	if !h.ProvisionOutputCalled {
		t.Fatalf("ProvisionOutput hook not called")
	}
	if got, want := h.ProvisionOutputMessage, `Executing: "computed_value"`; got != want {
		t.Errorf("expected output to be %q, but was %q", want, got)
	}
}

// Tests that a provider error during create still leaves the (tainted)
// resource recorded in state.
func TestContext2Apply_provisionerCreateFail(t *testing.T) {
	m := testModule(t, "apply-provisioner-fail-create")
	p := testProvider("aws")
	pr := testProvisioner()
	p.PlanResourceChangeFn = testDiffFn

	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		resp := testApplyFn(req)
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error"))

		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	// Require an actual error. The previous "diags == nil" check was
	// vacuous: a Diagnostics value containing only warnings is non-nil,
	// so the check could never fire on a warnings-only result. This also
	// matches the "!diags.HasErrors()" style used by the sibling tests.
	if !diags.HasErrors() {
		t.Fatal("should error")
	}

	got := strings.TrimSpace(state.String())
	want := strings.TrimSpace(testTerraformApplyProvisionerFailCreateStr)
	if got != want {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}

// Tests a create failure where the provider returns no new state at all
// (no id recorded).
func TestContext2Apply_provisionerCreateFailNoId(t *testing.T) {
	m := testModule(t, "apply-provisioner-fail-create")
	p := testProvider("aws")
	pr := testProvisioner()
	p.PlanResourceChangeFn = testDiffFn

	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error"))
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	// Same fix as above: assert a real error, not merely non-nil diags.
	if !diags.HasErrors() {
		t.Fatal("should error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyProvisionerFailCreateNoIdStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests that a provisioner failure is surfaced as an apply error and the
// resource is left tainted in state.
func TestContext2Apply_provisionerFail(t *testing.T) {
	m := testModule(t, "apply-provisioner-fail")
	p :=
testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	pr := testProvisioner()
	pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION"))
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	// Require an actual error: the previous "diags == nil" check was
	// vacuous because a warnings-only Diagnostics value is non-nil. This
	// matches the "!diags.HasErrors()" style used by the sibling tests.
	if !diags.HasErrors() {
		t.Fatal("should error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyProvisionerFailStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests provisioner failure combined with create_before_destroy: the failed
// new instance is recorded while the old one is kept (deposed handling).
func TestContext2Apply_provisionerFail_createBeforeDestroy(t *testing.T) {
	m := testModule(t, "apply-provisioner-fail-create-before")
	p := testProvider("aws")
	pr := testProvisioner()
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION"))
		return
	}

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar","require_new":"abc"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"):
testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if !diags.HasErrors() {
		t.Fatal("should error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyProvisionerFailCreateBeforeDestroyStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n:got\n%s", expected, actual)
	}
}

// Tests a create failure under create_before_destroy: the old instance must
// remain in state when the replacement fails to create.
func TestContext2Apply_error_createBeforeDestroy(t *testing.T) {
	m := testModule(t, "apply-error-create-before")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar", "require_new": "abc","type":"aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})
	// Every apply fails, so the replacement create never succeeds.
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("placeholder error from ApplyFn"))
		return
	}
	p.PlanResourceChangeFn = testDiffFn

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}
	if got, want := diags.Err().Error(), "placeholder error from ApplyFn"; got != want {
		// We're looking for our artificial error from ApplyFn above, whose
		// message is literally "placeholder error from ApplyFn".
		t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want)
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyErrorCreateBeforeDestroyStr)
	if actual != expected {
		t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests a destroy failure under create_before_destroy: the create succeeds
// but destroying the old (deposed) instance fails, so it must remain in
// state.
func TestContext2Apply_errorDestroy_createBeforeDestroy(t *testing.T) {
	m := testModule(t, "apply-error-create-before")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		// Fail the destroy!
		// (A null planned state indicates a destroy operation.)
		if req.PlannedState.IsNull() {
			resp.NewState = req.PriorState
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error"))
			return
		}

		return testApplyFn(req)
	}
	p.PlanResourceChangeFn = testDiffFn

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTerraformApplyErrorDestroyCreateBeforeDestroyStr)
	if actual != expected {
		t.Fatalf("bad: actual:\n%s\n\nexpected:\n%s", actual, expected)
	}
}

func TestContext2Apply_multiDepose_createBeforeDestroy(t *testing.T) {
	m := testModule(t, "apply-multi-depose-create-before-destroy")
	p := testProvider("aws")
	ps := map[addrs.Provider]providers.Factory{addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p)}
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"require_new": {Type: cty.String, Optional: true},
					"id":          {Type: cty.String, Computed: true},
				},
			},
		},
	})

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.web").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo"}`),
		},
		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
	)

	p.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: ps,
	})
	createdInstanceId := "bar"
	// Create works
	createFunc := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		s := req.PlannedState.AsValueMap()
		s["id"] = cty.StringVal(createdInstanceId)
		resp.NewState = cty.ObjectVal(s)
		return
	}

	// Destroy
starts broken - destroyFunc := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - resp.NewState = req.PriorState - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("destroy failed")) - return - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if req.PlannedState.IsNull() { - return destroyFunc(req) - } else { - return createFunc(req) - } - } - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "require_new": &InputValue{ - Value: cty.StringVal("yes"), - }, - }, - }) - assertNoErrors(t, diags) - - // Destroy is broken, so even though CBD successfully replaces the instance, - // we'll have to save the Deposed instance to destroy later - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("should have error") - } - - checkStateString(t, state, ` -aws_instance.web: (1 deposed) - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = yes - Deposed ID 1 = foo - `) - - createdInstanceId = "baz" - ctx = testContext2(t, &ContextOpts{ - Providers: ps, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "require_new": &InputValue{ - Value: cty.StringVal("baz"), - }, - }, - }) - assertNoErrors(t, diags) - - // We're replacing the primary instance once again. Destroy is _still_ - // broken, so the Deposed list gets longer - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("should have error") - } - - // For this one we can't rely on checkStateString because its result is - // not deterministic when multiple deposed objects are present. Instead, - // we will probe the state object directly. 
- { - is := state.RootModule().Resources["aws_instance.web"].Instances[addrs.NoKey] - if is.Current == nil { - t.Fatalf("no current object for aws_instance web; should have one") - } - if !bytes.Contains(is.Current.AttrsJSON, []byte("baz")) { - t.Fatalf("incorrect current object attrs %s; want id=baz", is.Current.AttrsJSON) - } - if got, want := len(is.Deposed), 2; got != want { - t.Fatalf("wrong number of deposed instances %d; want %d", got, want) - } - var foos, bars int - for _, obj := range is.Deposed { - if bytes.Contains(obj.AttrsJSON, []byte("foo")) { - foos++ - } - if bytes.Contains(obj.AttrsJSON, []byte("bar")) { - bars++ - } - } - if got, want := foos, 1; got != want { - t.Fatalf("wrong number of deposed instances with id=foo %d; want %d", got, want) - } - if got, want := bars, 1; got != want { - t.Fatalf("wrong number of deposed instances with id=bar %d; want %d", got, want) - } - } - - // Destroy partially fixed! - destroyFunc = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - s := req.PriorState.AsValueMap() - id := s["id"].AsString() - if id == "foo" || id == "baz" { - resp.NewState = cty.NullVal(req.PriorState.Type()) - } else { - resp.NewState = req.PriorState - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("destroy partially failed")) - } - return - } - - createdInstanceId = "qux" - ctx = testContext2(t, &ContextOpts{ - Providers: ps, - }) - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "require_new": &InputValue{ - Value: cty.StringVal("qux"), - }, - }, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - // Expect error because 1/2 of Deposed destroys failed - if !diags.HasErrors() { - t.Fatal("should have error") - } - - // foo and baz are now gone, bar sticks around - checkStateString(t, state, ` -aws_instance.web: (1 deposed) - ID = qux - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = qux 
- Deposed ID 1 = bar - `) - - // Destroy working fully! - destroyFunc = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - resp.NewState = cty.NullVal(req.PriorState.Type()) - return - } - - createdInstanceId = "quux" - ctx = testContext2(t, &ContextOpts{ - Providers: ps, - }) - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "require_new": &InputValue{ - Value: cty.StringVal("quux"), - }, - }, - }) - assertNoErrors(t, diags) - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal("should not have error:", diags.Err()) - } - - // And finally the state is clean - checkStateString(t, state, ` -aws_instance.web: - ID = quux - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = quux - `) -} - -// Verify that a normal provisioner with on_failure "continue" set won't -// taint the resource and continues executing. -func TestContext2Apply_provisionerFailContinue(t *testing.T) { - m := testModule(t, "apply-provisioner-fail-continue") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = foo - provider = 
provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance - `) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } -} - -// Verify that a normal provisioner with on_failure "continue" records -// the error with the hook. -func TestContext2Apply_provisionerFailContinueHook(t *testing.T) { - h := new(MockHook) - m := testModule(t, "apply-provisioner-fail-continue") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) - return - } - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !h.PostProvisionInstanceStepCalled { - t.Fatal("PostProvisionInstanceStep not called") - } - if h.PostProvisionInstanceStepErrorArg == nil { - t.Fatal("should have error") - } -} - -func TestContext2Apply_provisionerDestroy(t *testing.T) { - m := testModule(t, "apply-provisioner-destroy") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command").AsString() - if val != "destroy a bar" { - t.Fatalf("bad value for foo: %q", val) - } - - return - } - - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ``) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } -} - -// Verify that on destroy provisioner failure, nothing happens to the instance -func TestContext2Apply_provisionerDestroyFail(t *testing.T) { - m := testModule(t, "apply-provisioner-destroy") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) - return - } - - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": 
testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags == nil { - t.Fatal("should error") - } - - checkStateString(t, state, ` -aws_instance.foo["a"]: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - `) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } -} - -// Verify that on destroy provisioner failure with "continue" that -// we continue to the next provisioner. -func TestContext2Apply_provisionerDestroyFailContinue(t *testing.T) { - m := testModule(t, "apply-provisioner-destroy-continue") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - - var l sync.Mutex - var calls []string - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.IsNull() { - t.Fatalf("bad value for foo: %#v", val) - } - - l.Lock() - defer l.Unlock() - calls = append(calls, val.AsString()) - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) - return - } - - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, 
diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ``) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - - expected := []string{"one", "two"} - if !reflect.DeepEqual(calls, expected) { - t.Fatalf("wrong commands\ngot: %#v\nwant: %#v", calls, expected) - } -} - -// Verify that on destroy provisioner failure with "continue" that -// we continue to the next provisioner. But if the next provisioner defines -// to fail, then we fail after running it. -func TestContext2Apply_provisionerDestroyFailContinueFail(t *testing.T) { - m := testModule(t, "apply-provisioner-destroy-fail") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - - var l sync.Mutex - var calls []string - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.IsNull() { - t.Fatalf("bad value for foo: %#v", val) - } - - l.Lock() - defer l.Unlock() - calls = append(calls, val.AsString()) - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) - return - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, 
diags = ctx.Apply(plan, m) - if diags == nil { - t.Fatal("apply succeeded; wanted error from second provisioner") - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - `) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - - expected := []string{"one", "two"} - if !reflect.DeepEqual(calls, expected) { - t.Fatalf("bad: %#v", calls) - } -} - -// Verify destroy provisioners are not run for tainted instances. -func TestContext2Apply_provisionerDestroyTainted(t *testing.T) { - m := testModule(t, "apply-provisioner-destroy") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - destroyCalled := false - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - expected := "create a b" - val := req.Config.GetAttr("command") - if val.AsString() != expected { - t.Fatalf("bad value for command: %#v", val) - } - - return - } - - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "input": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b"), - }), - SourceType: ValueFromInput, - }, - }, - }) - assertNoErrors(t, diags) - - state, 
diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.foo["a"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance - `) - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - - if destroyCalled { - t.Fatal("destroy should not be called") - } -} - -func TestContext2Apply_provisionerResourceRef(t *testing.T) { - m := testModule(t, "apply-provisioner-resource-ref") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - pr := testProvisioner() - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.AsString() != "2" { - t.Fatalf("bad value for command: %#v", val) - } - - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerResourceRefStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } -} - -func TestContext2Apply_provisionerSelfRef(t *testing.T) { - m := testModule(t, "apply-provisioner-self-ref") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - 
pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.AsString() != "bar" { - t.Fatalf("bad value for command: %#v", val) - } - - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerSelfRefStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } -} - -func TestContext2Apply_provisionerMultiSelfRef(t *testing.T) { - var lock sync.Mutex - commands := make([]string, 0, 5) - - m := testModule(t, "apply-provisioner-multi-self-ref") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - lock.Lock() - defer lock.Unlock() - - val := req.Config.GetAttr("command") - if val.IsNull() { - t.Fatalf("bad value for command: %#v", val) - } - - commands = append(commands, val.AsString()) - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), 
DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerMultiSelfRefStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - - // Verify our result - sort.Strings(commands) - expectedCommands := []string{"number 0", "number 1", "number 2"} - if !reflect.DeepEqual(commands, expectedCommands) { - t.Fatalf("bad: %#v", commands) - } -} - -func TestContext2Apply_provisionerMultiSelfRefSingle(t *testing.T) { - var lock sync.Mutex - order := make([]string, 0, 5) - - m := testModule(t, "apply-provisioner-multi-self-ref-single") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - lock.Lock() - defer lock.Unlock() - - val := req.Config.GetAttr("order") - if val.IsNull() { - t.Fatalf("no val for order") - } - - order = append(order, val.AsString()) - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerMultiSelfRefSingleStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", 
actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - - // Verify our result - sort.Strings(order) - expectedOrder := []string{"0", "1", "2"} - if !reflect.DeepEqual(order, expectedOrder) { - t.Fatalf("bad: %#v", order) - } -} - -func TestContext2Apply_provisionerExplicitSelfRef(t *testing.T) { - m := testModule(t, "apply-provisioner-explicit-self-ref") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.IsNull() || val.AsString() != "bar" { - t.Fatalf("bad value for command: %#v", val) - } - - return - } - - var state *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner not invoked") - } - } - - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ``) - } -} - 
-func TestContext2Apply_provisionerForEachSelfRef(t *testing.T) { - m := testModule(t, "apply-provisioner-for-each-self") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - val := req.Config.GetAttr("command") - if val.IsNull() { - t.Fatalf("bad value for command: %#v", val) - } - - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } -} - -// Provisioner should NOT run on a diff, only create -func TestContext2Apply_Provisioner_Diff(t *testing.T) { - m := testModule(t, "apply-provisioner-diff") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerDiffStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner was 
not called on first apply") - } - pr.ProvisionResourceCalled = false - - // Change the state to force a diff - mod := state.RootModule() - obj := mod.Resources["aws_instance.bar"].Instances[addrs.NoKey].Current - var attrs map[string]interface{} - err := json.Unmarshal(obj.AttrsJSON, &attrs) - if err != nil { - t.Fatal(err) - } - attrs["foo"] = "baz" - obj.AttrsJSON, err = json.Marshal(attrs) - if err != nil { - t.Fatal(err) - } - - // Re-create context with state - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags = ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - state2, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } - - actual = strings.TrimSpace(state2.String()) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was NOT invoked - if pr.ProvisionResourceCalled { - t.Fatalf("provisioner was called on second apply; should not have been") - } -} - -func TestContext2Apply_outputDiffVars(t *testing.T) { - m := testModule(t, "apply-good") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.baz").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.PlanResourceChangeFn = testDiffFn - //func(info *InstanceInfo, s *InstanceState, rc *ResourceConfig) (*InstanceDiff, 
error) { - // d := &InstanceDiff{ - // Attributes: map[string]*ResourceAttrDiff{}, - // } - // if new, ok := rc.Get("value"); ok { - // d.Attributes["value"] = &ResourceAttrDiff{ - // New: new.(string), - // } - // } - // if new, ok := rc.Get("foo"); ok { - // d.Attributes["foo"] = &ResourceAttrDiff{ - // New: new.(string), - // } - // } else if rc.IsComputed("foo") { - // d.Attributes["foo"] = &ResourceAttrDiff{ - // NewComputed: true, - // Type: DiffAttrOutput, // This doesn't actually really do anything anymore, but this test originally set it. - // } - // } - // if new, ok := rc.Get("num"); ok { - // d.Attributes["num"] = &ResourceAttrDiff{ - // New: fmt.Sprintf("%#v", new), - // } - // } - // return d, nil - //} - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -func TestContext2Apply_destroyX(t *testing.T) { - m := testModule(t, "apply-destroy") - h := new(HookRecordApplyOrder) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Next, plan and apply a destroy operation - h.Active = true - ctx = testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Test that things were destroyed - actual := 
strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyDestroyStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Test that things were destroyed _in the right order_ - expected2 := []string{"aws_instance.bar", "aws_instance.foo"} - actual2 := h.IDs - if !reflect.DeepEqual(actual2, expected2) { - t.Fatalf("expected: %#v\n\ngot:%#v", expected2, actual2) - } -} - -func TestContext2Apply_destroyOrder(t *testing.T) { - m := testModule(t, "apply-destroy") - h := new(HookRecordApplyOrder) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - t.Logf("State 1: %s", state) - - // Next, plan and apply a destroy - h.Active = true - ctx = testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Test that things were destroyed - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyDestroyStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Test that things were destroyed _in the right order_ - expected2 := []string{"aws_instance.bar", "aws_instance.foo"} - actual2 := h.IDs - if !reflect.DeepEqual(actual2, expected2) { - 
t.Fatalf("expected: %#v\n\ngot:%#v", expected2, actual2) - } -} - -// https://github.com/hashicorp/terraform/issues/2767 -func TestContext2Apply_destroyModulePrefix(t *testing.T) { - m := testModule(t, "apply-destroy-module-resource-prefix") - h := new(MockHook) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Verify that we got the apply info correct - if v := h.PreApplyAddr.String(); v != "module.child.aws_instance.foo" { - t.Fatalf("bad: %s", v) - } - - // Next, plan and apply a destroy operation and reset the hook - h = new(MockHook) - ctx = testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Test that things were destroyed - if v := h.PreApplyAddr.String(); v != "module.child.aws_instance.foo" { - t.Fatalf("bad: %s", v) - } -} - -func TestContext2Apply_destroyNestedModule(t *testing.T) { - m := testModule(t, "apply-destroy-nested-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Test that things were destroyed - actual := strings.TrimSpace(s.String()) - if actual != "" { - t.Fatalf("expected no state, got: %s", actual) - } -} - -func TestContext2Apply_destroyDeeplyNestedModule(t *testing.T) { - m := testModule(t, "apply-destroy-deeply-nested-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Test that things were destroyed - if !s.Empty() { - t.Fatalf("wrong final state %s\nwant empty state", spew.Sdump(s)) - } -} - -// https://github.com/hashicorp/terraform/issues/5440 -func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-destroy-module-with-attrs") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - var state 
*states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("plan diags: %s", diags.Err()) - } else { - t.Logf("Step 1 plan: %s", legacyDiffComparisonString(plan.Changes)) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errs: %s", diags.Err()) - } - - t.Logf("Step 1 state: %s", state) - } - - h := new(HookRecordApplyOrder) - h.Active = true - - { - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("destroy plan err: %s", diags.Err()) - } - - t.Logf("Step 2 plan: %s", legacyDiffComparisonString(plan.Changes)) - - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply err: %s", diags.Err()) - } - - t.Logf("Step 2 state: %s", state) - } - - //Test that things were destroyed - if state.HasManagedResourceInstanceObjects() { - t.Fatal("expected empty state, got:", state) - } -} - -func TestContext2Apply_destroyWithModuleVariableAndCount(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count") - p := testProvider("aws") - p.PlanResourceChangeFn = 
testDiffFn - - var state *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply err: %s", diags.Err()) - } - } - - h := new(HookRecordApplyOrder) - h.Active = true - - { - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("destroy plan err: %s", diags.Err()) - } - - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = - map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply err: %s", diags.Err()) - } - } - - //Test that things were destroyed - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -`) - if actual != expected { - t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) - } -} - -func TestContext2Apply_destroyTargetWithModuleVariableAndCount(t *testing.T) { - m := testModule(t, "apply-destroy-mod-var-and-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - var state *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply err: %s", diags.Err()) - } - } - - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey), - }, - }) - if diags.HasErrors() { - t.Fatalf("plan err: %s", diags) - } - if len(diags) != 1 { - // Should have one warning that -target is in effect. - t.Fatalf("got %d diagnostics in plan; want 1", len(diags)) - } - if got, want := diags[0].Severity(), tfdiags.Warning; got != want { - t.Errorf("wrong diagnostic severity %#v; want %#v", got, want) - } - if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want { - t.Errorf("wrong diagnostic summary %#v; want %#v", got, want) - } - - // Destroy, targeting the module explicitly - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply err: %s", diags) - } - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want 1", len(diags)) - } - if got, want := diags[0].Severity(), tfdiags.Warning; got != want { - t.Errorf("wrong diagnostic severity %#v; want %#v", got, want) - } - if got, want := diags[0].Description().Summary, "Applied changes may be incomplete"; got != want { - t.Errorf("wrong diagnostic summary %#v; want %#v", got, want) - } - } - - //Test that things were destroyed - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(``) - if actual != expected { - t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) - } -} - -func TestContext2Apply_destroyWithModuleVariableAndCountNested(t *testing.T) { - m, snap := 
testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count-nested") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - var state *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply err: %s", diags.Err()) - } - } - - h := new(HookRecordApplyOrder) - h.Active = true - - { - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("destroy plan err: %s", diags.Err()) - } - - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = - map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply err: %s", diags.Err()) - } - } - - //Test that things were destroyed - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -`) - if actual != expected { - t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) - } -} - -func TestContext2Apply_destroyOutputs(t *testing.T) { - m := testModule(t, "apply-destroy-outputs") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn 
- - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - // add the required id - m := req.Config.AsValueMap() - m["id"] = cty.StringVal("foo") - - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(m), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // Next, plan and apply a destroy operation - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) > 0 { - t.Fatalf("expected no resources, got: %#v", mod) - } - - // destroying again should produce no errors - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatal(diags.Err()) - } -} - -func TestContext2Apply_destroyOrphan(t *testing.T) { - m := testModule(t, "apply-error") - p := testProvider("aws") - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.baz").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: 
[]byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.PlanResourceChangeFn = testDiffFn - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := s.RootModule() - if _, ok := mod.Resources["aws_instance.baz"]; ok { - t.Fatalf("bad: %#v", mod.Resources) - } -} - -func TestContext2Apply_destroyTaintedProvisioner(t *testing.T) { - m := testModule(t, "apply-destroy-provisioner") - p := testProvider("aws") - pr := testProvisioner() - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if pr.ProvisionResourceCalled { - t.Fatal("provisioner should not be called") - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace("") - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_error(t *testing.T) { - errored := false - - m := 
testModule(t, "apply-error") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if errored { - resp.NewState = req.PlannedState - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) - return - } - errored = true - - return testApplyFn(req) - } - p.PlanResourceChangeFn = testDiffFn - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags == nil { - t.Fatal("should have error") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyErrorStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_errorDestroy(t *testing.T) { - m := testModule(t, "empty") - p := testProvider("test") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - // Should actually be called for this test, because Terraform Core - // constructs the plan for a destroy operation itself. - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // The apply (in this case, a destroy) always fails, so we can verify - // that the object stays in the state after a destroy fails even though - // we aren't returning a new state object here. 
- return providers.ApplyResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("failed")), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("should have error") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -test_thing.foo: - ID = baz - provider = provider["registry.terraform.io/hashicorp/test"] -`) // test_thing.foo is still here, even though provider returned no new state along with its error - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_errorCreateInvalidNew(t *testing.T) { - m := testModule(t, "apply-error") - - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ApplyResourceChangeFn = func(req 
providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // We're intentionally returning an inconsistent new state here - // because we want to test that Terraform ignores the inconsistency - // when accompanied by another error. - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "value": cty.StringVal("wrong wrong wrong wrong"), - "foo": cty.StringVal("absolutely brimming over with wrongability"), - }), - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("forced error")), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags == nil { - t.Fatal("should have error") - } - if got, want := len(diags), 1; got != want { - // There should be no additional diagnostics generated by Terraform's own eval logic, - // because the provider's own error supersedes them. 
- t.Errorf("wrong number of diagnostics %d; want %d\n%s", got, want, diags.Err()) - } - if got, want := diags.Err().Error(), "forced error"; !strings.Contains(got, want) { - t.Errorf("returned error does not contain %q, but it should\n%s", want, diags.Err()) - } - if got, want := len(state.RootModule().Resources), 2; got != want { - t.Errorf("%d resources in state before prune; should have %d\n%s", got, want, spew.Sdump(state)) - } - state.PruneResourceHusks() // aws_instance.bar with no instances gets left behind when we bail out, but that's okay - if got, want := len(state.RootModule().Resources), 1; got != want { - t.Errorf("%d resources in state after prune; should have only one (aws_instance.foo, tainted)\n%s", got, spew.Sdump(state)) - } -} - -func TestContext2Apply_errorUpdateNullNew(t *testing.T) { - m := testModule(t, "apply-error") - - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // We're intentionally returning no NewState here because we want to - // test that Terraform retains the prior state, rather than treating - // the returned null as "no state" (object deleted). 
- return providers.ApplyResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("forced error")), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"value":"old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("should have error") - } - if got, want := len(diags), 1; got != want { - // There should be no additional diagnostics generated by Terraform's own eval logic, - // because the provider's own error supersedes them. 
- t.Errorf("wrong number of diagnostics %d; want %d\n%s", got, want, diags.Err()) - } - if got, want := diags.Err().Error(), "forced error"; !strings.Contains(got, want) { - t.Errorf("returned error does not contain %q, but it should\n%s", want, diags.Err()) - } - state.PruneResourceHusks() - if got, want := len(state.RootModule().Resources), 1; got != want { - t.Fatalf("%d resources in state; should have only one (aws_instance.foo, unmodified)\n%s", got, spew.Sdump(state)) - } - - is := state.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - if is == nil { - t.Fatalf("aws_instance.foo is not in the state after apply") - } - if got, want := is.Current.AttrsJSON, []byte(`"old"`); !bytes.Contains(got, want) { - t.Fatalf("incorrect attributes for aws_instance.foo\ngot: %s\nwant: JSON containing %s\n\n%s", got, want, spew.Sdump(is)) - } -} - -func TestContext2Apply_errorPartial(t *testing.T) { - errored := false - - m := testModule(t, "apply-error") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if errored { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) - return - } - errored = true - - return testApplyFn(req) - } - p.PlanResourceChangeFn = testDiffFn - - plan, diags := ctx.Plan(m, state, 
DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags == nil { - t.Fatal("should have error") - } - - mod := s.RootModule() - if len(mod.Resources) != 2 { - t.Fatalf("bad: %#v", mod.Resources) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyErrorPartialStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_hook(t *testing.T) { - m := testModule(t, "apply-good") - h := new(MockHook) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !h.PreApplyCalled { - t.Fatal("should be called") - } - if !h.PostApplyCalled { - t.Fatal("should be called") - } - if !h.PostStateUpdateCalled { - t.Fatalf("should call post state update") - } -} - -func TestContext2Apply_hookOrphan(t *testing.T) { - m := testModule(t, "apply-blank") - h := new(MockHook) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - if 
_, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !h.PreApplyCalled { - t.Fatal("should be called") - } - if !h.PostApplyCalled { - t.Fatal("should be called") - } - if !h.PostStateUpdateCalled { - t.Fatalf("should call post state update") - } -} - -func TestContext2Apply_idAttr(t *testing.T) { - m := testModule(t, "apply-idattr") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - mod := state.RootModule() - rs, ok := mod.Resources["aws_instance.foo"] - if !ok { - t.Fatal("not in state") - } - var attrs map[string]interface{} - err := json.Unmarshal(rs.Instances[addrs.NoKey].Current.AttrsJSON, &attrs) - if err != nil { - t.Fatal(err) - } - if got, want := attrs["id"], "foo"; got != want { - t.Fatalf("wrong id\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestContext2Apply_outputBasic(t *testing.T) { - m := testModule(t, "apply-output") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyOutputStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, 
expected) - } -} - -func TestContext2Apply_outputAdd(t *testing.T) { - m1 := testModule(t, "apply-output-add-before") - p1 := testProvider("aws") - p1.ApplyResourceChangeFn = testApplyFn - p1.PlanResourceChangeFn = testDiffFn - ctx1 := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p1), - }, - }) - - plan1, diags := ctx1.Plan(m1, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state1, diags := ctx1.Apply(plan1, m1) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - m2 := testModule(t, "apply-output-add-after") - p2 := testProvider("aws") - p2.ApplyResourceChangeFn = testApplyFn - p2.PlanResourceChangeFn = testDiffFn - ctx2 := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p2), - }, - }) - - plan2, diags := ctx1.Plan(m2, state1, DefaultPlanOpts) - assertNoErrors(t, diags) - - state2, diags := ctx2.Apply(plan2, m2) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state2.String()) - expected := strings.TrimSpace(testTerraformApplyOutputAddStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_outputList(t *testing.T) { - m := testModule(t, "apply-output-list") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyOutputListStr) 
- if actual != expected { - t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) - } -} - -func TestContext2Apply_outputMulti(t *testing.T) { - m := testModule(t, "apply-output-multi") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyOutputMultiStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_outputMultiIndex(t *testing.T) { - m := testModule(t, "apply-output-multi-index") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyOutputMultiIndexStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_taintX(t *testing.T) { - m := testModule(t, "apply-taint") - p := testProvider("aws") - // destroyCount tests against regression of - // https://github.com/hashicorp/terraform/issues/1056 - var destroyCount = int32(0) - var once sync.Once - simulateProviderDelay := func() { - time.Sleep(10 * 
time.Millisecond) - } - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - once.Do(simulateProviderDelay) - if req.PlannedState.IsNull() { - atomic.AddInt32(&destroyCount, 1) - } - return testApplyFn(req) - } - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) - } - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyTaintStr) - if actual != expected { - t.Fatalf("bad:\n%s", actual) - } - - if destroyCount != 1 { - t.Fatalf("Expected 1 destroy, got %d", destroyCount) - } -} - -func TestContext2Apply_taintDep(t *testing.T) { - m := testModule(t, "apply-taint-dep") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","num": "2", "type": "aws_instance", "foo": "baz"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) - } - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyTaintDepStr) - if actual != expected { - t.Fatalf("bad:\n%s", actual) - } -} - -func TestContext2Apply_taintDepRequiresNew(t *testing.T) { - m := testModule(t, "apply-taint-dep-requires-new") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","num": "2", "type": "aws_instance", "foo": "baz"}`), - Dependencies: 
[]addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) - } - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformApplyTaintDepRequireNewStr) - if actual != expected { - t.Fatalf("bad:\n%s", actual) - } -} - -func TestContext2Apply_targeted(t *testing.T) { - m := testModule(t, "apply-targeted") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) != 1 { - t.Fatalf("expected 1 resource, got: %#v", mod.Resources) - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) -} - -func TestContext2Apply_targetedCount(t *testing.T) { - m := testModule(t, "apply-targeted-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx 
:= testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - `) -} - -func TestContext2Apply_targetedCountIndex(t *testing.T) { - m := testModule(t, "apply-targeted-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1), - ), - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - `) -} - -func TestContext2Apply_targetedDestroy(t *testing.T) { - m := testModule(t, "destroy-targeted") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - 
root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetOutputValue("out", cty.StringVal("bar"), false) - - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if diags := ctx.Validate(m); diags.HasErrors() { - t.Fatalf("validate errors: %s", diags.Err()) - } - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "a", - ), - }, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) != 0 { - t.Fatalf("expected 0 resources, got: %#v", mod.Resources) - } - - // the root output should not get removed; only the targeted resource. - // - // Note: earlier versions of this test expected 0 outputs, but it turns out - // that was because Validate - not apply or destroy - removed the output - // (which depends on the targeted resource) from state. That version of this - // test did not match actual terraform behavior: the output remains in - // state. 
- // - // The reason it remains in the state is that we prune out the root module - // output values from the destroy graph as part of pruning out the "update" - // nodes for the resources, because otherwise the root module output values - // force the resources to stay in the graph and can therefore cause - // unwanted dependency cycles. - // - // TODO: Future refactoring may enable us to remove the output from state in - // this case, and that would be Just Fine - this test can be modified to - // expect 0 outputs. - if len(mod.OutputValues) != 1 { - t.Fatalf("expected 1 outputs, got: %#v", mod.OutputValues) - } - - // the module instance should remain - mod = state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - if len(mod.Resources) != 1 { - t.Fatalf("expected 1 resources, got: %#v", mod.Resources) - } -} - -func TestContext2Apply_targetedDestroyCountDeps(t *testing.T) { - m := testModule(t, "apply-destroy-targeted-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - 
Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ``) -} - -// https://github.com/hashicorp/terraform/issues/4462 -func TestContext2Apply_targetedDestroyModule(t *testing.T) { - m := testModule(t, "apply-targeted-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) 
- - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance.foo: - ID = i-bcd345 - provider = provider["registry.terraform.io/hashicorp/aws"] - -module.child: - aws_instance.bar: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] - `) -} - -func TestContext2Apply_targetedDestroyCountIndex(t *testing.T) { - m := testModule(t, "apply-targeted-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - foo := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd345"}`), - } - bar := &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - foo, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - foo, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - foo, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[0]").Resource, - bar, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("aws_instance.bar[1]").Resource, - bar, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[2]").Resource, - bar, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(2), - ), - addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "bar", addrs.IntKey(1), - ), - }, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.bar.0: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance.bar.2: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance.foo.0: - ID = i-bcd345 - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance.foo.1: - ID = i-bcd345 - provider = provider["registry.terraform.io/hashicorp/aws"] - `) -} - -func TestContext2Apply_targetedModule(t *testing.T) { - m := testModule(t, "apply-targeted-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey), - }, - }) - assertNoErrors(t, diags) - - state, diags 
:= ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - if mod == nil { - t.Fatalf("no child module found in the state!\n\n%#v", state) - } - if len(mod.Resources) != 2 { - t.Fatalf("expected 2 resources, got: %#v", mod.Resources) - } - - checkStateString(t, state, ` - -module.child: - aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) -} - -// GH-1858 -func TestContext2Apply_targetedModuleDep(t *testing.T) { - m := testModule(t, "apply-targeted-module-dep") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf("Diff: %s", legacyDiffComparisonString(plan.Changes)) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance - - Dependencies: - module.child.aws_instance.mod - -module.child: - aws_instance.mod: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - Outputs: - - output = foo - `) -} - -// GH-10911 untargeted outputs should not be in the graph, and therefore -// not execute. 
-func TestContext2Apply_targetedModuleUnrelatedOutputs(t *testing.T) { - m := testModule(t, "apply-targeted-module-unrelated-outputs") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - state := states.NewState() - _ = state.EnsureModule(addrs.RootModuleInstance.Child("child2", addrs.NoKey)) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child2", addrs.NoKey), - }, - }) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // - module.child1's instance_id output is dropped because we don't preserve - // non-root module outputs between runs (they can be recalculated from config) - // - module.child2's instance_id is updated because its dependency is updated - // - child2_id is updated because if its transitive dependency via module.child2 - checkStateString(t, s, ` - -Outputs: - -child2_id = foo - -module.child2: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - Outputs: - - instance_id = foo -`) -} - -func TestContext2Apply_targetedModuleResource(t *testing.T) { - m := testModule(t, "apply-targeted-module-resource") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - 
}, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - if mod == nil || len(mod.Resources) != 1 { - t.Fatalf("expected 1 resource, got: %#v", mod) - } - - checkStateString(t, state, ` - -module.child: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) -} - -func TestContext2Apply_targetedResourceOrphanModule(t *testing.T) { - m := testModule(t, "apply-targeted-resource-orphan-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - child := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_unknownAttribute(t *testing.T) { - m := testModule(t, "apply-unknown") - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp = testDiffFn(req) - planned := resp.PlannedState.AsValueMap() - planned["unknown"] = cty.UnknownVal(cty.String) - resp.PlannedState = 
cty.ObjectVal(planned) - return resp - } - p.ApplyResourceChangeFn = testApplyFn - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "num": {Type: cty.Number, Optional: true}, - "unknown": {Type: cty.String, Computed: true}, - "type": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Error("should error, because attribute 'unknown' is still unknown after apply") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyUnknownAttrStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_unknownAttributeInterpolate(t *testing.T) { - m := testModule(t, "apply-unknown-interpolate") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts); diags == nil { - t.Fatal("should error") - } -} - -func TestContext2Apply_vars(t *testing.T) { - fixture := contextFixtureApplyVars(t) - opts := fixture.ContextOpts() - ctx := testContext2(t, opts) - m := fixture.Config - - diags := ctx.Validate(m) - if len(diags) != 0 { - t.Fatalf("bad: %s", diags.ErrWithWarnings()) - } - - variables := InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("us-east-1"), - SourceType: ValueFromCaller, - }, - "bar": 
&InputValue{ - // This one is not explicitly set but that's okay because it - // has a declared default, which Terraform Core will use instead. - Value: cty.NilVal, - SourceType: ValueFromCaller, - }, - "test_list": &InputValue{ - Value: cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - cty.StringVal("World"), - }), - SourceType: ValueFromCaller, - }, - "test_map": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "Hello": cty.StringVal("World"), - "Foo": cty.StringVal("Bar"), - "Baz": cty.StringVal("Foo"), - }), - SourceType: ValueFromCaller, - }, - "amis": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "us-east-1": cty.StringVal("override"), - }), - SourceType: ValueFromCaller, - }, - } - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: variables, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - got := strings.TrimSpace(state.String()) - want := strings.TrimSpace(testTerraformApplyVarsStr) - if got != want { - t.Errorf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want) - } -} - -func TestContext2Apply_varsEnv(t *testing.T) { - fixture := contextFixtureApplyVarsEnv(t) - opts := fixture.ContextOpts() - ctx := testContext2(t, opts) - m := fixture.Config - - diags := ctx.Validate(m) - if len(diags) != 0 { - t.Fatalf("bad: %s", diags.ErrWithWarnings()) - } - - variables := InputValues{ - "string": &InputValue{ - Value: cty.StringVal("baz"), - SourceType: ValueFromEnvVar, - }, - "list": &InputValue{ - Value: cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - cty.StringVal("World"), - }), - SourceType: ValueFromEnvVar, - }, - "map": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "Hello": cty.StringVal("World"), - "Foo": cty.StringVal("Bar"), - "Baz": cty.StringVal("Foo"), - }), - SourceType: ValueFromEnvVar, - }, - } - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: 
plans.NormalMode, - SetVariables: variables, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyVarsEnvStr) - if actual != expected { - t.Errorf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_createBefore_depends(t *testing.T) { - m := testModule(t, "apply-depends-create-before") - h := new(HookRecordApplyOrder) - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","require_new":"ami-old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "lb", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz","instance":"bar"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }, - Module: addrs.RootModule, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - logDiagnostics(t, diags) - 
t.Fatal("plan failed") - } else { - t.Logf("plan:\n%s", legacyDiffComparisonString(plan.Changes)) - } - - h.Active = true - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } - - mod := state.RootModule() - if len(mod.Resources) < 2 { - t.Logf("state after apply:\n%s", state.String()) - t.Fatalf("only %d resources in root module; want at least 2", len(mod.Resources)) - } - - got := strings.TrimSpace(state.String()) - want := strings.TrimSpace(testTerraformApplyDependsCreateBeforeStr) - if got != want { - t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", got, want) - } - - // Test that things were managed _in the right order_ - order := h.States - - diffs := h.Diffs - if !order[0].IsNull() || diffs[0].Action == plans.Delete { - t.Fatalf("should create new instance first: %#v", order) - } - - if order[1].GetAttr("id").AsString() != "baz" { - t.Fatalf("update must happen after create: %#v", order[1]) - } - - if order[2].GetAttr("id").AsString() != "bar" || diffs[2].Action != plans.Delete { - t.Fatalf("destroy must happen after update: %#v", order[2]) - } -} - -func TestContext2Apply_singleDestroy(t *testing.T) { - m := testModule(t, "apply-depends-create-before") - h := new(HookRecordApplyOrder) - p := testProvider("aws") - invokeCount := 0 - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - invokeCount++ - switch invokeCount { - case 1: - if req.PlannedState.IsNull() { - t.Fatalf("should not destroy") - } - if id := req.PlannedState.GetAttr("id"); id.IsKnown() { - t.Fatalf("should not have ID") - } - case 2: - if req.PlannedState.IsNull() { - t.Fatalf("should not destroy") - } - if id := req.PlannedState.GetAttr("id"); id.AsString() != "baz" { - t.Fatalf("should have id") - } - case 3: - if !req.PlannedState.IsNull() { - t.Fatalf("should destroy") - } - default: - t.Fatalf("bad invoke count %d", invokeCount) - } - return 
testApplyFn(req) - } - - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","require_new":"ami-old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "lb", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz","instance":"bar"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }, - Module: addrs.RootModule, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - h.Active = true - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - if invokeCount != 3 { - t.Fatalf("bad: %d", invokeCount) - } -} - -// GH-7824 -func TestContext2Apply_issue7824(t *testing.T) { - p := testProvider("template") - p.PlanResourceChangeFn = testDiffFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "template_file": { - Attributes: map[string]*configschema.Attribute{ - "template": {Type: cty.String, Optional: true}, - "__template_requires_new": 
{Type: cty.Bool, Optional: true}, - }, - }, - }, - }) - - m, snap := testModuleWithSnapshot(t, "issue-7824") - - // Apply cleanly step 0 - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - // Write / Read plan to simulate running it through a Plan file - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = - map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } -} - -// This deals with the situation where a splat expression is used referring -// to another resource whose count is non-constant. -func TestContext2Apply_issue5254(t *testing.T) { - // Create a provider. We use "template" here just to match the repro - // we got from the issue itself. 
- p := testProvider("template") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "template_file": { - Attributes: map[string]*configschema.Attribute{ - "template": {Type: cty.String, Optional: true}, - "__template_requires_new": {Type: cty.Bool, Optional: true}, - "id": {Type: cty.String, Computed: true}, - "type": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - // Apply cleanly step 0 - m := testModule(t, "issue-5254/step-0") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - m, snap := testModuleWithSnapshot(t, "issue-5254/step-1") - - // Application success. 
Now make the modification and store a plan - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - // Write / Read plan to simulate running it through a Plan file - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(` -template_file.child: - ID = foo - provider = provider["registry.terraform.io/hashicorp/template"] - __template_requires_new = true - template = Hi - type = template_file - - Dependencies: - template_file.parent -template_file.parent.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/template"] - template = Hi - type = template_file -`) - if actual != expected { - t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Apply_targetedWithTaintedInState(t *testing.T) { - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - m, snap := testModuleWithSnapshot(t, "apply-tainted-targets") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.ifailedprovisioners").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: 
[]byte(`{"id":"ifailedprovisioners"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "iambeingadded", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - // Write / Read plan to simulate running it through a Plan file - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(` -aws_instance.iambeingadded: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.ifailedprovisioners: (tainted) - ID = ifailedprovisioners - provider = provider["registry.terraform.io/hashicorp/aws"] - `) - if actual != expected { - t.Fatalf("expected state: \n%s\ngot: \n%s", expected, actual) - } -} - -// Higher level test exposing the bug this covers in -// TestResource_ignoreChangesRequired -func TestContext2Apply_ignoreChangesCreate(t *testing.T) { - m := testModule(t, "apply-ignore-changes-create") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - instanceSchema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - instanceSchema.Attributes["required_field"] = 
&configschema.Attribute{ - Type: cty.String, - Required: true, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - mod := state.RootModule() - if len(mod.Resources) != 1 { - t.Fatalf("bad: %s", state) - } - - actual := strings.TrimSpace(state.String()) - // Expect no changes from original state - expected := strings.TrimSpace(` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - required_field = set - type = aws_instance -`) - if actual != expected { - t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_ignoreChangesWithDep(t *testing.T) { - m := testModule(t, "apply-ignore-changes-dep") - p := testProvider("aws") - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - - switch req.TypeName { - case "aws_instance": - resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: "ami"}}) - case "aws_eip": - return testDiffFn(req) - default: - t.Fatalf("Unexpected type: %s", req.TypeName) - } - return - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123","ami":"ami-abcd1234"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd234","ami":"i-bcd234"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_eip.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"eip-abc123","instance":"i-abc123"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }, - Module: addrs.RootModule, - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_eip.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"eip-bcd234","instance":"i-bcd234"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }, - Module: addrs.RootModule, - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state.DeepCopy(), DefaultPlanOpts) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(state.String()) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestContext2Apply_ignoreChangesAll(t *testing.T) { - m := testModule(t, "apply-ignore-changes-all") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - instanceSchema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - instanceSchema.Attributes["required_field"] = &configschema.Attribute{ - Type: cty.String, - Required: true, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("plan failed") - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - mod := state.RootModule() - if len(mod.Resources) != 1 { - t.Fatalf("bad: %s", state) - } - - actual := strings.TrimSpace(state.String()) - // Expect no changes from original state - expected := strings.TrimSpace(` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - required_field = set - type = aws_instance -`) - if actual != expected { - t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) - } -} - -// https://github.com/hashicorp/terraform/issues/7378 -func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-destroy-nested-module-with-attrs") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - - var state *states.State - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - // First plan and apply a create operation - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply err: %s", diags.Err()) - } - } - - { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := 
ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("destroy plan err: %s", diags.Err()) - } - - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - } - - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply err: %s", diags.Err()) - } - } - - if !state.Empty() { - t.Fatalf("state after apply: %s\nwant empty state", spew.Sdump(state)) - } -} - -// If a data source explicitly depends on another resource, it's because we need -// that resource to be applied first. -func TestContext2Apply_dataDependsOn(t *testing.T) { - p := testProvider("null") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "null_instance" "write" { - foo = "attribute" -} - -data "null_data_source" "read" { - count = 1 - depends_on = ["null_instance.write"] -} - -resource "null_instance" "depends" { - foo = data.null_data_source.read[0].foo -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - // the "provisioner" here writes to this variable, because the intent is to - // create a dependency which can't be viewed through the graph, and depends - // solely on the configuration providing "depends_on" - provisionerOutput := "" - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // the side effect of the resource being applied - provisionerOutput = "APPLIED" - return testApplyFn(req) - } - - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("boop"), - "foo": cty.StringVal(provisionerOutput), - }), - } - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - root := state.Module(addrs.RootModuleInstance) - is := root.ResourceInstance(addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "null_data_source", - Name: "read", - }.Instance(addrs.IntKey(0))) - if is == nil { - t.Fatal("data resource instance is not present in state; should be") - } - var attrs map[string]interface{} - err := json.Unmarshal(is.Current.AttrsJSON, &attrs) - if err != nil { - t.Fatal(err) - } - actual := attrs["foo"] - expected := "APPLIED" - if actual != expected { - t.Fatalf("bad:\n%s", strings.TrimSpace(state.String())) - } - - // run another plan to make sure the data source doesn't show as a change - plan, diags = ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Fatalf("unexpected change for %s", c.Addr) - } - } - - // now we cause a change in the first resource, which should trigger a plan - // in the data source, and the resource that depends on the data source - // must plan a change as well. 
- m = testModuleInline(t, map[string]string{ - "main.tf": ` -resource "null_instance" "write" { - foo = "new" -} - -data "null_data_source" "read" { - depends_on = ["null_instance.write"] -} - -resource "null_instance" "depends" { - foo = data.null_data_source.read.foo -} -`}) - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // the side effect of the resource being applied - provisionerOutput = "APPLIED_AGAIN" - return testApplyFn(req) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - expectedChanges := map[string]plans.Action{ - "null_instance.write": plans.Update, - "data.null_data_source.read": plans.Read, - "null_instance.depends": plans.Update, - } - - for _, c := range plan.Changes.Resources { - if c.Action != expectedChanges[c.Addr.String()] { - t.Errorf("unexpected %s for %s", c.Action, c.Addr) - } - } -} - -func TestContext2Apply_terraformWorkspace(t *testing.T) { - m := testModule(t, "apply-terraform-workspace") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Meta: &ContextMeta{Env: "foo"}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - actual := state.RootModule().OutputValues["output"] - expected := cty.StringVal("foo") - if actual == nil || actual.Value != expected { - t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) - } -} - -// verify that multiple config references only create a single depends_on entry -func 
TestContext2Apply_multiRef(t *testing.T) { - m := testModule(t, "apply-multi-ref") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - deps := state.Modules[""].Resources["aws_instance.other"].Instances[addrs.NoKey].Current.Dependencies - if len(deps) != 1 || deps[0].String() != "aws_instance.create" { - t.Fatalf("expected 1 depends_on entry for aws_instance.create, got %q", deps) - } -} - -func TestContext2Apply_targetedModuleRecursive(t *testing.T) { - m := testModule(t, "apply-targeted-module-recursive") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey), - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - mod := state.Module( - addrs.RootModuleInstance.Child("child", addrs.NoKey).Child("subchild", addrs.NoKey), - ) - if mod == nil { - t.Fatalf("no subchild module found in the state!\n\n%#v", state) - } - if len(mod.Resources) != 1 { - t.Fatalf("expected 1 resources, got: %#v", mod.Resources) - } - - checkStateString(t, state, ` - -module.child.subchild: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - `) -} - -func TestContext2Apply_localVal(t 
*testing.T) { - m := testModule(t, "apply-local-val") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{}, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("error during apply: %s", diags.Err()) - } - - got := strings.TrimSpace(state.String()) - want := strings.TrimSpace(` - -Outputs: - -result_1 = hello -result_3 = hello world -`) - if got != want { - t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want) - } -} - -func TestContext2Apply_destroyWithLocals(t *testing.T) { - m := testModule(t, "apply-destroy-with-locals") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetOutputValue("name", cty.StringVal("test-bar"), false) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - s, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("error during apply: %s", diags.Err()) - } - - got := strings.TrimSpace(s.String()) - want := strings.TrimSpace(``) - if got != want { - t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want) - } -} - -func TestContext2Apply_providerWithLocals(t *testing.T) { - m := testModule(t, "provider-with-locals") - p := testProvider("aws") - - providerRegion := "" - // this should not be overridden during destroy - p.ConfigureProviderFn = func(req 
providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - val := req.Config.GetAttr("region") - if !val.IsNull() { - providerRegion = val.AsString() - } - - return - } - - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - if state.HasManagedResourceInstanceObjects() { - t.Fatal("expected no state, got:", state) - } - - if providerRegion != "bar" { - t.Fatalf("expected region %q, got: %q", "bar", providerRegion) - } -} - -func TestContext2Apply_destroyWithProviders(t *testing.T) { - m := testModule(t, "destroy-module-with-provider") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - removed := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey).Child("removed", addrs.NoKey)) - removed.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.child").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"].baz`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // test that we can't destroy if the provider is 
missing - if _, diags := ctx.Plan(m, state, &PlanOpts{Mode: plans.DestroyMode}); diags == nil { - t.Fatal("expected plan error, provider.aws.baz doesn't exist") - } - - // correct the state - state.Modules["module.mod.module.removed"].Resources["aws_instance.child"].ProviderConfig = mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"].bar`) - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("error during apply: %s", diags.Err()) - } - - got := strings.TrimSpace(state.String()) - - want := strings.TrimSpace("") - if got != want { - t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want) - } -} - -func TestContext2Apply_providersFromState(t *testing.T) { - m := configs.NewEmptyConfig() - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - implicitProviderState := states.NewState() - impRoot := implicitProviderState.EnsureModule(addrs.RootModuleInstance) - impRoot.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - aliasedProviderState := states.NewState() - aliasRoot := aliasedProviderState.EnsureModule(addrs.RootModuleInstance) - aliasRoot.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"].bar`), - ) - - moduleProviderState := states.NewState() - moduleProviderRoot := 
moduleProviderState.EnsureModule(addrs.RootModuleInstance) - moduleProviderRoot.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`module.child.provider["registry.terraform.io/hashicorp/aws"]`), - ) - - for _, tc := range []struct { - name string - state *states.State - output string - err bool - }{ - { - name: "add implicit provider", - state: implicitProviderState, - err: false, - output: "", - }, - - // an aliased provider must be in the config to remove a resource - { - name: "add aliased provider", - state: aliasedProviderState, - err: true, - }, - - // a provider in a module implies some sort of config, so this isn't - // allowed even without an alias - { - name: "add unaliased module provider", - state: moduleProviderState, - err: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, tc.state, DefaultPlanOpts) - if tc.err { - if diags == nil { - t.Fatal("expected error") - } else { - return - } - } - if !tc.err && diags.HasErrors() { - t.Fatal(diags.Err()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - checkStateString(t, state, "") - - }) - } -} - -func TestContext2Apply_plannedInterpolatedCount(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-interpolated-count") - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.test").Resource, - 
&states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: Providers, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("plan failed: %s", diags.Err()) - } - - // We'll marshal and unmarshal the plan here, to ensure that we have - // a clean new context as would be created if we separately ran - // terraform plan -out=tfplan && terraform apply tfplan - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = Providers - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - // Applying the plan should now succeed - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply failed: %s", diags.Err()) - } -} - -func TestContext2Apply_plannedDestroyInterpolatedCount(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "plan-destroy-interpolated-count") - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetOutputValue("out", cty.ListVal([]cty.Value{cty.StringVal("foo"), cty.StringVal("foo")}), false) - - ctx := testContext2(t, &ContextOpts{ - Providers: providers, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("plan failed: %s", diags.Err()) - } - - // We'll marshal and unmarshal the plan here, to ensure that we have - // a clean new context as would be created if we separately ran - // terraform plan -out=tfplan && terraform apply tfplan - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } - - ctxOpts.Providers = providers - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - // Applying the plan should now succeed - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply failed: %s", diags.Err()) - } - if !state.Empty() { - t.Fatalf("state not empty: %s\n", state) - } -} - -func TestContext2Apply_scaleInMultivarRef(t *testing.T) { - m := testModule(t, "apply-resource-scale-in") - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.one").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.two").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), 
- }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: Providers, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "instance_count": { - Value: cty.NumberIntVal(0), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - { - addr := mustResourceInstanceAddr("aws_instance.one[0]") - change := plan.Changes.ResourceInstance(addr) - if change == nil { - t.Fatalf("no planned change for %s", addr) - } - // This test was originally written with Terraform v0.11 and earlier - // in mind, so it declares a no-key instance of aws_instance.one, - // but its configuration sets count (to zero) and so we end up first - // moving the no-key instance to the zero key and then planning to - // destroy the zero key. - if got, want := change.PrevRunAddr, mustResourceInstanceAddr("aws_instance.one"); !want.Equal(got) { - t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) - } - if got, want := change.Action, plans.Delete; got != want { - t.Errorf("wrong action for %s %s; want %s", addr, got, want) - } - if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want { - t.Errorf("wrong action reason for %s %s; want %s", addr, got, want) - } - } - { - addr := mustResourceInstanceAddr("aws_instance.two") - change := plan.Changes.ResourceInstance(addr) - if change == nil { - t.Fatalf("no planned change for %s", addr) - } - if got, want := change.PrevRunAddr, mustResourceInstanceAddr("aws_instance.two"); !want.Equal(got) { - t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) - } - if got, want := change.Action, plans.Update; got != want { - t.Errorf("wrong action for %s %s; want %s", addr, got, want) - } - if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason for %s %s; want %s", addr, 
got, want) - } - } - - // Applying the plan should now succeed - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) -} - -func TestContext2Apply_inconsistentWithPlan(t *testing.T) { - m := testModule(t, "apply-inconsistent-with-plan") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("before"), - }), - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - // This is intentionally incorrect: because id was fixed at "before" - // during plan, it must not change during apply. - "id": cty.StringVal("after"), - }), - } - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatalf("apply succeeded; want error") - } - if got, want := diags.Err().Error(), "Provider produced inconsistent result after apply"; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nshould contain: %s", got, want) - } -} - -// Issue 19908 was about retaining an existing object in the state when an -// update to it fails and the provider does not return a partially-updated -// value for it. 
Previously we were incorrectly removing it from the state -// in that case, but instead it should be retained so the update can be -// retried. -func TestContext2Apply_issue19908(t *testing.T) { - m := testModule(t, "apply-issue19908") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test": { - Attributes: map[string]*configschema.Attribute{ - "baz": {Type: cty.String, Required: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - var diags tfdiags.Diagnostics - diags = diags.Append(fmt.Errorf("update failed")) - return providers.ApplyResourceChangeResponse{ - Diagnostics: diags, - } - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"baz":"old"}`), - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatalf("apply succeeded; want error") - } - if got, want := diags.Err().Error(), "update failed"; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nshould contain: %s", got, want) - } - - mod := 
state.RootModule() - rs := mod.Resources["test.foo"] - if rs == nil { - t.Fatalf("test.foo not in state after apply, but should be") - } - is := rs.Instances[addrs.NoKey] - if is == nil { - t.Fatalf("test.foo not in state after apply, but should be") - } - obj := is.Current - if obj == nil { - t.Fatalf("test.foo has no current object in state after apply, but should do") - } - - if got, want := obj.Status, states.ObjectReady; got != want { - t.Errorf("test.foo has wrong status %s after apply; want %s", got, want) - } - if got, want := obj.AttrsJSON, []byte(`"old"`); !bytes.Contains(got, want) { - t.Errorf("test.foo attributes JSON doesn't contain %s after apply\ngot: %s", want, got) - } -} - -func TestContext2Apply_invalidIndexRef(t *testing.T) { - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true, Computed: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = testDiffFn - - m := testModule(t, "apply-invalid-index") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected validation failure: %s", diags.Err()) - } - - wantErr := `The given key does not identify an element in this collection value` - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - - if !diags.HasErrors() { - t.Fatalf("plan succeeded; want error") - } - gotErr := diags.Err().Error() - - if !strings.Contains(gotErr, wantErr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErr, wantErr) - } -} - -func TestContext2Apply_moduleReplaceCycle(t *testing.T) { - for _, mode := range []string{"normal", "cbd"} { - var m *configs.Config - - switch mode { - case 
"normal": - m = testModule(t, "apply-module-replace-cycle") - case "cbd": - m = testModule(t, "apply-module-replace-cycle-cbd") - } - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - instanceSchema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "require_new": {Type: cty.String, Optional: true}, - }, - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": instanceSchema, - }, - }) - - state := states.NewState() - modA := state.EnsureModule(addrs.RootModuleInstance.Child("a", addrs.NoKey)) - modA.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "a", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","require_new":"old"}`), - CreateBeforeDestroy: mode == "cbd", - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - modB := state.EnsureModule(addrs.RootModuleInstance.Child("b", addrs.NoKey)) - modB.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "b", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b","require_new":"old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - aBefore, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("a"), - "require_new": cty.StringVal("old"), - }), instanceSchema.ImpliedType()) - aAfter, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "require_new": cty.StringVal("new"), - }), instanceSchema.ImpliedType()) - bBefore, _ := 
plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("b"), - "require_new": cty.StringVal("old"), - }), instanceSchema.ImpliedType()) - bAfter, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "require_new": cty.UnknownVal(cty.String), - }), instanceSchema.ImpliedType()) - - var aAction plans.Action - switch mode { - case "normal": - aAction = plans.DeleteThenCreate - case "cbd": - aAction = plans.CreateThenDelete - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "a", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("a", addrs.NoKey)), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: aAction, - Before: aBefore, - After: aAfter, - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "b", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("b", addrs.NoKey)), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.DeleteThenCreate, - Before: bBefore, - After: bAfter, - }, - }, - }, - } - - plan := &plans.Plan{ - UIMode: plans.NormalMode, - Changes: changes, - PriorState: state.DeepCopy(), - PrevRunState: state.DeepCopy(), - } - - t.Run(mode, func(t *testing.T) { - _, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - }) - } -} - -func TestContext2Apply_destroyDataCycle(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-destroy-data-cycle") - p := 
testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("new"), - "foo": cty.NullVal(cty.String), - }), - } - } - - tp := testProvider("test") - tp.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "a", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("null"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "a", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "null_data_source", - Name: "d", - }, - Module: addrs.RootModule, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "null_data_source", - Name: "d", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("null"), - Module: addrs.RootModule, - }, - ) - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - addrs.NewDefaultProvider("test"): testProviderFuncFixed(tp), - } - - ctx := testContext2(t, 
&ContextOpts{ - Providers: Providers, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - diags.HasErrors() - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // We'll marshal and unmarshal the plan here, to ensure that we have - // a clean new context as would be created if we separately ran - // terraform plan -out=tfplan && terraform apply tfplan - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatal(err) - } - ctxOpts.Providers = Providers - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("failed to create context for plan: %s", diags.Err()) - } - - tp.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - foo := req.Config.GetAttr("foo") - if !foo.IsKnown() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown config value foo")) - return resp - } - - if foo.AsString() != "new" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("wrong config value: %q", foo.AsString())) - } - return resp - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } -} - -func TestContext2Apply_taintedDestroyFailure(t *testing.T) { - m := testModule(t, "apply-destroy-tainted") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - // All destroys fail. - if req.PlannedState.IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("failure")) - return - } - - // c will also fail to create, meaning the existing tainted instance - // becomes deposed, ans is then promoted back to current. 
- // only C has a foo attribute - planned := req.PlannedState.AsValueMap() - foo, ok := planned["foo"] - if ok && !foo.IsNull() && foo.AsString() == "c" { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("failure")) - return - } - - return testApplyFn(req) - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "a", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"a","foo":"a"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "b", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"b","foo":"b"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "c", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"c","foo":"old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - } - - ctx := testContext2(t, 
&ContextOpts{ - Providers: Providers, - Hooks: []Hook{&testHook{}}, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - diags.HasErrors() - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("expected error") - } - - root = state.Module(addrs.RootModuleInstance) - - // the instance that failed to destroy should remain tainted - a := root.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "a", - }.Instance(addrs.NoKey)) - - if a.Current.Status != states.ObjectTainted { - t.Fatal("test_instance.a should be tainted") - } - - // b is create_before_destroy, and the destroy failed, so there should be 1 - // deposed instance. - b := root.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "b", - }.Instance(addrs.NoKey)) - - if b.Current.Status != states.ObjectReady { - t.Fatal("test_instance.b should be Ready") - } - - if len(b.Deposed) != 1 { - t.Fatal("test_instance.b failed to keep deposed instance") - } - - // the desposed c instance should be promoted back to Current, and remain - // tainted - c := root.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "c", - }.Instance(addrs.NoKey)) - - if c.Current == nil { - t.Fatal("test_instance.c has no current instance, but it should") - } - - if c.Current.Status != states.ObjectTainted { - t.Fatal("test_instance.c should be tainted") - } - - if len(c.Deposed) != 0 { - t.Fatal("test_instance.c should have no deposed instances") - } - - if string(c.Current.AttrsJSON) != `{"foo":"old","id":"c"}` { - t.Fatalf("unexpected attrs for c: %q\n", c.Current.AttrsJSON) - } -} - -func TestContext2Apply_plannedConnectionRefs(t *testing.T) { - m := testModule(t, "apply-plan-connection-refs") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = func(req 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - s := req.PlannedState.AsValueMap() - // delay "a" slightly, so if the reference edge is missing the "b" - // provisioner will see an unknown value. - if s["foo"].AsString() == "a" { - time.Sleep(500 * time.Millisecond) - } - - s["id"] = cty.StringVal("ID") - if ty, ok := s["type"]; ok && !ty.IsKnown() { - s["type"] = cty.StringVal(req.TypeName) - } - resp.NewState = cty.ObjectVal(s) - return resp - } - - provisionerFactory := func() (provisioners.Interface, error) { - pr := testProvisioner() - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - host := req.Connection.GetAttr("host") - if host.IsNull() || !host.IsKnown() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid host value: %#v", host)) - } - - return resp - } - return pr, nil - } - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - } - - provisioners := map[string]provisioners.Factory{ - "shell": provisionerFactory, - } - - hook := &testHook{} - ctx := testContext2(t, &ContextOpts{ - Providers: Providers, - Provisioners: provisioners, - Hooks: []Hook{hook}, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - diags.HasErrors() - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } -} - -func TestContext2Apply_cbdCycle(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "apply-cbd-cycle") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "a", - 
}.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","require_new":"old","foo":"b"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "b", - }, - Module: addrs.RootModule, - }, - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "c", - }, - Module: addrs.RootModule, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "b", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b","require_new":"old","foo":"c"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "c", - }, - Module: addrs.RootModule, - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "c", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"c","require_new":"old"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - Providers := map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - } - - hook := &testHook{} - ctx := testContext2(t, &ContextOpts{ - Providers: Providers, - Hooks: []Hook{hook}, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - diags.HasErrors() - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - // We'll marshal and unmarshal the 
plan here, to ensure that we have - // a clean new context as would be created if we separately ran - // terraform plan -out=tfplan && terraform apply tfplan - ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatal(err) - } - ctxOpts.Providers = Providers - ctx, diags = NewContext(ctxOpts) - if diags.HasErrors() { - t.Fatalf("failed to create context for plan: %s", diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } -} - -func TestContext2Apply_ProviderMeta_apply_set(t *testing.T) { - m := testModule(t, "provider-meta-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - - var pmMu sync.Mutex - arcPMs := map[string]cty.Value{} - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - pmMu.Lock() - defer pmMu.Unlock() - arcPMs[req.TypeName] = req.ProviderMeta - - s := req.PlannedState.AsValueMap() - s["id"] = cty.StringVal("ID") - if ty, ok := s["type"]; ok && !ty.IsKnown() { - s["type"] = cty.StringVal(req.TypeName) - } - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(s), - } - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) - - if !p.ApplyResourceChangeCalled { - t.Fatalf("ApplyResourceChange not called") - } - - expectations := map[string]cty.Value{} - - if pm, ok := arcPMs["test_resource"]; !ok { - t.Fatalf("sub-module 
ApplyResourceChange not called") - } else if pm.IsNull() { - t.Fatalf("null ProviderMeta in sub-module ApplyResourceChange") - } else { - expectations["quux-submodule"] = pm - } - - if pm, ok := arcPMs["test_instance"]; !ok { - t.Fatalf("root module ApplyResourceChange not called") - } else if pm.IsNull() { - t.Fatalf("null ProviderMeta in root module ApplyResourceChange") - } else { - expectations["quux"] = pm - } - - type metaStruct struct { - Baz string `cty:"baz"` - } - - for expected, v := range expectations { - var meta metaStruct - err := gocty.FromCtyValue(v, &meta) - if err != nil { - t.Fatalf("Error parsing cty value: %s", err) - } - if meta.Baz != expected { - t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) - } - } -} - -func TestContext2Apply_ProviderMeta_apply_unset(t *testing.T) { - m := testModule(t, "provider-meta-unset") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - var pmMu sync.Mutex - arcPMs := map[string]cty.Value{} - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - pmMu.Lock() - defer pmMu.Unlock() - arcPMs[req.TypeName] = req.ProviderMeta - - s := req.PlannedState.AsValueMap() - s["id"] = cty.StringVal("ID") - if ty, ok := s["type"]; ok && !ty.IsKnown() { - s["type"] = cty.StringVal(req.TypeName) - } - return providers.ApplyResourceChangeResponse{ - NewState: cty.ObjectVal(s), - } - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) 
- assertNoErrors(t, diags) - - if !p.ApplyResourceChangeCalled { - t.Fatalf("ApplyResourceChange not called") - } - - if pm, ok := arcPMs["test_resource"]; !ok { - t.Fatalf("sub-module ApplyResourceChange not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in sub-module ApplyResourceChange: %+v", pm) - } - - if pm, ok := arcPMs["test_instance"]; !ok { - t.Fatalf("root module ApplyResourceChange not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in root module ApplyResourceChange: %+v", pm) - } -} - -func TestContext2Apply_ProviderMeta_plan_set(t *testing.T) { - m := testModule(t, "provider-meta-set") - p := testProvider("test") - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - prcPMs := map[string]cty.Value{} - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - prcPMs[req.TypeName] = req.ProviderMeta - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.PlanResourceChangeCalled { - t.Fatalf("PlanResourceChange not called") - } - - expectations := map[string]cty.Value{} - - if pm, ok := prcPMs["test_resource"]; !ok { - t.Fatalf("sub-module PlanResourceChange not called") - } else if pm.IsNull() { - t.Fatalf("null ProviderMeta in sub-module PlanResourceChange") - } else { - expectations["quux-submodule"] = pm - } - - if pm, ok := prcPMs["test_instance"]; !ok { - t.Fatalf("root module PlanResourceChange not called") - } else if pm.IsNull() { - 
t.Fatalf("null ProviderMeta in root module PlanResourceChange") - } else { - expectations["quux"] = pm - } - - type metaStruct struct { - Baz string `cty:"baz"` - } - - for expected, v := range expectations { - var meta metaStruct - err := gocty.FromCtyValue(v, &meta) - if err != nil { - t.Fatalf("Error parsing cty value: %s", err) - } - if meta.Baz != expected { - t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) - } - } -} - -func TestContext2Apply_ProviderMeta_plan_unset(t *testing.T) { - m := testModule(t, "provider-meta-unset") - p := testProvider("test") - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - prcPMs := map[string]cty.Value{} - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - prcPMs[req.TypeName] = req.ProviderMeta - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.PlanResourceChangeCalled { - t.Fatalf("PlanResourceChange not called") - } - - if pm, ok := prcPMs["test_resource"]; !ok { - t.Fatalf("sub-module PlanResourceChange not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in sub-module PlanResourceChange: %+v", pm) - } - - if pm, ok := prcPMs["test_instance"]; !ok { - t.Fatalf("root module PlanResourceChange not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in root module PlanResourceChange: %+v", pm) - } -} - -func TestContext2Apply_ProviderMeta_plan_setNoSchema(t *testing.T) { - m := testModule(t, 
"provider-meta-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("plan supposed to error, has no errors") - } - - var rootErr, subErr bool - errorSummary := "The resource test_%s.bar belongs to a provider that doesn't support provider_meta blocks" - for _, diag := range diags { - if diag.Description().Summary != "Provider registry.terraform.io/hashicorp/test doesn't support provider_meta" { - t.Errorf("Unexpected error: %+v", diag.Description()) - } - switch diag.Description().Detail { - case fmt.Sprintf(errorSummary, "instance"): - rootErr = true - case fmt.Sprintf(errorSummary, "resource"): - subErr = true - default: - t.Errorf("Unexpected error: %s", diag.Description()) - } - } - if !rootErr { - t.Errorf("Expected unsupported provider_meta block error for root module, none received") - } - if !subErr { - t.Errorf("Expected unsupported provider_meta block error for sub-module, none received") - } -} - -func TestContext2Apply_ProviderMeta_plan_setInvalid(t *testing.T) { - m := testModule(t, "provider-meta-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "quux": { - Type: cty.String, - Required: true, - }, - }, - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("plan supposed to error, has no errors") - } - - var reqErr, invalidErr bool - for 
_, diag := range diags { - switch diag.Description().Summary { - case "Missing required argument": - if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { - reqErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - case "Unsupported argument": - if diag.Description().Detail == `An argument named "baz" is not expected here.` { - invalidErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - default: - t.Errorf("Unexpected error %+v", diag.Description()) - } - } - if !reqErr { - t.Errorf("Expected missing required argument error, none received") - } - if !invalidErr { - t.Errorf("Expected unsupported argument error, none received") - } -} - -func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { - m := testModule(t, "provider-meta-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - rrcPMs := map[string]cty.Value{} - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - rrcPMs[req.TypeName] = req.ProviderMeta - newState, err := p.GetProviderSchemaResponse.ResourceTypes[req.TypeName].Block.CoerceValue(req.PriorState) - if err != nil { - panic(err) - } - resp.NewState = newState - return resp - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - _, diags = ctx.Refresh(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.ReadResourceCalled { - 
t.Fatalf("ReadResource not called")
	}

	// Map each expected provider_meta "baz" value to the cty value the mock
	// provider captured for the corresponding resource type during refresh.
	expectations := map[string]cty.Value{}

	if pm, ok := rrcPMs["test_resource"]; !ok {
		t.Fatalf("sub-module ReadResource not called")
	} else if pm.IsNull() {
		t.Fatalf("null ProviderMeta in sub-module ReadResource")
	} else {
		expectations["quux-submodule"] = pm
	}

	if pm, ok := rrcPMs["test_instance"]; !ok {
		t.Fatalf("root module ReadResource not called")
	} else if pm.IsNull() {
		t.Fatalf("null ProviderMeta in root module ReadResource")
	} else {
		expectations["quux"] = pm
	}

	// Decode each captured provider_meta object and check its "baz"
	// attribute carries the value configured in the matching module.
	type metaStruct struct {
		Baz string `cty:"baz"`
	}

	for expected, v := range expectations {
		var meta metaStruct
		err := gocty.FromCtyValue(v, &meta)
		if err != nil {
			t.Fatalf("Error parsing cty value: %s", err)
		}
		if meta.Baz != expected {
			t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz)
		}
	}
}

// TestContext2Apply_ProviderMeta_refresh_setNoSchema verifies that refreshing
// state created while the provider supported provider_meta fails once the
// provider's schema no longer declares provider_meta support.
func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) {
	m := testModule(t, "provider-meta-set")
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn

	// we need a schema for plan/apply so they don't error
	schema := p.ProviderSchema()
	schema.ProviderMeta = &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"baz": {
				Type:     cty.String,
				Required: true,
			},
		},
	}
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema)
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	// drop the schema before refresh, to test that it errors
	schema.ProviderMeta = nil
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema)
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags = ctx.Refresh(m, state, DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("refresh supposed to error, has no errors") - } - - var rootErr, subErr bool - errorSummary := "The resource test_%s.bar belongs to a provider that doesn't support provider_meta blocks" - for _, diag := range diags { - if diag.Description().Summary != "Provider registry.terraform.io/hashicorp/test doesn't support provider_meta" { - t.Errorf("Unexpected error: %+v", diag.Description()) - } - switch diag.Description().Detail { - case fmt.Sprintf(errorSummary, "instance"): - rootErr = true - case fmt.Sprintf(errorSummary, "resource"): - subErr = true - default: - t.Errorf("Unexpected error: %s", diag.Description()) - } - } - if !rootErr { - t.Errorf("Expected unsupported provider_meta block error for root module, none received") - } - if !subErr { - t.Errorf("Expected unsupported provider_meta block error for sub-module, none received") - } -} - -func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { - m := testModule(t, "provider-meta-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - // we need a matching schema for plan/apply so they don't error - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // change the schema before refresh, to test that it errors - schema.ProviderMeta = &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "quux": { - Type: cty.String, - Required: true, - }, - }, - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags = ctx.Refresh(m, state, DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("refresh supposed to error, has no errors") - } - - var reqErr, invalidErr bool - for _, diag := range diags { - switch diag.Description().Summary { - case "Missing required argument": - if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { - reqErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - case "Unsupported argument": - if diag.Description().Detail == `An argument named "baz" is not expected here.` { - invalidErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - default: - t.Errorf("Unexpected error %+v", diag.Description()) - } - } - if !reqErr { - t.Errorf("Expected missing required argument error, none received") - } - if !invalidErr { - t.Errorf("Expected unsupported argument error, none received") - } -} - -func TestContext2Apply_ProviderMeta_refreshdata_set(t *testing.T) { - m := testModule(t, "provider-meta-data-set") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - rdsPMs := map[string]cty.Value{} - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
providers.ReadDataSourceResponse { - rdsPMs[req.TypeName] = req.ProviderMeta - switch req.TypeName { - case "test_data_source": - log.Printf("[TRACE] test_data_source RDSR returning") - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yo"), - "foo": cty.StringVal("bar"), - }), - } - case "test_file": - log.Printf("[TRACE] test_file RDSR returning") - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "rendered": cty.StringVal("baz"), - "template": cty.StringVal(""), - }), - } - default: - // config drift, oops - log.Printf("[TRACE] unknown request TypeName: %q", req.TypeName) - return providers.ReadDataSourceResponse{} - } - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - _, diags = ctx.Refresh(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.ReadDataSourceCalled { - t.Fatalf("ReadDataSource not called") - } - - expectations := map[string]cty.Value{} - - if pm, ok := rdsPMs["test_file"]; !ok { - t.Fatalf("sub-module ReadDataSource not called") - } else if pm.IsNull() { - t.Fatalf("null ProviderMeta in sub-module ReadDataSource") - } else { - expectations["quux-submodule"] = pm - } - - if pm, ok := rdsPMs["test_data_source"]; !ok { - t.Fatalf("root module ReadDataSource not called") - } else if pm.IsNull() { - t.Fatalf("null ProviderMeta in root module ReadDataSource") - } else { - expectations["quux"] = pm - } - - type metaStruct struct { - Baz string `cty:"baz"` - } - - for expected, v := range expectations { - var meta metaStruct - err := gocty.FromCtyValue(v, &meta) - if err != nil { - t.Fatalf("Error parsing cty value: %s", err) - } - if meta.Baz != expected { - t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) - } - } -} - -func TestContext2Apply_ProviderMeta_refreshdata_unset(t 
*testing.T) { - m := testModule(t, "provider-meta-data-unset") - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - schema := p.ProviderSchema() - schema.ProviderMeta = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": { - Type: cty.String, - Required: true, - }, - }, - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - rdsPMs := map[string]cty.Value{} - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - rdsPMs[req.TypeName] = req.ProviderMeta - switch req.TypeName { - case "test_data_source": - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yo"), - "foo": cty.StringVal("bar"), - }), - } - case "test_file": - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "rendered": cty.StringVal("baz"), - "template": cty.StringVal(""), - }), - } - default: - // config drift, oops - return providers.ReadDataSourceResponse{} - } - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) - - if !p.ReadDataSourceCalled { - t.Fatalf("ReadDataSource not called") - } - - if pm, ok := rdsPMs["test_file"]; !ok { - t.Fatalf("sub-module ReadDataSource not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in sub-module ReadDataSource") - } - - if pm, ok := rdsPMs["test_data_source"]; !ok { - t.Fatalf("root module ReadDataSource not called") - } else if !pm.IsNull() { - t.Fatalf("non-null ProviderMeta in root module ReadDataSource") - } -} - -func TestContext2Apply_ProviderMeta_refreshdata_setNoSchema(t *testing.T) { - m := testModule(t, 
"provider-meta-data-set")
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn
	// No provider_meta schema is declared here, so refreshing the data
	// sources (whose modules set provider_meta) must fail.
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
		State: cty.ObjectVal(map[string]cty.Value{
			"id":  cty.StringVal("yo"),
			"foo": cty.StringVal("bar"),
		}),
	}

	_, diags := ctx.Refresh(m, states.NewState(), DefaultPlanOpts)
	if !diags.HasErrors() {
		t.Fatalf("refresh supposed to error, has no errors")
	}

	// Expect one diagnostic per data source: the root module's
	// data.test_data_source.foo and the sub-module's data.test_file.foo.
	var rootErr, subErr bool
	// NOTE: despite its name, this is the format string matched against the
	// diagnostic *Detail*, not its Summary.
	errorSummary := "The resource data.test_%s.foo belongs to a provider that doesn't support provider_meta blocks"
	for _, diag := range diags {
		if diag.Description().Summary != "Provider registry.terraform.io/hashicorp/test doesn't support provider_meta" {
			t.Errorf("Unexpected error: %+v", diag.Description())
		}
		switch diag.Description().Detail {
		case fmt.Sprintf(errorSummary, "data_source"):
			rootErr = true
		case fmt.Sprintf(errorSummary, "file"):
			subErr = true
		default:
			t.Errorf("Unexpected error: %s", diag.Description())
		}
	}
	if !rootErr {
		t.Errorf("Expected unsupported provider_meta block error for root module, none received")
	}
	if !subErr {
		t.Errorf("Expected unsupported provider_meta block error for sub-module, none received")
	}
}

// TestContext2Apply_ProviderMeta_refreshdata_setInvalid verifies that
// refreshing data sources fails with schema validation errors when the
// provider's provider_meta schema (requiring "quux") disagrees with the
// fixture configuration (which sets "baz").
func TestContext2Apply_ProviderMeta_refreshdata_setInvalid(t *testing.T) {
	m := testModule(t, "provider-meta-data-set")
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn
	schema := p.ProviderSchema()
	schema.ProviderMeta = &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"quux": {
				Type:     cty.String,
				Required: true,
			},
		},
	}
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema)
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): 
testProviderFuncFixed(p), - }, - }) - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("yo"), - "foo": cty.StringVal("bar"), - }), - } - - _, diags := ctx.Refresh(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("refresh supposed to error, has no errors") - } - - var reqErr, invalidErr bool - for _, diag := range diags { - switch diag.Description().Summary { - case "Missing required argument": - if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { - reqErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - case "Unsupported argument": - if diag.Description().Detail == `An argument named "baz" is not expected here.` { - invalidErr = true - } else { - t.Errorf("Unexpected error %+v", diag.Description()) - } - default: - t.Errorf("Unexpected error %+v", diag.Description()) - } - } - if !reqErr { - t.Errorf("Expected missing required argument error, none received") - } - if !invalidErr { - t.Errorf("Expected unsupported argument error, none received") - } -} - -func TestContext2Apply_expandModuleVariables(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - for_each = toset(["a"]) - source = "./mod" -} - -module "mod2" { - source = "./mod" - in = module.mod1["a"].out -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { - foo = var.in -} - -variable "in" { - type = string - default = "default" -} - -output "out" { - value = aws_instance.foo.id -} -`, - }) - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - 
} - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - expected := ` -module.mod1["a"]: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = default - type = aws_instance - - Outputs: - - out = foo -module.mod2: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance - - Dependencies: - module.mod1.aws_instance.foo` - - if state.String() != expected { - t.Fatalf("expected:\n%s\ngot:\n%s\n", expected, state) - } -} - -func TestContext2Apply_inheritAndStoreCBD(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "foo" { -} - -resource "aws_instance" "cbd" { - foo = aws_instance.foo.id - lifecycle { - create_before_destroy = true - } -} -`, - }) - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - foo := state.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo")) - if !foo.Current.CreateBeforeDestroy { - t.Fatal("aws_instance.foo should also be create_before_destroy") - } -} - -func TestContext2Apply_moduleDependsOn(t *testing.T) { - m := testModule(t, "apply-module-depends-on") - - p := testProvider("test") - - // each instance being applied should happen in sequential order - applied := int64(0) - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - cfg := req.Config.AsValueMap() - foo := cfg["foo"].AsString() - ord := atomic.LoadInt64(&applied) - - resp := 
providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data"), - "foo": cfg["foo"], - }), - } - - if foo == "a" && ord < 4 { - // due to data source "a"'s module depending on instance 4, this - // should not be less than 4 - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source a read too early")) - } - if foo == "b" && ord < 1 { - // due to data source "b"'s module depending on instance 1, this - // should not be less than 1 - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source b read too early")) - } - return resp - } - p.PlanResourceChangeFn = testDiffFn - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - state := req.PlannedState.AsValueMap() - num, _ := state["num"].AsBigFloat().Float64() - ord := int64(num) - if !atomic.CompareAndSwapInt64(&applied, ord-1, ord) { - actual := atomic.LoadInt64(&applied) - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("instance %d was applied after %d", ord, actual)) - } - - state["id"] = cty.StringVal(fmt.Sprintf("test_%d", ord)) - state["type"] = cty.StringVal("test_instance") - resp.NewState = cty.ObjectVal(state) - - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - plan, diags = ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.NoOp { - t.Fatalf("expected NoOp, got %s for %s", res.Action, res.Addr) - } - } -} - -func TestContext2Apply_moduleSelfReference(t *testing.T) { - m := 
testModuleInline(t, map[string]string{ - "main.tf": ` -module "test" { - source = "./test" - - a = module.test.b -} - -output "c" { - value = module.test.c -} -`, - "test/main.tf": ` -variable "a" {} - -resource "test_instance" "test" { -} - -output "b" { - value = test_instance.test.id -} - -output "c" { - value = var.a -}`}) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - if !state.Empty() { - t.Fatal("expected empty state, got:", state) - } -} - -func TestContext2Apply_moduleExpandDependsOn(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "child" { - count = 1 - source = "./child" - - depends_on = [test_instance.a, test_instance.b] -} - -resource "test_instance" "a" { -} - - -resource "test_instance" "b" { -} -`, - "child/main.tf": ` -resource "test_instance" "foo" { -} - -output "myoutput" { - value = "literal string" -} -`}) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), 
DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - if !state.Empty() { - t.Fatal("expected empty state, got:", state) - } -} - -func TestContext2Apply_scaleInCBD(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "ct" { - type = number -} - -resource "test_instance" "a" { - count = var.ct -} - -resource "test_instance" "b" { - require_new = local.removable - lifecycle { - create_before_destroy = true - } -} - -resource "test_instance" "c" { - require_new = test_instance.b.id - lifecycle { - create_before_destroy = true - } -} - -output "out" { - value = join(".", test_instance.a[*].id) -} - -locals { - removable = join(".", test_instance.a[*].id) -} -`}) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a0"}`), - Dependencies: []addrs.ConfigResource{}, - CreateBeforeDestroy: true, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a1"}`), - Dependencies: []addrs.ConfigResource{}, - CreateBeforeDestroy: true, - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b", "require_new":"old.old"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_instance.a")}, - CreateBeforeDestroy: true, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.c").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"c", "require_new":"b"}`), - Dependencies: []addrs.ConfigResource{ - mustConfigResourceAddr("test_instance.a"), - mustConfigResourceAddr("test_instance.b"), - }, - CreateBeforeDestroy: true, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - p := testProvider("test") - - p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // this is a destroy plan - if r.ProposedNewState.IsNull() { - resp.PlannedState = r.ProposedNewState - resp.PlannedPrivate = r.PriorPrivate - return resp - } - - n := r.ProposedNewState.AsValueMap() - - if r.PriorState.IsNull() { - n["id"] = cty.UnknownVal(cty.String) - resp.PlannedState = cty.ObjectVal(n) - return resp - } - - p := r.PriorState.AsValueMap() - - priorRN := p["require_new"] - newRN := n["require_new"] - - if eq := priorRN.Equals(newRN); !eq.IsKnown() || eq.False() { - resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "require_new"}}} - n["id"] = cty.UnknownVal(cty.String) - } - - resp.PlannedState = cty.ObjectVal(n) - return resp - } - - // reduce the count to 1 - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, 
&PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"ct": &InputValue{
				Value:      cty.NumberIntVal(1),
				SourceType: ValueFromCaller,
			},
		},
	})
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// test_instance.a[0] survives the scale-in from 2 to 1, so it must plan
	// as a NoOp with no particular action reason.
	{
		addr := mustResourceInstanceAddr("test_instance.a[0]")
		change := plan.Changes.ResourceInstance(addr)
		if change == nil {
			t.Fatalf("no planned change for %s", addr)
		}
		if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[0]"); !want.Equal(got) {
			t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want)
		}
		if got, want := change.Action, plans.NoOp; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason for %s %s; want %s", addr, got, want)
		}
	}

	// test_instance.a[1] falls outside the reduced count, so it must be
	// planned for deletion specifically because of its count index.
	{
		addr := mustResourceInstanceAddr("test_instance.a[1]")
		change := plan.Changes.ResourceInstance(addr)
		if change == nil {
			t.Fatalf("no planned change for %s", addr)
		}
		if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[1]"); !want.Equal(got) {
			t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want)
		}
		if got, want := change.Action, plans.Delete; got != want {
			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want {
			t.Errorf("wrong action reason for %s %s; want %s", addr, got, want)
		}
	}

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		// BUG FIX: this previously called log.Fatal, which exits the whole
		// test process via os.Exit, skipping the testing framework's
		// reporting and any remaining tests. t.Fatal fails only this test.
		t.Fatal(diags.ErrWithWarnings())
	}

	// check the output, as those can't cause an error planning the value
	out := state.RootModule().OutputValues["out"].Value.AsString()
	if out != "a0" {
		t.Fatalf(`expected output "a0", got: %q`, out)
	}

	// reduce the count to 0
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "ct": &InputValue{ - Value: cty.NumberIntVal(0), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - { - addr := mustResourceInstanceAddr("test_instance.a[0]") - change := plan.Changes.ResourceInstance(addr) - if change == nil { - t.Fatalf("no planned change for %s", addr) - } - if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[0]"); !want.Equal(got) { - t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) - } - if got, want := change.Action, plans.Delete; got != want { - t.Errorf("wrong action for %s %s; want %s", addr, got, want) - } - if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want { - t.Errorf("wrong action reason for %s %s; want %s", addr, got, want) - } - } - { - addr := mustResourceInstanceAddr("test_instance.a[1]") - change := plan.Changes.ResourceInstance(addr) - if change != nil { - // It was already removed in the previous plan/apply - t.Errorf("unexpected planned change for %s", addr) - } - } - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - // check the output, as those can't cause an error planning the value - out = state.RootModule().OutputValues["out"].Value.AsString() - if out != "" { - t.Fatalf(`expected output "", got: %q`, out) - } -} - -// Ensure that we can destroy when a provider references a resource that will -// also be destroyed -func TestContext2Apply_destroyProviderReference(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -provider "null" { - value = "" -} - -module "mod" { - source = "./mod" -} - -provider "test" { - value = module.mod.output -} - -resource "test_instance" "bar" { -} -`, - "mod/main.tf": ` -data 
"null_data_source" "foo" { - count = 1 -} - - -output "output" { - value = data.null_data_source.foo[0].output -} -`}) - - schemaFn := func(name string) *ProviderSchema { - return &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - name + "_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - name + "_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "output": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - } - } - - testP := new(MockProvider) - testP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - testP.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schemaFn("test")) - - providerConfig := "" - testP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - value := req.Config.GetAttr("value") - if value.IsKnown() && !value.IsNull() { - providerConfig = value.AsString() - } else { - providerConfig = "" - } - return resp - } - testP.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if providerConfig != "valid" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provider config is %q", providerConfig)) - return - } - return testApplyFn(req) - } - testP.PlanResourceChangeFn = testDiffFn - - nullP := new(MockProvider) - nullP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} 
- } - nullP.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schemaFn("null")) - - nullP.ApplyResourceChangeFn = testApplyFn - nullP.PlanResourceChangeFn = testDiffFn - - nullP.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("ID"), - "output": cty.StringVal("valid"), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), - addrs.NewDefaultProvider("null"): testProviderFuncFixed(nullP), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), - addrs.NewDefaultProvider("null"): testProviderFuncFixed(nullP), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("destroy apply errors: %s", diags.Err()) - } -} - -// Destroying properly requires pruning out all unneeded config nodes to -// prevent incorrect expansion evaluation. 
-func TestContext2Apply_destroyInterModuleExpansion(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "test_data_source" "a" { - for_each = { - one = "thing" - } -} - -locals { - module_input = { - for k, v in data.test_data_source.a : k => v.id - } -} - -module "mod1" { - source = "./mod" - input = local.module_input -} - -module "mod2" { - source = "./mod" - input = module.mod1.outputs -} - -resource "test_instance" "bar" { - for_each = module.mod2.outputs -} - -output "module_output" { - value = module.mod2.outputs -} -output "test_instances" { - value = test_instance.bar -} -`, - "mod/main.tf": ` -variable "input" { -} - -data "test_data_source" "foo" { - for_each = var.input -} - -output "outputs" { - value = data.test_data_source.foo -} -`}) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_source"), - "foo": cty.StringVal("output"), - }), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - destroy := func() { - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("destroy apply errors: %s", diags.Err()) - } - } - - destroy() - // Destroying again from the empty state should not cause any 
errors either - destroy() -} - -func TestContext2Apply_createBeforeDestroyWithModule(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "v" {} - -module "mod" { - source = "./mod" - in = var.v -} - -resource "test_resource" "a" { - value = var.v - depends_on = [module.mod] - lifecycle { - create_before_destroy = true - } -} -`, - "mod/main.tf": ` -variable "in" {} - -resource "test_resource" "a" { - value = var.in -} -`}) - - p := testProvider("test") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // this is a destroy plan - if req.ProposedNewState.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - proposed := req.ProposedNewState.AsValueMap() - proposed["id"] = cty.UnknownVal(cty.String) - - resp.PlannedState = cty.ObjectVal(proposed) - resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "value"}}} - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "v": &InputValue{ - Value: cty.StringVal("A"), - }, - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "v": &InputValue{ - Value: cty.StringVal("B"), - }, - }, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_forcedCBD(t 
*testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "v" {} - -resource "test_instance" "a" { - require_new = var.v -} - -resource "test_instance" "b" { - depends_on = [test_instance.a] - lifecycle { - create_before_destroy = true - } -} -`}) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "v": &InputValue{ - Value: cty.StringVal("A"), - }, - }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "v": &InputValue{ - Value: cty.StringVal("B"), - }, - }, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_removeReferencedResource(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "ct" { -} - -resource "test_resource" "to_remove" { - count = var.ct -} - -resource "test_resource" "c" { - value = join("", test_resource.to_remove[*].id) -} -`}) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "ct": &InputValue{ - Value: cty.NumberIntVal(1), - }, 
- }, - }) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "ct": &InputValue{ - Value: cty.NumberIntVal(0), - }, - }, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_variableSensitivity(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "sensitive_var" { - default = "foo" - sensitive = true -} - -variable "sensitive_id" { - default = "secret id" - sensitive = true -} - -resource "test_resource" "foo" { - value = var.sensitive_var - - network_interface { - network_interface_id = var.sensitive_id - } -}`, - }) - - p := new(MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "network_interface_id": {Type: cty.String, Optional: true}, - "device_index": {Type: cty.Number, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }) - p.PlanResourceChangeFn = testDiffFn - - ctx := 
testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - // Run a second apply with no changes - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - state, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - // Now change the variable value for sensitive_var - ctx = testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags = ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "sensitive_id": &InputValue{Value: cty.NilVal}, - "sensitive_var": &InputValue{ - Value: cty.StringVal("bar"), - }, - }, - }) - assertNoErrors(t, diags) - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } -} - -func TestContext2Apply_variableSensitivityPropagation(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "sensitive_map" { - type = map(string) - default = { - "x" = "foo" - } - sensitive = true -} - -resource "test_resource" "foo" { - value = var.sensitive_map.x -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - 
}) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("plan errors: %s", diags.Err()) - } - - verifySensitiveValue := func(pvms []cty.PathValueMarks) { - if len(pvms) != 1 { - t.Fatalf("expected 1 sensitive path, got %d", len(pvms)) - } - pvm := pvms[0] - if gotPath, wantPath := pvm.Path, cty.GetAttrPath("value"); !gotPath.Equals(wantPath) { - t.Errorf("wrong path\n got: %#v\nwant: %#v", gotPath, wantPath) - } - if gotMarks, wantMarks := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !gotMarks.Equal(wantMarks) { - t.Errorf("wrong marks\n got: %#v\nwant: %#v", gotMarks, wantMarks) - } - } - - addr := mustResourceInstanceAddr("test_resource.foo") - fooChangeSrc := plan.Changes.ResourceInstance(addr) - verifySensitiveValue(fooChangeSrc.AfterValMarks) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - fooState := state.ResourceInstance(addr) - verifySensitiveValue(fooState.Current.AttrSensitivePaths) -} - -func TestContext2Apply_variableSensitivityProviders(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_resource" "foo" { - sensitive_value = "should get marked" -} - -resource "test_resource" "bar" { - value = test_resource.foo.sensitive_value - random = test_resource.foo.id # not sensitive - - nesting_single { - value = "abc" - sensitive_value = "xyz" - } -} - -resource "test_resource" "baz" { - value = test_resource.bar.nesting_single.sensitive_value -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("plan errors: %s", diags.Err()) - } - - verifySensitiveValue 
:= func(pvms []cty.PathValueMarks) { - if len(pvms) != 1 { - t.Fatalf("expected 1 sensitive path, got %d", len(pvms)) - } - pvm := pvms[0] - if gotPath, wantPath := pvm.Path, cty.GetAttrPath("value"); !gotPath.Equals(wantPath) { - t.Errorf("wrong path\n got: %#v\nwant: %#v", gotPath, wantPath) - } - if gotMarks, wantMarks := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !gotMarks.Equal(wantMarks) { - t.Errorf("wrong marks\n got: %#v\nwant: %#v", gotMarks, wantMarks) - } - } - - // Sensitive attributes (defined by the provider) are marked - // as sensitive when referenced from another resource - // "bar" references sensitive resources in "foo" - barAddr := mustResourceInstanceAddr("test_resource.bar") - barChangeSrc := plan.Changes.ResourceInstance(barAddr) - verifySensitiveValue(barChangeSrc.AfterValMarks) - - bazAddr := mustResourceInstanceAddr("test_resource.baz") - bazChangeSrc := plan.Changes.ResourceInstance(bazAddr) - verifySensitiveValue(bazChangeSrc.AfterValMarks) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - barState := state.ResourceInstance(barAddr) - verifySensitiveValue(barState.Current.AttrSensitivePaths) - - bazState := state.ResourceInstance(bazAddr) - verifySensitiveValue(bazState.Current.AttrSensitivePaths) -} - -func TestContext2Apply_variableSensitivityChange(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "sensitive_var" { - default = "hello" - sensitive = true -} - -resource "test_resource" "foo" { - value = var.sensitive_var -}`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - 
Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo", "value":"hello"}`), - // No AttrSensitivePaths present - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - addr := mustResourceInstanceAddr("test_resource.foo") - - state, diags = ctx.Apply(plan, m) - assertNoErrors(t, diags) - - fooState := state.ResourceInstance(addr) - - if len(fooState.Current.AttrSensitivePaths) != 1 { - t.Fatalf("wrong number of sensitive paths, expected 1, got, %v", len(fooState.Current.AttrSensitivePaths)) - } - got := fooState.Current.AttrSensitivePaths[0] - want := cty.PathValueMarks{ - Path: cty.GetAttrPath("value"), - Marks: cty.NewValueMarks(marks.Sensitive), - } - - if !got.Equal(want) { - t.Fatalf("wrong value marks; got:\n%#v\n\nwant:\n%#v\n", got, want) - } - - m2 := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "sensitive_var" { - default = "hello" - sensitive = false -} - -resource "test_resource" "foo" { - value = var.sensitive_var -}`, - }) - - ctx2 := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - // NOTE: Prior to our refactoring to make the state an explicit argument - // of Plan, as opposed to hidden state inside Context, this test was - // calling ctx.Apply instead of ctx2.Apply and thus using the previous - // plan instead of this new plan. 
"Fixing" it to use the new plan seems - // to break the test, so we've preserved that oddity here by saving the - // old plan as oldPlan and essentially discarding the new plan entirely, - // but this seems rather suspicious and we should ideally figure out what - // this test was originally intending to do and make it do that. - oldPlan := plan - _, diags = ctx2.Plan(m2, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - stateWithoutSensitive, diags := ctx.Apply(oldPlan, m) - assertNoErrors(t, diags) - - fooState2 := stateWithoutSensitive.ResourceInstance(addr) - if len(fooState2.Current.AttrSensitivePaths) > 0 { - t.Fatalf( - "wrong number of sensitive paths, expected 0, got, %v\n%s", - len(fooState2.Current.AttrSensitivePaths), - spew.Sdump(fooState2.Current.AttrSensitivePaths), - ) - } -} - -func TestContext2Apply_moduleVariableOptionalAttributes(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "in" { - type = object({ - required = string - optional = optional(string) - default = optional(bool, true) - nested = optional( - map(object({ - a = optional(string, "foo") - b = optional(number, 5) - })), { - "boop": {} - } - ) - }) -} - -output "out" { - value = var.in -} -`}) - - ctx := testContext2(t, &ContextOpts{}) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "in": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "required": cty.StringVal("boop"), - }), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - got := state.RootModule().OutputValues["out"].Value - want := cty.ObjectVal(map[string]cty.Value{ - "required": cty.StringVal("boop"), - - // Because "optional" was marked as optional, it got silently filled - // in as a null 
value of string type rather than returning an error. - "optional": cty.NullVal(cty.String), - - // Similarly, "default" was marked as optional with a default value, - // and since it was omitted should be filled in with that default. - "default": cty.True, - - // Nested is a complex structure which has fully described defaults, - // so again it should be filled with the default structure. - "nested": cty.MapVal(map[string]cty.Value{ - "boop": cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("foo"), - "b": cty.NumberIntVal(5), - }), - }), - }) - if !want.RawEquals(got) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestContext2Apply_moduleVariableOptionalAttributesDefault(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "in" { - type = object({ - required = string - optional = optional(string) - default = optional(bool, true) - }) - default = { - required = "boop" - } -} - -output "out" { - value = var.in -} -`}) - - ctx := testContext2(t, &ContextOpts{}) - - // We don't specify a value for the variable here, relying on its defined - // default. - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - got := state.RootModule().OutputValues["out"].Value - want := cty.ObjectVal(map[string]cty.Value{ - "required": cty.StringVal("boop"), - - // "optional" is not present in the variable default, so it is filled - // with null. - "optional": cty.NullVal(cty.String), - - // Similarly, "default" is not present in the variable default, so its - // value is replaced with the type's specified default. 
- "default": cty.True, - }) - if !want.RawEquals(got) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestContext2Apply_moduleVariableOptionalAttributesDefaultNull(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "in" { - type = object({ - required = string - optional = optional(string) - default = optional(bool, true) - }) - default = null -} - -# Wrap the input variable in a tuple because a null output value is elided from -# the plan, which prevents us from testing its type. -output "out" { - value = [var.in] -} -`}) - - ctx := testContext2(t, &ContextOpts{}) - - // We don't specify a value for the variable here, relying on its defined - // default. - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - got := state.RootModule().OutputValues["out"].Value - // The null default value should be bound, after type converting to the - // full object type - want := cty.TupleVal([]cty.Value{cty.NullVal(cty.Object(map[string]cty.Type{ - "required": cty.String, - "optional": cty.String, - "default": cty.Bool, - }))}) - if !want.RawEquals(got) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestContext2Apply_moduleVariableOptionalAttributesDefaultChild(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "in" { - type = list(object({ - a = optional(set(string)) - })) - default = [ - { a = [ "foo" ] }, - { }, - ] -} - -module "child" { - source = "./child" - in = var.in -} - -output "out" { - value = module.child.out -} -`, - "child/main.tf": ` -variable "in" { - type = list(object({ - a = optional(set(string), []) - })) - default = [] -} - -output "out" { - value = var.in -} -`, - }) - - ctx := testContext2(t, 
&ContextOpts{}) - - // We don't specify a value for the variable here, relying on its defined - // default. - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - got := state.RootModule().OutputValues["out"].Value - want := cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.SetVal([]cty.Value{cty.StringVal("foo")}), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.SetValEmpty(cty.String), - }), - }) - if !want.RawEquals(got) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestContext2Apply_provisionerSensitive(t *testing.T) { - m := testModule(t, "apply-provisioner-sensitive") - p := testProvider("aws") - - pr := testProvisioner() - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - if req.Config.ContainsMarked() { - t.Fatalf("unexpectedly marked config value: %#v", req.Config) - } - command := req.Config.GetAttr("command") - if command.IsMarked() { - t.Fatalf("unexpectedly marked command argument: %#v", command.Marks()) - } - req.UIOutput.Output(fmt.Sprintf("Executing: %q", command.AsString())) - return - } - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn - - h := new(MockHook) - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "password": &InputValue{ - Value: cty.StringVal("secret"), - SourceType: ValueFromCaller, - }, - }, - }) - 
assertNoErrors(t, diags) - - // "restart" provisioner - pr.CloseCalled = false - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - logDiagnostics(t, diags) - t.Fatal("apply failed") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testTerraformApplyProvisionerSensitiveStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - // Verify apply was invoked - if !pr.ProvisionResourceCalled { - t.Fatalf("provisioner was not called on apply") - } - - // Verify output was suppressed - if !h.ProvisionOutputCalled { - t.Fatalf("ProvisionOutput hook not called") - } - if got, doNotWant := h.ProvisionOutputMessage, "secret"; strings.Contains(got, doNotWant) { - t.Errorf("sensitive value %q included in output:\n%s", doNotWant, got) - } - if got, want := h.ProvisionOutputMessage, "output suppressed"; !strings.Contains(got, want) { - t.Errorf("expected hook to be called with %q, but was:\n%s", want, got) - } -} - -func TestContext2Apply_warnings(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_resource" "foo" { -}`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - resp := testApplyFn(req) - - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warning")) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - state, diags := ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } - - inst := state.ResourceInstance(mustResourceInstanceAddr("test_resource.foo")) - if inst == nil { - t.Fatal("missing 'test_resource.foo' in state:", state) 
- } -} - -func TestContext2Apply_rpcDiagnostics(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - resp = testApplyFn(req) - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) - return resp - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if len(diags) == 0 { - t.Fatal("expected warnings") - } - - for _, d := range diags { - des := d.Description().Summary - if !strings.Contains(des, "frobble") { - t.Fatalf(`expected frobble, got %q`, des) - } - } -} - -func TestContext2Apply_dataSensitive(t *testing.T) { - m := testModule(t, "apply-data-sensitive") - p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - // add the required id - m := req.Config.AsValueMap() - m["id"] = cty.StringVal("foo") - - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(m), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, 
states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("diags: %s", diags.Err()) - } else { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - addr := mustResourceInstanceAddr("data.null_data_source.testing") - - dataSourceState := state.ResourceInstance(addr) - pvms := dataSourceState.Current.AttrSensitivePaths - if len(pvms) != 1 { - t.Fatalf("expected 1 sensitive path, got %d", len(pvms)) - } - pvm := pvms[0] - if gotPath, wantPath := pvm.Path, cty.GetAttrPath("foo"); !gotPath.Equals(wantPath) { - t.Errorf("wrong path\n got: %#v\nwant: %#v", gotPath, wantPath) - } - if gotMarks, wantMarks := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !gotMarks.Equal(wantMarks) { - t.Errorf("wrong marks\n got: %#v\nwant: %#v", gotMarks, wantMarks) - } -} - -func TestContext2Apply_errorRestorePrivateData(t *testing.T) { - // empty config to remove our resource - m := testModuleInline(t, map[string]string{ - "main.tf": "", - }) - - p := simpleMockProvider() - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ - // we error during apply, which will trigger core to preserve the last - // known state, including private data - Diagnostics: tfdiags.Diagnostics(nil).Append(errors.New("oops")), - } - - addr := mustResourceInstanceAddr("test_object.a") - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Private: []byte("private"), - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) 
- } - - state, _ = ctx.Apply(plan, m) - if string(state.ResourceInstance(addr).Current.Private) != "private" { - t.Fatal("missing private data in state") - } -} - -func TestContext2Apply_errorRestoreStatus(t *testing.T) { - // empty config to remove our resource - m := testModuleInline(t, map[string]string{ - "main.tf": "", - }) - - p := simpleMockProvider() - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - // We error during apply, but return the current object state. - resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops")) - // return a warning too to make sure it isn't dropped - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warned")) - resp.NewState = req.PriorState - resp.Private = req.PlannedPrivate - return resp - } - - addr := mustResourceInstanceAddr("test_object.a") - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"foo"}`), - Private: []byte("private"), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.b")}, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - state, diags = ctx.Apply(plan, m) - - errString := diags.ErrWithWarnings().Error() - if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { - t.Fatalf("error missing expected info: %q", errString) - } - - if len(diags) != 2 { - t.Fatalf("expected 1 error and 1 warning, got: %q", errString) - } - - res := state.ResourceInstance(addr) - if res == nil { - t.Fatal("resource was removed from state") - } - - 
if res.Current.Status != states.ObjectTainted { - t.Fatal("resource should still be tainted in the state") - } - - if len(res.Current.Dependencies) != 1 || !res.Current.Dependencies[0].Equal(mustConfigResourceAddr("test_object.b")) { - t.Fatalf("incorrect dependencies, got %q", res.Current.Dependencies) - } - - if string(res.Current.Private) != "private" { - t.Fatalf("incorrect private data, got %q", res.Current.Private) - } -} - -func TestContext2Apply_nonConformingResponse(t *testing.T) { - // empty config to remove our resource - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { - test_string = "x" -} -`, - }) - - p := simpleMockProvider() - respDiags := tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("warned")) - respDiags = respDiags.Append(errors.New("oops")) - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ - // Don't lose these diagnostics - Diagnostics: respDiags, - // This state is missing required attributes, and should produce an error - NewState: cty.ObjectVal(map[string]cty.Value{ - "test_string": cty.StringVal("x"), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - errString := diags.ErrWithWarnings().Error() - if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { - t.Fatalf("error missing expected info: %q", errString) - } - - // we should have more than the ones returned from the provider, and they - // should not be coalesced into a single value - if len(diags) < 3 { - t.Fatalf("incorrect diagnostics, got %d values with %s", len(diags), diags.ErrWithWarnings()) - } -} - -func TestContext2Apply_nilResponse(t *testing.T) { - // empty config to remove our resource - m := 
testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -`, - }) - - p := simpleMockProvider() - p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - _, diags = ctx.Apply(plan, m) - if !diags.HasErrors() { - t.Fatal("expected and error") - } - - errString := diags.ErrWithWarnings().Error() - if !strings.Contains(errString, "invalid nil value") { - t.Fatalf("error missing expected info: %q", errString) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// NOTE: Due to the size of this file, new tests should be added to -// context_apply2_test.go. -//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/terraform/context_eval.go b/internal/terraform/context_eval.go deleted file mode 100644 index f9d0f649338b..000000000000 --- a/internal/terraform/context_eval.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -type EvalOpts struct { - SetVariables InputValues -} - -// Eval produces a scope in which expressions can be evaluated for -// the given module path. -// -// This method must first evaluate any ephemeral values (input variables, local -// values, and output values) in the configuration. These ephemeral values are -// not included in the persisted state, so they must be re-computed using other -// values in the state before they can be properly evaluated. 
The updated -// values are retained in the main state associated with the receiving context. -// -// This function takes no action against remote APIs but it does need access -// to all provider and provisioner instances in order to obtain their schemas -// for type checking. -// -// The result is an evaluation scope that can be used to resolve references -// against the root module. If the returned diagnostics contains errors then -// the returned scope may be nil. If it is not nil then it may still be used -// to attempt expression evaluation or other analysis, but some expressions -// may not behave as expected. -func (c *Context) Eval(config *configs.Config, state *states.State, moduleAddr addrs.ModuleInstance, opts *EvalOpts) (*lang.Scope, tfdiags.Diagnostics) { - // This is intended for external callers such as the "terraform console" - // command. Internally, we create an evaluator in c.walk before walking - // the graph, and create scopes in ContextGraphWalker. - - var diags tfdiags.Diagnostics - defer c.acquireRun("eval")() - - // Start with a copy of state so that we don't affect the instance that - // the caller is holding. - state = state.DeepCopy() - var walker *ContextGraphWalker - - variables := opts.SetVariables - - // By the time we get here, we should have values defined for all of - // the root module variables, even if some of them are "unknown". It's the - // caller's responsibility to have already handled the decoding of these - // from the various ways the CLI allows them to be set and to produce - // user-friendly error messages if they are not all present, and so - // the error message from checkInputVariables should never be seen and - // includes language asking the user to report a bug. 
- varDiags := checkInputVariables(config.Module.Variables, variables) - diags = diags.Append(varDiags) - - log.Printf("[DEBUG] Building and walking 'eval' graph") - - graph, moreDiags := (&EvalGraphBuilder{ - Config: config, - State: state, - RootVariableValues: variables, - Plugins: c.plugins, - }).Build(addrs.RootModuleInstance) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, diags - } - - walkOpts := &graphWalkOpts{ - InputState: state, - Config: config, - } - - walker, moreDiags = c.walk(graph, walkEval, walkOpts) - diags = diags.Append(moreDiags) - if walker != nil { - diags = diags.Append(walker.NonFatalDiagnostics) - } else { - // If we skipped walking the graph (due to errors) then we'll just - // use a placeholder graph walker here, which'll refer to the - // unmodified state. - walker = c.graphWalker(walkEval, walkOpts) - } - - // This is a bit weird since we don't normally evaluate outside of - // the context of a walk, but we'll "re-enter" our desired path here - // just to get hold of an EvalContext for it. ContextGraphWalker - // caches its contexts, so we should get hold of the context that was - // previously used for evaluation here, unless we skipped walking. 
- evalCtx := walker.EnterPath(moduleAddr) - return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags -} diff --git a/internal/terraform/context_eval_test.go b/internal/terraform/context_eval_test.go deleted file mode 100644 index ac828dbcc424..000000000000 --- a/internal/terraform/context_eval_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestContextEval(t *testing.T) { - // This test doesn't check the "Want" value for impure funcs, so the value - // on those doesn't matter. - tests := []struct { - Input string - Want cty.Value - ImpureFunc bool - }{ - { // An impure function: allowed in the console, but the result is nondeterministic - `bcrypt("example")`, - cty.NilVal, - true, - }, - { - `keys(var.map)`, - cty.ListVal([]cty.Value{ - cty.StringVal("foo"), - cty.StringVal("baz"), - }), - true, - }, - { - `local.result`, - cty.NumberIntVal(6), - false, - }, - { - `module.child.result`, - cty.NumberIntVal(6), - false, - }, - } - - // This module has a little bit of everything (and if it is missing somehitng, add to it): - // resources, variables, locals, modules, output - m := testModule(t, "eval-context-basic") - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - scope, diags := ctx.Eval(m, states.NewState(), addrs.RootModuleInstance, &EvalOpts{ - SetVariables: testInputValuesUnset(m.Module.Variables), - }) - if diags.HasErrors() { - t.Fatalf("Eval errors: %s", diags.Err()) - } - - // Since we're testing 'eval' (used by terraform console), impure functions - // should be allowed by the scope. 
- if scope.PureOnly == true { - t.Fatal("wrong result: eval should allow impure funcs") - } - - for _, test := range tests { - t.Run(test.Input, func(t *testing.T) { - // Parse the test input as an expression - expr, _ := hclsyntax.ParseExpression([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) - got, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) - - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - if !test.ImpureFunc { - if !got.RawEquals(test.Want) { - t.Fatalf("wrong result: want %#v, got %#v", test.Want, got) - } - } - }) - } -} - -// ensure that we can execute a console when outputs have preconditions -func TestContextEval_outputsWithPreconditions(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - source = "./mod" - input = "ok" -} - -output "out" { - value = module.mod.out -} -`, - - "./mod/main.tf": ` -variable "input" { - type = string -} - -output "out" { - value = var.input - - precondition { - condition = var.input != "" - error_message = "error" - } -} -`, - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Eval(m, states.NewState(), addrs.RootModuleInstance, &EvalOpts{ - SetVariables: testInputValuesUnset(m.Module.Variables), - }) - assertNoErrors(t, diags) -} diff --git a/internal/terraform/context_fixtures_test.go b/internal/terraform/context_fixtures_test.go deleted file mode 100644 index 2e9e9c27511d..000000000000 --- a/internal/terraform/context_fixtures_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - 
"github.com/hashicorp/terraform/internal/provisioners" - "github.com/zclconf/go-cty/cty" -) - -// contextTestFixture is a container for a set of objects that work together -// to create a base testing scenario. This is used to represent some common -// situations used as the basis for multiple tests. -type contextTestFixture struct { - Config *configs.Config - Providers map[addrs.Provider]providers.Factory - Provisioners map[string]provisioners.Factory -} - -// ContextOpts returns a ContextOps pre-populated with the elements of this -// fixture. Each call returns a distinct object, so callers can apply further -// _shallow_ modifications to the options as needed. -func (f *contextTestFixture) ContextOpts() *ContextOpts { - return &ContextOpts{ - Providers: f.Providers, - Provisioners: f.Provisioners, - } -} - -// contextFixtureApplyVars builds and returns a test fixture for testing -// input variables, primarily during the apply phase. The configuration is -// loaded from testdata/apply-vars, and the provider resolver is -// configured with a resource type schema for aws_instance that matches -// what's used in that configuration. 
-func contextFixtureApplyVars(t *testing.T) *contextTestFixture { - c := testModule(t, "apply-vars") - p := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, - "baz": {Type: cty.String, Optional: true}, - "num": {Type: cty.Number, Optional: true}, - "list": {Type: cty.List(cty.String), Optional: true}, - "map": {Type: cty.Map(cty.String), Optional: true}, - }, - }) - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - return &contextTestFixture{ - Config: c, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - } -} - -// contextFixtureApplyVarsEnv builds and returns a test fixture for testing -// input variables set from the environment. The configuration is -// loaded from testdata/apply-vars-env, and the provider resolver is -// configured with a resource type schema for aws_instance that matches -// what's used in that configuration. 
-func contextFixtureApplyVarsEnv(t *testing.T) *contextTestFixture { - c := testModule(t, "apply-vars-env") - p := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": {Type: cty.String, Optional: true}, - "list": {Type: cty.List(cty.String), Optional: true}, - "map": {Type: cty.Map(cty.String), Optional: true}, - "id": {Type: cty.String, Computed: true}, - "type": {Type: cty.String, Computed: true}, - }, - }) - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - return &contextTestFixture{ - Config: c, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - } -} diff --git a/internal/terraform/context_import.go b/internal/terraform/context_import.go deleted file mode 100644 index ac469e3cb85b..000000000000 --- a/internal/terraform/context_import.go +++ /dev/null @@ -1,92 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ImportOpts are used as the configuration for Import. -type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget - - // SetVariables are the variables set outside of the configuration, - // such as on the command line, in variables files, etc. - SetVariables InputValues -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the address for the resource instance that the new object should - // be imported into. - Addr addrs.AbsResourceInstance - - // ID is the ID of the resource to import. This is resource-specific. - ID string - - // ProviderAddr is the address of the provider that should handle the import. 
- ProviderAddr addrs.AbsProviderConfig -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(config *configs.Config, prevRunState *states.State, opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Don't modify our caller's state - state := prevRunState.DeepCopy() - - log.Printf("[DEBUG] Building and walking import graph") - - variables := opts.SetVariables - - // Initialize our graph builder - builder := &PlanGraphBuilder{ - ImportTargets: opts.Targets, - Config: config, - State: state, - RootVariableValues: variables, - Plugins: c.plugins, - Operation: walkImport, - } - - // Build the graph - graph, graphDiags := builder.Build(addrs.RootModuleInstance) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return state, diags - } - - // Walk it - walker, walkDiags := c.walk(graph, walkImport, &graphWalkOpts{ - Config: config, - InputState: state, - }) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return state, diags - } - - // Data sources which could not be read during the import plan will be - // unknown. We need to strip those objects out so that the state can be - // serialized. 
- walker.State.RemovePlannedResourceInstanceObjects() - - newState := walker.State.Close() - return newState, diags -} diff --git a/internal/terraform/context_import_test.go b/internal/terraform/context_import_test.go deleted file mode 100644 index 920fe234384e..000000000000 --- a/internal/terraform/context_import_test.go +++ /dev/null @@ -1,1042 +0,0 @@ -package terraform - -import ( - "errors" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestContextImport_basic(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportStr) - if actual != expected { - t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", actual, expected) - } -} - -// import 1 of count instances in the configuration -func TestContextImport_countIndex(t *testing.T) { - p := testProvider("aws") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "foo" { - 
count = 2 -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(0), - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportCountIndexStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_collision(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "bar", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, state, 
&ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if !diags.HasErrors() { - t.Fatalf("succeeded; want an error indicating that the resource already exists in state") - } - - actual := strings.TrimSpace(state.String()) - expected := `aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"]` - - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_missingType(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("should error") - } - - actual := strings.TrimSpace(state.String()) - expected := "" - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_moduleProvider(t *testing.T) { - p := testProvider("aws") - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - foo := req.Config.GetAttr("foo").AsString() - if foo != "bar" 
{ - resp.Diagnostics = resp.Diagnostics.Append(errors.New("not bar")) - } - - return - } - - m := testModule(t, "import-provider") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("didn't configure provider") - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -// Importing into a module requires a provider config in that module. -func TestContextImport_providerModule(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-module") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - foo := req.Config.GetAttr("foo").AsString() - if foo != "bar" { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("not bar")) - } - - return - } - - _, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.Child("child", addrs.NoKey).ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, 
- ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("didn't configure provider") - } -} - -// Test that import will interpolate provider configuration and use -// that configuration for import. -func TestContextImport_providerConfig(t *testing.T) { - testCases := map[string]struct { - module string - value string - }{ - "variables": { - module: "import-provider-vars", - value: "bar", - }, - "locals": { - module: "import-provider-locals", - value: "baz-bar", - }, - } - for name, test := range testCases { - t.Run(name, func(t *testing.T) { - p := testProvider("aws") - m := testModule(t, test.module) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !p.ConfigureProviderCalled { - t.Fatal("didn't configure provider") - } - - if foo := p.ConfigureProviderRequest.Config.GetAttr("foo").AsString(); foo != test.value { - t.Fatalf("bad value %#v; want %#v", foo, test.value) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } - }) - } -} - -// Test that provider 
configs can't reference resources. -func TestContextImport_providerConfigResources(t *testing.T) { - p := testProvider("aws") - pTest := testProvider("test") - m := testModule(t, "import-provider-resources") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - _, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("should error") - } - if got, want := diags.Err().Error(), `The configuration for provider["registry.terraform.io/hashicorp/aws"] depends on values that cannot be determined until apply.`; !strings.Contains(got, want) { - t.Errorf("wrong error\n got: %s\nwant: %s", got, want) - } -} - -func TestContextImport_refresh(t *testing.T) { - p := testProvider("aws") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "foo" { -} - - -// we are only importing aws_instance.foo, so these resources will be unknown -resource "aws_instance" "bar" { -} -data "aws_data_source" "bar" { - foo = aws_instance.bar.id -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: 
cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("id"), - "foo": cty.UnknownVal(cty.String), - }), - } - - p.ReadResourceFn = nil - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "foo": cty.StringVal("bar"), - }), - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if d := state.ResourceInstance(mustResourceInstanceAddr("data.aws_data_source.bar")); d != nil { - t.Errorf("data.aws_data_source.bar has a status of ObjectPlanned and should not be in the state\ngot:%#v\n", d.Current) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportRefreshStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_refreshNil(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{ - NewState: cty.NullVal(cty.DynamicPseudoType), - } - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - 
Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("should error") - } - - actual := strings.TrimSpace(state.String()) - expected := "" - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_module(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-module") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportModuleStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_moduleDepth2(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-module") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, 
states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).Child("nested", addrs.NoKey).ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "baz", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportModuleDepth2Str) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_moduleDiff(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-module") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - }, - } - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "baz", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportModuleStr) - if actual != expected { - t.Fatalf("\nexpected: %q\ngot: %q\n", expected, actual) - } -} - -func TestContextImport_multiState(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: 
map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - "aws_instance_thing": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - { - TypeName: "aws_instance_thing", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - }), - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportMultiStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_multiStateSame(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "import-provider") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - "aws_instance_thing": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - 
- p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - }, - { - TypeName: "aws_instance_thing", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - }), - }, - { - TypeName: "aws_instance_thing", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("qux"), - }), - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, - ), - ID: "bar", - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(state.String()) - expected := strings.TrimSpace(testImportMultiSameStr) - if actual != expected { - t.Fatalf("bad: \n%s", actual) - } -} - -func TestContextImport_nestedModuleImport(t *testing.T) { - p := testProvider("aws") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - xs = toset(["foo"]) -} - -module "a" { - for_each = local.xs - source = "./a" -} - -module "b" { - for_each = local.xs - source = "./b" - y = module.a[each.key].y -} - -resource "test_resource" "test" { -} -`, - "a/main.tf": ` -output "y" { - value = "bar" -} -`, - "b/main.tf": ` -variable "y" { - type = string -} - -resource "test_resource" "unused" { - value = var.y - // missing required, but should not error -} -`, - }) - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: 
map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "required": {Type: cty.String, Required: true}, - }, - }, - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "test_resource", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("test"), - "required": cty.StringVal("value"), - }), - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "test_resource", "test", addrs.NoKey, - ), - ID: "test", - }, - }, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - ri := state.ResourceInstance(mustResourceInstanceAddr("test_resource.test")) - expected := `{"id":"test","required":"value"}` - if ri == nil || ri.Current == nil { - t.Fatal("no state is recorded for resource instance test_resource.test") - } - if string(ri.Current.AttrsJSON) != expected { - t.Fatalf("expected %q, got %q\n", expected, ri.Current.AttrsJSON) - } -} - -// New resources in the config during import won't exist for evaluation -// purposes (until import is upgraded to using a complete plan). This means -// that references to them are unknown, but in the case of single instances, we -// can at least know the type of unknown value. 
-func TestContextImport_newResourceUnknown(t *testing.T) { - p := testProvider("aws") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_resource" "one" { -} - -resource "test_resource" "two" { - count = length(flatten([test_resource.one.id])) -} - -resource "test_resource" "test" { -} -`}) - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "test_resource", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("test"), - }), - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ - Targets: []*ImportTarget{ - { - Addr: addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "test_resource", "test", addrs.NoKey, - ), - ID: "test", - }, - }, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - ri := state.ResourceInstance(mustResourceInstanceAddr("test_resource.test")) - expected := `{"id":"test"}` - if ri == nil || ri.Current == nil { - t.Fatal("no state is recorded for resource instance test_resource.test") - } - if string(ri.Current.AttrsJSON) != expected { - t.Fatalf("expected %q, got %q\n", expected, ri.Current.AttrsJSON) - } -} - -const testImportStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportCountIndexStr = ` -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportModuleStr = ` - -module.child[0]: - 
aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportModuleDepth2Str = ` - -module.child[0].nested: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportMultiStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance_thing.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportMultiSameStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance_thing.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance_thing.foo-1: - ID = qux - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testImportRefreshStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar -` diff --git a/internal/terraform/context_input.go b/internal/terraform/context_input.go deleted file mode 100644 index 153546d2868b..000000000000 --- a/internal/terraform/context_input.go +++ /dev/null @@ -1,206 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Input asks for input to fill unset required arguments in provider -// configurations. -// -// Unlike the other better-behaved operation methods, this one actually -// modifies some internal state inside the receving context so that the -// captured values will be implicitly available to a subsequent call to Plan, -// or to some other operation entry point. Hopefully a future iteration of -// this will change design to make that data flow more explicit. 
-// -// Because Input saves the results inside the Context object, asking for -// input twice on the same Context is invalid and will lead to undefined -// behavior. -// -// Once you've called Input with a particular config, it's invalid to call -// any other Context method with a different config, because the aforementioned -// modified internal state won't match. Again, this is an architectural wart -// that we'll hopefully resolve in future. -func (c *Context) Input(config *configs.Config, mode InputMode) tfdiags.Diagnostics { - // This function used to be responsible for more than it is now, so its - // interface is more general than its current functionality requires. - // It now exists only to handle interactive prompts for provider - // configurations, with other prompts the responsibility of the CLI - // layer prior to calling in to this package. - // - // (Hopefully in future the remaining functionality here can move to the - // CLI layer too in order to avoid this odd situation where core code - // produces UI input prompts.) - - var diags tfdiags.Diagnostics - defer c.acquireRun("input")() - - schemas, moreDiags := c.Schemas(config, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return diags - } - - if c.uiInput == nil { - log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") - return diags - } - - ctx := context.Background() - - if mode&InputModeProvider != 0 { - log.Printf("[TRACE] Context.Input: Prompting for provider arguments") - - // We prompt for input only for provider configurations defined in - // the root module. Provider configurations in other modules are a - // legacy thing we no longer recommend, and even if they weren't we - // can't practically prompt for their inputs here because we've not - // yet done "expansion" and so we don't know whether the modules are - // using count or for_each. 
- - pcs := make(map[string]*configs.Provider) - pas := make(map[string]addrs.LocalProviderConfig) - for _, pc := range config.Module.ProviderConfigs { - addr := pc.Addr() - pcs[addr.String()] = pc - pas[addr.String()] = addr - log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) - } - // We also need to detect _implied_ provider configs from resources. - // These won't have *configs.Provider objects, but they will still - // exist in the map and we'll just treat them as empty below. - for _, rc := range config.Module.ManagedResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) - } - } - for _, rc := range config.Module.DataResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) - } - } - - for pk, pa := range pas { - pc := pcs[pk] // will be nil if this is an implied config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: pk, - QueryPrefix: pk + ".", - UIInput: c.uiInput, - } - - providerFqn := config.Module.ProviderForLocalConfig(pa) - schema := schemas.ProviderConfig(providerFqn) - if schema == nil { - // Could either be an incorrect config or just an incomplete - // mock in tests. We'll let a later pass decide, and just - // ignore this for the purposes of gathering input. 
- log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.LocalName) - continue - } - - // For our purposes here we just want to detect if attrbutes are - // set in config at all, so rather than doing a full decode - // (which would require us to prepare an evalcontext, etc) we'll - // use the low-level HCL API to process only the top-level - // structure. - var attrExprs hcl.Attributes // nil if there is no config - if pc != nil && pc.Config != nil { - lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) - content, _, diags := pc.Config.PartialContent(lowLevelSchema) - if diags.HasErrors() { - log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) - continue - } - attrExprs = content.Attributes - } - - keys := make([]string, 0, len(schema.Attributes)) - for key := range schema.Attributes { - keys = append(keys, key) - } - sort.Strings(keys) - - vals := map[string]cty.Value{} - for _, key := range keys { - attrS := schema.Attributes[key] - if attrS.Optional { - continue - } - if attrExprs != nil { - if _, exists := attrExprs[key]; exists { - continue - } - } - if !attrS.Type.Equals(cty.String) { - continue - } - - log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) - rawVal, err := input.Input(ctx, &InputOpts{ - Id: key, - Query: key, - Description: attrS.Description, - }) - if err != nil { - log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) - continue - } - - vals[key] = cty.StringVal(rawVal) - } - - absConfigAddr := addrs.AbsProviderConfig{ - Provider: providerFqn, - Alias: pa.Alias, - Module: config.Path, - } - c.providerInputConfig[absConfigAddr.String()] = vals - - log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) - } - } - - return diags -} - -// schemaForInputSniffing returns a transformed version of a given schema -// that marks all attributes as optional, which the Context.Input method 
can -// use to detect whether a required argument is set without missing arguments -// themselves generating errors. -func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} diff --git a/internal/terraform/context_input_test.go b/internal/terraform/context_input_test.go deleted file mode 100644 index 5216efb5965f..000000000000 --- a/internal/terraform/context_input_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package terraform - -import ( - "reflect" - "strings" - "sync" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -func TestContext2Input_provider(t *testing.T) { - m := testModule(t, "input-provider") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - Description: "something something", - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - inp := &MockUIInput{ - InputReturnMap: map[string]string{ - "provider.aws.foo": "bar", - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: inp, - }) - - var actual interface{} - p.ConfigureProviderFn = func(req 
providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - actual = req.Config.GetAttr("foo").AsString() - return - } - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } - - if !inp.InputCalled { - t.Fatal("no input prompt; want prompt for argument \"foo\"") - } - if got, want := inp.InputOpts.Description, "something something"; got != want { - t.Errorf("wrong description\ngot: %q\nwant: %q", got, want) - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !reflect.DeepEqual(actual, "bar") { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") - } -} - -func TestContext2Input_providerMulti(t *testing.T) { - m := testModule(t, "input-provider-multi") - - getProviderSchemaResponse := getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - Description: "something something", - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - // In order to update the provider to check only the configure calls during - // apply, we will need to inject a new factory function after plan. We must - // use a closure around the factory, because in order for the inputs to - // work during apply we need to maintain the same context value, preventing - // us from assigning a new Providers map. 
- providerFactory := func() (providers.Interface, error) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponse - return p, nil - } - - inp := &MockUIInput{ - InputReturnMap: map[string]string{ - "provider.aws.foo": "bar", - "provider.aws.east.foo": "bar", - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { - return providerFactory() - }, - }, - UIInput: inp, - }) - - var actual []interface{} - var lock sync.Mutex - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - providerFactory = func() (providers.Interface, error) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponse - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - lock.Lock() - defer lock.Unlock() - actual = append(actual, req.Config.GetAttr("foo").AsString()) - return - } - return p, nil - } - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - expected := []interface{}{"bar", "bar"} - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, expected) - } -} - -func TestContext2Input_providerOnce(t *testing.T) { - m := testModule(t, "input-provider-once") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } -} - -func TestContext2Input_providerId(t *testing.T) { - input := new(MockUIInput) - - m := testModule(t, "input-provider") - - p := testProvider("aws") - 
p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - Description: "something something", - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - var actual interface{} - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - actual = req.Config.GetAttr("foo").AsString() - return - } - - input.InputReturnMap = map[string]string{ - "provider.aws.foo": "bar", - } - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !reflect.DeepEqual(actual, "bar") { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") - } -} - -func TestContext2Input_providerOnly(t *testing.T) { - input := new(MockUIInput) - - m := testModule(t, "input-provider-vars") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Required: true}, - "id": {Type: cty.String, Computed: true}, - "type": {Type: cty.String, 
Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - input.InputReturnMap = map[string]string{ - "provider.aws.foo": "bar", - } - - var actual interface{} - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - actual = req.Config.GetAttr("foo").AsString() - return - } - - if err := ctx.Input(m, InputModeProvider); err != nil { - t.Fatalf("err: %s", err) - } - - // NOTE: This is a stale test case from an older version of Terraform - // where Input was responsible for prompting for both input variables _and_ - // provider configuration arguments, where it was trying to test the case - // where we were turning off the mode of prompting for input variables. - // That's now always disabled, and so this is essentially the same as the - // normal Input test, but we're preserving it until we have time to review - // and make sure this isn't inadvertently providing unique test coverage - // other than what it set out to test. 
- plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("us-west-2"), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - state, err := ctx.Apply(plan, m) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !reflect.DeepEqual(actual, "bar") { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") - } - - actualStr := strings.TrimSpace(state.String()) - expectedStr := strings.TrimSpace(testTerraformInputProviderOnlyStr) - if actualStr != expectedStr { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actualStr, expectedStr) - } -} - -func TestContext2Input_providerVars(t *testing.T) { - input := new(MockUIInput) - m := testModule(t, "input-provider-with-vars") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - input.InputReturnMap = map[string]string{ - "var.foo": "bar", - } - - var actual interface{} - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - actual = req.Config.GetAttr("foo").AsString() - return - } - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCaller, - }, - }, - }) - assertNoErrors(t, diags) - - if _, diags := ctx.Apply(plan, m); diags.HasErrors() { - t.Fatalf("apply errors: %s", diags.Err()) - } - - if !reflect.DeepEqual(actual, "bar") { - t.Fatalf("bad: %#v", actual) - } -} - -func TestContext2Input_providerVarsModuleInherit(t *testing.T) { - input := new(MockUIInput) - m := testModule(t, "input-provider-with-vars-and-module") - p := 
testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } -} - -// adding a list interpolation in fails to interpolate the count variable -func TestContext2Input_submoduleTriggersInvalidCount(t *testing.T) { - input := new(MockUIInput) - m := testModule(t, "input-submodule-count") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } -} - -// In this case, a module variable can't be resolved from a data source until -// it's refreshed, but it can't be refreshed during Input. -func TestContext2Input_dataSourceRequiresRefresh(t *testing.T) { - input := new(MockUIInput) - p := testProvider("null") - m := testModule(t, "input-module-data-vars") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - DataSources: map[string]*configschema.Block{ - "null_data_source": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.List(cty.String), Optional: true}, - }, - }, - }, - }) - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: req.Config, - } - } - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "null_data_source", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "-", - "foo.#": "1", - "foo.0": "a", - // foo.1 
exists in the data source, but needs to be refreshed. - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("null"), - Module: addrs.RootModule, - }, - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { - t.Fatalf("input errors: %s", diags.Err()) - } - - // ensure that plan works after Refresh. This is a legacy test that - // doesn't really make sense anymore, because Refresh is really just - // a wrapper around plan anyway, but we're keeping it until we get a - // chance to review and check whether it's giving us any additional - // test coverage aside from what it's specifically intending to test. - if _, diags := ctx.Refresh(m, state, DefaultPlanOpts); diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - if _, diags := ctx.Plan(m, state, DefaultPlanOpts); diags.HasErrors() { - t.Fatalf("plan errors: %s", diags.Err()) - } -} diff --git a/internal/terraform/context_plan.go b/internal/terraform/context_plan.go deleted file mode 100644 index 416232a48031..000000000000 --- a/internal/terraform/context_plan.go +++ /dev/null @@ -1,867 +0,0 @@ -package terraform - -import ( - "bytes" - "fmt" - "log" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang/globalref" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// PlanOpts are the various options that affect the details of how Terraform -// will build a plan. 
-type PlanOpts struct { - // Mode defines what variety of plan the caller wishes to create. - // Refer to the documentation of the plans.Mode type and its values - // for more information. - Mode plans.Mode - - // SkipRefresh specifies to trust that the current values for managed - // resource instances in the prior state are accurate and to therefore - // disable the usual step of fetching updated values for each resource - // instance using its corresponding provider. - SkipRefresh bool - - // PreDestroyRefresh indicated that this is being passed to a plan used to - // refresh the state immediately before a destroy plan. - // FIXME: This is a temporary fix to allow the pre-destroy refresh to - // succeed. The refreshing operation during destroy must be a special case, - // which can allow for missing instances in the state, and avoid blocking - // on failing condition tests. The destroy plan itself should be - // responsible for this special case of refreshing, and the separate - // pre-destroy plan removed entirely. - PreDestroyRefresh bool - - // SetVariables are the raw values for root module variables as provided - // by the user who is requesting the run, prior to any normalization or - // substitution of defaults. See the documentation for the InputValue - // type for more information on how to correctly populate this. - SetVariables InputValues - - // If Targets has a non-zero length then it activates targeted planning - // mode, where Terraform will take actions only for resource instances - // mentioned in this set and any other objects those resource instances - // depend on. - // - // Targeted planning mode is intended for exceptional use only, - // and so populating this field will cause Terraform to generate extra - // warnings as part of the planning result. 
- Targets []addrs.Targetable - - // ForceReplace is a set of resource instance addresses whose corresponding - // objects should be forced planned for replacement if the provider's - // plan would otherwise have been to either update the object in-place or - // to take no action on it at all. - // - // A typical use of this argument is to ask Terraform to replace an object - // which the user has determined is somehow degraded (via information from - // outside of Terraform), thereby hopefully replacing it with a - // fully-functional new object. - ForceReplace []addrs.AbsResourceInstance -} - -// Plan generates an execution plan by comparing the given configuration -// with the given previous run state. -// -// The given planning options allow control of various other details of the -// planning process that are not represented directly in the configuration. -// You can use terraform.DefaultPlanOpts to generate a normal plan with no -// special options. -// -// If the returned diagnostics contains no errors then the returned plan is -// applyable, although Terraform cannot guarantee that applying it will fully -// succeed. If the returned diagnostics contains errors but this method -// still returns a non-nil Plan then the plan describes the subset of actions -// planned so far, which is not safe to apply but could potentially be used -// by the UI layer to give extra context to support understanding of the -// returned error messages. -func (c *Context) Plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) { - defer c.acquireRun("plan")() - var diags tfdiags.Diagnostics - - // Save the downstream functions from needing to deal with these broken situations. - // No real callers should rely on these, but we have a bunch of old and - // sloppy tests that don't always populate arguments properly. 
- if config == nil { - config = configs.NewEmptyConfig() - } - if prevRunState == nil { - prevRunState = states.NewState() - } - if opts == nil { - opts = &PlanOpts{ - Mode: plans.NormalMode, - } - } - - moreDiags := c.checkConfigDependencies(config) - diags = diags.Append(moreDiags) - // If required dependencies are not available then we'll bail early since - // otherwise we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return nil, diags - } - - switch opts.Mode { - case plans.NormalMode, plans.DestroyMode: - // OK - case plans.RefreshOnlyMode: - if opts.SkipRefresh { - // The CLI layer (and other similar callers) should prevent this - // combination of options. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Incompatible plan options", - "Cannot skip refreshing in refresh-only mode. This is a bug in Terraform.", - )) - return nil, diags - } - default: - // The CLI layer (and other similar callers) should not try to - // create a context for a mode that Terraform Core doesn't support. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported plan mode", - fmt.Sprintf("Terraform Core doesn't know how to handle plan mode %s. This is a bug in Terraform.", opts.Mode), - )) - return nil, diags - } - if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode { - // The other modes don't generate no-op or update actions that we might - // upgrade to be "replace", so doesn't make sense to combine those. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported plan mode", - "Forcing resource instance replacement (with -replace=...) is allowed only in normal planning mode.", - )) - return nil, diags - } - - // By the time we get here, we should have values defined for all of - // the root module variables, even if some of them are "unknown". 
It's the - // caller's responsibility to have already handled the decoding of these - // from the various ways the CLI allows them to be set and to produce - // user-friendly error messages if they are not all present, and so - // the error message from checkInputVariables should never be seen and - // includes language asking the user to report a bug. - varDiags := checkInputVariables(config.Module.Variables, opts.SetVariables) - diags = diags.Append(varDiags) - - if len(opts.Targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - var plan *plans.Plan - var planDiags tfdiags.Diagnostics - switch opts.Mode { - case plans.NormalMode: - plan, planDiags = c.plan(config, prevRunState, opts) - case plans.DestroyMode: - plan, planDiags = c.destroyPlan(config, prevRunState, opts) - case plans.RefreshOnlyMode: - plan, planDiags = c.refreshOnlyPlan(config, prevRunState, opts) - default: - panic(fmt.Sprintf("unsupported plan mode %s", opts.Mode)) - } - diags = diags.Append(planDiags) - // NOTE: We're intentionally not returning early when diags.HasErrors - // here because we'll still populate other metadata below on a best-effort - // basis to try to give the UI some extra context to return alongside the - // error messages. 
- - // convert the variables into the format expected for the plan - varVals := make(map[string]plans.DynamicValue, len(opts.SetVariables)) - for k, iv := range opts.SetVariables { - if iv.Value == cty.NilVal { - continue // We only record values that the caller actually set - } - - // We use cty.DynamicPseudoType here so that we'll save both the - // value _and_ its dynamic type in the plan, so we can recover - // exactly the same value later. - dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to prepare variable value for plan", - fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), - )) - continue - } - varVals[k] = dv - } - - // insert the run-specific data from the context into the plan; variables, - // targets and provider SHAs. - if plan != nil { - plan.VariableValues = varVals - plan.TargetAddrs = opts.Targets - } else if !diags.HasErrors() { - panic("nil plan but no errors") - } - - if plan != nil { - relevantAttrs, rDiags := c.relevantResourceAttrsForPlan(config, plan) - diags = diags.Append(rDiags) - plan.RelevantAttributes = relevantAttrs - } - - if diags.HasErrors() { - // We can't proceed further with an invalid plan, because an invalid - // plan isn't applyable by definition. - if plan != nil { - // We'll explicitly mark our plan as errored so that it can't - // be accidentally applied even though it's incomplete. - plan.Errored = true - } - return plan, diags - } - - diags = diags.Append(c.checkApplyGraph(plan, config)) - - return plan, diags -} - -// checkApplyGraph builds the apply graph out of the current plan to -// check for any errors that may arise once the planned changes are added to -// the graph. 
This allows terraform to report errors (mostly cycles) during -// plan that would otherwise only crop up during apply -func (c *Context) checkApplyGraph(plan *plans.Plan, config *configs.Config) tfdiags.Diagnostics { - if plan.Changes.Empty() { - log.Println("[DEBUG] no planned changes, skipping apply graph check") - return nil - } - log.Println("[DEBUG] building apply graph to check for errors") - _, _, diags := c.applyGraph(plan, config, true) - return diags -} - -var DefaultPlanOpts = &PlanOpts{ - Mode: plans.NormalMode, -} - -// SimplePlanOpts is a constructor to help with creating "simple" values of -// PlanOpts which only specify a mode and input variables. -// -// This helper function is primarily intended for use in straightforward -// tests that don't need any of the more "esoteric" planning options. For -// handling real user requests to run Terraform, it'd probably be better -// to construct a *PlanOpts value directly and provide a way for the user -// to set values for all of its fields. -// -// The "mode" and "setVariables" arguments become the values of the "Mode" -// and "SetVariables" fields in the result. Refer to the PlanOpts type -// documentation to learn about the meanings of those fields. 
-func SimplePlanOpts(mode plans.Mode, setVariables InputValues) *PlanOpts { - return &PlanOpts{ - Mode: mode, - SetVariables: setVariables, - } -} - -func (c *Context) plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if opts.Mode != plans.NormalMode { - panic(fmt.Sprintf("called Context.plan with %s", opts.Mode)) - } - - plan, walkDiags := c.planWalk(config, prevRunState, opts) - diags = diags.Append(walkDiags) - - return plan, diags -} - -func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if opts.Mode != plans.RefreshOnlyMode { - panic(fmt.Sprintf("called Context.refreshOnlyPlan with %s", opts.Mode)) - } - - plan, walkDiags := c.planWalk(config, prevRunState, opts) - diags = diags.Append(walkDiags) - if diags.HasErrors() { - // Non-nil plan along with errors indicates a non-applyable partial - // plan that's only suitable to be shown to the user as extra context - // to help understand the errors. - return plan, diags - } - - // If the graph builder and graph nodes correctly obeyed our directive - // to refresh only, the set of resource changes should always be empty. - // We'll safety-check that here so we can return a clear message about it, - // rather than probably just generating confusing output at the UI layer. - if len(plan.Changes.Resources) != 0 { - // Some extra context in the logs in case the user reports this message - // as a bug, as a starting point for debugging. 
- for _, rc := range plan.Changes.Resources { - if depKey := rc.DeposedKey; depKey == states.NotDeposed { - log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr) - } else { - log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey) - } - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid refresh-only plan", - "Terraform generated planned resource changes in a refresh-only plan. This is a bug in Terraform.", - )) - } - - // We don't populate RelevantResources for a refresh-only plan, because - // they never have any planned actions and so no resource can ever be - // "relevant" per the intended meaning of that field. - - return plan, diags -} - -func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if opts.Mode != plans.DestroyMode { - panic(fmt.Sprintf("called Context.destroyPlan with %s", opts.Mode)) - } - - priorState := prevRunState - - // A destroy plan starts by running Refresh to read any pending data - // sources, and remove missing managed resources. This is required because - // a "destroy plan" is only creating delete changes, and is essentially a - // local operation. - // - // NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk - // below to upgrade the prevRunState and priorState both to the latest - // resource type schemas, so NodePlanDestroyableResourceInstance.Execute - // must coordinate with this by taking that action only when c.skipRefresh - // _is_ set. This coupling between the two is unfortunate but necessary - // to work within our current structure. 
- if !opts.SkipRefresh && !prevRunState.Empty() { - log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state") - refreshOpts := *opts - refreshOpts.Mode = plans.NormalMode - refreshOpts.PreDestroyRefresh = true - - // FIXME: A normal plan is required here to refresh the state, because - // the state and configuration may not match during a destroy, and a - // normal refresh plan can fail with evaluation errors. In the future - // the destroy plan should take care of refreshing instances itself, - // where the special cases of evaluation and skipping condition checks - // can be done. - refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts) - if refreshDiags.HasErrors() { - // NOTE: Normally we'd append diagnostics regardless of whether - // there are errors, just in case there are warnings we'd want to - // preserve, but we're intentionally _not_ doing that here because - // if the first plan succeeded then we'll be running another plan - // in DestroyMode below, and we don't want to double-up any - // warnings that both plan walks would generate. - // (This does mean we won't show any warnings that would've been - // unique to only this walk, but we're assuming here that if the - // warnings aren't also applicable to a destroy plan then we'd - // rather not show them here, because this non-destroy plan for - // refreshing is largely an implementation detail.) - diags = diags.Append(refreshDiags) - return nil, diags - } - - // We'll use the refreshed state -- which is the "prior state" from - // the perspective of this "destroy plan" -- as the starting state - // for our destroy-plan walk, so it can take into account if we - // detected during refreshing that anything was already deleted outside - // of Terraform. - priorState = refreshPlan.PriorState.DeepCopy() - - // The refresh plan may have upgraded state for some resources, make - // sure we store the new version. 
- prevRunState = refreshPlan.PrevRunState.DeepCopy() - log.Printf("[TRACE] Context.destroyPlan: now _really_ creating a destroy plan") - } - - destroyPlan, walkDiags := c.planWalk(config, priorState, opts) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - // Non-nil plan along with errors indicates a non-applyable partial - // plan that's only suitable to be shown to the user as extra context - // to help understand the errors. - return destroyPlan, diags - } - - if !opts.SkipRefresh { - // If we didn't skip refreshing then we want the previous run state to - // be the one we originally fed into the c.refreshOnlyPlan call above, - // not the refreshed version we used for the destroy planWalk. - destroyPlan.PrevRunState = prevRunState - } - - relevantAttrs, rDiags := c.relevantResourceAttrsForPlan(config, destroyPlan) - diags = diags.Append(rDiags) - - destroyPlan.RelevantAttributes = relevantAttrs - return destroyPlan, diags -} - -func (c *Context) prePlanFindAndApplyMoves(config *configs.Config, prevRunState *states.State, targets []addrs.Targetable) ([]refactoring.MoveStatement, refactoring.MoveResults) { - explicitMoveStmts := refactoring.FindMoveStatements(config) - implicitMoveStmts := refactoring.ImpliedMoveStatements(config, prevRunState, explicitMoveStmts) - var moveStmts []refactoring.MoveStatement - if stmtsLen := len(explicitMoveStmts) + len(implicitMoveStmts); stmtsLen > 0 { - moveStmts = make([]refactoring.MoveStatement, 0, stmtsLen) - moveStmts = append(moveStmts, explicitMoveStmts...) - moveStmts = append(moveStmts, implicitMoveStmts...) 
- } - moveResults := refactoring.ApplyMoves(moveStmts, prevRunState) - return moveStmts, moveResults -} - -func (c *Context) prePlanVerifyTargetedMoves(moveResults refactoring.MoveResults, targets []addrs.Targetable) tfdiags.Diagnostics { - if len(targets) < 1 { - return nil // the following only matters when targeting - } - - var diags tfdiags.Diagnostics - - var excluded []addrs.AbsResourceInstance - for _, result := range moveResults.Changes.Values() { - fromMatchesTarget := false - toMatchesTarget := false - for _, targetAddr := range targets { - if targetAddr.TargetContains(result.From) { - fromMatchesTarget = true - } - if targetAddr.TargetContains(result.To) { - toMatchesTarget = true - } - } - if !fromMatchesTarget { - excluded = append(excluded, result.From) - } - if !toMatchesTarget { - excluded = append(excluded, result.To) - } - } - if len(excluded) > 0 { - sort.Slice(excluded, func(i, j int) bool { - return excluded[i].Less(excluded[j]) - }) - - var listBuf strings.Builder - var prevResourceAddr addrs.AbsResource - for _, instAddr := range excluded { - // Targeting generally ends up selecting whole resources rather - // than individual instances, because we don't factor in - // individual instances until DynamicExpand, so we're going to - // always show whole resource addresses here, excluding any - // instance keys. (This also neatly avoids dealing with the - // different quoting styles required for string instance keys - // on different shells, which is handy.) - // - // To avoid showing duplicates when we have multiple instances - // of the same resource, we'll remember the most recent - // resource we rendered in prevResource, which is sufficient - // because we sorted the list of instance addresses above, and - // our sort order always groups together instances of the same - // resource. 
- resourceAddr := instAddr.ContainingResource() - if resourceAddr.Equal(prevResourceAddr) { - continue - } - fmt.Fprintf(&listBuf, "\n -target=%q", resourceAddr.String()) - prevResourceAddr = resourceAddr - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Moved resource instances excluded by targeting", - fmt.Sprintf( - "Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... options to not fully cover all of those resource instances.\n\nTo create a valid plan, either remove your -target=... options altogether or add the following additional target options:%s\n\nNote that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.", - listBuf.String(), - ), - )) - } - - return diags -} - -func (c *Context) postPlanValidateMoves(config *configs.Config, stmts []refactoring.MoveStatement, allInsts instances.Set) tfdiags.Diagnostics { - return refactoring.ValidateMoves(stmts, config, allInsts) -} - -func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - log.Printf("[DEBUG] Building and walking plan graph for %s", opts.Mode) - - prevRunState = prevRunState.DeepCopy() // don't modify the caller's object when we process the moves - moveStmts, moveResults := c.prePlanFindAndApplyMoves(config, prevRunState, opts.Targets) - - // If resource targeting is in effect then it might conflict with the - // move result. - diags = diags.Append(c.prePlanVerifyTargetedMoves(moveResults, opts.Targets)) - if diags.HasErrors() { - // We'll return early here, because if we have any moved resource - // instances excluded by targeting then planning is likely to encounter - // strange problems that may lead to confusing error messages. 
- return nil, diags - } - - graph, walkOp, moreDiags := c.planGraph(config, prevRunState, opts) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - return nil, diags - } - - // If we get here then we should definitely have a non-nil "graph", which - // we can now walk. - changes := plans.NewChanges() - walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{ - Config: config, - InputState: prevRunState, - Changes: changes, - MoveResults: moveResults, - }) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - moveValidateDiags := c.postPlanValidateMoves(config, moveStmts, walker.InstanceExpander.AllInstances()) - if moveValidateDiags.HasErrors() { - // If any of the move statements are invalid then those errors take - // precedence over any other errors because an incomplete move graph - // is quite likely to be the _cause_ of various errors. This oddity - // comes from the fact that we need to apply the moves before we - // actually validate them, because validation depends on the result - // of first trying to plan. - return nil, moveValidateDiags - } - diags = diags.Append(moveValidateDiags) // might just contain warnings - - if moveResults.Blocked.Len() > 0 && !diags.HasErrors() { - // If we had blocked moves and we're not going to be returning errors - // then we'll report the blockers as a warning. We do this only in the - // absense of errors because invalid move statements might well be - // the root cause of the blockers, and so better to give an actionable - // error message than a less-actionable warning. - diags = diags.Append(blockedMovesWarningDiag(moveResults)) - } - - // If we reach this point with error diagnostics then "changes" is a - // representation of the subset of changes we were able to plan before - // we encountered errors, which we'll return as part of a non-nil plan - // so that e.g. 
the UI can show what was planned so far in case that extra - // context helps the user to understand the error messages we're returning. - prevRunState = walker.PrevRunState.Close() - - // The refreshed state may have data resource objects which were deferred - // to apply and cannot be serialized. - walker.RefreshState.RemovePlannedResourceInstanceObjects() - priorState := walker.RefreshState.Close() - - driftedResources, driftDiags := c.driftedResources(config, prevRunState, priorState, moveResults) - diags = diags.Append(driftDiags) - - plan := &plans.Plan{ - UIMode: opts.Mode, - Changes: changes, - DriftedResources: driftedResources, - PrevRunState: prevRunState, - PriorState: priorState, - Checks: states.NewCheckResults(walker.Checks), - - // Other fields get populated by Context.Plan after we return - } - return plan, diags -} - -func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*Graph, walkOperation, tfdiags.Diagnostics) { - switch mode := opts.Mode; mode { - case plans.NormalMode: - graph, diags := (&PlanGraphBuilder{ - Config: config, - State: prevRunState, - RootVariableValues: opts.SetVariables, - Plugins: c.plugins, - Targets: opts.Targets, - ForceReplace: opts.ForceReplace, - skipRefresh: opts.SkipRefresh, - preDestroyRefresh: opts.PreDestroyRefresh, - Operation: walkPlan, - }).Build(addrs.RootModuleInstance) - return graph, walkPlan, diags - case plans.RefreshOnlyMode: - graph, diags := (&PlanGraphBuilder{ - Config: config, - State: prevRunState, - RootVariableValues: opts.SetVariables, - Plugins: c.plugins, - Targets: opts.Targets, - skipRefresh: opts.SkipRefresh, - skipPlanChanges: true, // this activates "refresh only" mode. 
- Operation: walkPlan, - }).Build(addrs.RootModuleInstance) - return graph, walkPlan, diags - case plans.DestroyMode: - graph, diags := (&PlanGraphBuilder{ - Config: config, - State: prevRunState, - RootVariableValues: opts.SetVariables, - Plugins: c.plugins, - Targets: opts.Targets, - skipRefresh: opts.SkipRefresh, - Operation: walkPlanDestroy, - }).Build(addrs.RootModuleInstance) - return graph, walkPlanDestroy, diags - default: - // The above should cover all plans.Mode values - panic(fmt.Sprintf("unsupported plan mode %s", mode)) - } -} - -// driftedResources is a best-effort attempt to compare the current and prior -// state. If we cannot decode the prior state for some reason, this should only -// return warnings to help the user correlate any missing resources in the -// report. This is known to happen when targeting a subset of resources, -// because the excluded instances will have been removed from the plan and -// not upgraded. -func (c *Context) driftedResources(config *configs.Config, oldState, newState *states.State, moves refactoring.MoveResults) ([]*plans.ResourceInstanceChangeSrc, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if newState.ManagedResourcesEqual(oldState) && moves.Changes.Len() == 0 { - // Nothing to do, because we only detect and report drift for managed - // resource instances. 
- return nil, diags - } - - schemas, schemaDiags := c.Schemas(config, newState) - diags = diags.Append(schemaDiags) - if diags.HasErrors() { - return nil, diags - } - - var drs []*plans.ResourceInstanceChangeSrc - - for _, ms := range oldState.Modules { - for _, rs := range ms.Resources { - if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { - // Drift reporting is only for managed resources - continue - } - - provider := rs.ProviderConfig.Provider - for key, oldIS := range rs.Instances { - if oldIS.Current == nil { - // Not interested in instances that only have deposed objects - continue - } - addr := rs.Addr.Instance(key) - - // Previous run address defaults to the current address, but - // can differ if the resource moved before refreshing - prevRunAddr := addr - if move, ok := moves.Changes.GetOk(addr); ok { - prevRunAddr = move.From - } - - newIS := newState.ResourceInstance(addr) - - schema, _ := schemas.ResourceTypeConfig( - provider, - addr.Resource.Resource.Mode, - addr.Resource.Resource.Type, - ) - if schema == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Missing resource schema from provider", - fmt.Sprintf("No resource schema found for %s when decoding prior state", addr.Resource.Resource.Type), - )) - continue - } - ty := schema.ImpliedType() - - oldObj, err := oldIS.Current.Decode(ty) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Failed to decode resource from state", - fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err), - )) - continue - } - - var newObj *states.ResourceInstanceObject - if newIS != nil && newIS.Current != nil { - newObj, err = newIS.Current.Decode(ty) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Failed to decode resource from state", - fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err), - )) - continue - } - } - - var oldVal, newVal cty.Value - oldVal = oldObj.Value - if newObj != nil 
{ - newVal = newObj.Value - } else { - newVal = cty.NullVal(ty) - } - - if oldVal.RawEquals(newVal) && addr.Equal(prevRunAddr) { - // No drift if the two values are semantically equivalent - // and no move has happened - continue - } - - // We can detect three types of changes after refreshing state, - // only two of which are easily understood as "drift": - // - // - Resources which were deleted outside of Terraform; - // - Resources where the object value has changed outside of - // Terraform; - // - Resources which have been moved without other changes. - // - // All of these are returned as drift, to allow refresh-only plans - // to present a full set of changes which will be applied. - var action plans.Action - switch { - case newVal.IsNull(): - action = plans.Delete - case !oldVal.RawEquals(newVal): - action = plans.Update - default: - action = plans.NoOp - } - - change := &plans.ResourceInstanceChange{ - Addr: addr, - PrevRunAddr: prevRunAddr, - ProviderAddr: rs.ProviderConfig, - Change: plans.Change{ - Action: action, - Before: oldVal, - After: newVal, - }, - } - - changeSrc, err := change.Encode(ty) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - drs = append(drs, changeSrc) - } - } - } - - return drs, diags -} - -// PlanGraphForUI is a last vestage of graphs in the public interface of Context -// (as opposed to graphs as an implementation detail) intended only for use -// by the "terraform graph" command when asked to render a plan-time graph. -// -// The result of this is intended only for rendering ot the user as a dot -// graph, and so may change in future in order to make the result more useful -// in that context, even if drifts away from the physical graph that Terraform -// Core currently uses as an implementation detail of planning. 
-func (c *Context) PlanGraphForUI(config *configs.Config, prevRunState *states.State, mode plans.Mode) (*Graph, tfdiags.Diagnostics) { - // For now though, this really is just the internal graph, confusing - // implementation details and all. - - var diags tfdiags.Diagnostics - - opts := &PlanOpts{Mode: mode} - - graph, _, moreDiags := c.planGraph(config, prevRunState, opts) - diags = diags.Append(moreDiags) - return graph, diags -} - -func blockedMovesWarningDiag(results refactoring.MoveResults) tfdiags.Diagnostic { - if results.Blocked.Len() < 1 { - // Caller should check first - panic("request to render blocked moves warning without any blocked moves") - } - - var itemsBuf bytes.Buffer - for _, blocked := range results.Blocked.Values() { - fmt.Fprintf(&itemsBuf, "\n - %s could not move to %s", blocked.Actual, blocked.Wanted) - } - - return tfdiags.Sourceless( - tfdiags.Warning, - "Unresolved resource instance address changes", - fmt.Sprintf( - "Terraform tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses:%s\n\nTerraform has planned to destroy these objects. If Terraform's proposed changes aren't appropriate, you must first resolve the conflicts using the \"terraform state\" subcommands and then create a new plan.", - itemsBuf.String(), - ), - ) -} - -// referenceAnalyzer returns a globalref.Analyzer object to help with -// global analysis of references within the configuration that's attached -// to the receiving context. 
-func (c *Context) referenceAnalyzer(config *configs.Config, state *states.State) (*globalref.Analyzer, tfdiags.Diagnostics) { - schemas, diags := c.Schemas(config, state) - if diags.HasErrors() { - return nil, diags - } - return globalref.NewAnalyzer(config, schemas.Providers), diags -} - -// relevantResourcesForPlan implements the heuristic we use to populate the -// RelevantResources field of returned plans. -func (c *Context) relevantResourceAttrsForPlan(config *configs.Config, plan *plans.Plan) ([]globalref.ResourceAttr, tfdiags.Diagnostics) { - azr, diags := c.referenceAnalyzer(config, plan.PriorState) - if diags.HasErrors() { - return nil, diags - } - - var refs []globalref.Reference - for _, change := range plan.Changes.Resources { - if change.Action == plans.NoOp { - continue - } - - moreRefs := azr.ReferencesFromResourceInstance(change.Addr) - refs = append(refs, moreRefs...) - } - - for _, change := range plan.Changes.Outputs { - if change.Action == plans.NoOp { - continue - } - - moreRefs := azr.ReferencesFromOutputValue(change.Addr) - refs = append(refs, moreRefs...) - } - - var contributors []globalref.ResourceAttr - - for _, ref := range azr.ContributingResourceReferences(refs...) 
{ - if res, ok := ref.ResourceAttr(); ok { - contributors = append(contributors, res) - } - } - - return contributors, diags -} diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go deleted file mode 100644 index 4e3d52f0793b..000000000000 --- a/internal/terraform/context_plan2_test.go +++ /dev/null @@ -1,4037 +0,0 @@ -package terraform - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestContext2Plan_removedDuringRefresh(t *testing.T) { - // This tests the situation where an object tracked in the previous run - // state has been deleted outside of Terraform, which we should detect - // during the refresh step and thus ultimately produce a plan to recreate - // the object, since it's still present in the configuration. 
- m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -`, - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - resp.NewState = cty.NullVal(req.PriorState.Type()) - return resp - } - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - // We should've been given the prior state JSON as our input to upgrade. - if !bytes.Contains(req.RawStateJSON, []byte("previous_run")) { - t.Fatalf("UpgradeResourceState request doesn't contain the previous run object\n%s", req.RawStateJSON) - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. 
- resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - addr := mustResourceInstanceAddr("test_object.a") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"previous_run"}`), - Status: states.ObjectTainted, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.UpgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - - // The object should be absent from the plan's prior state, because that - // records the result of refreshing. - if got := plan.PriorState.ResourceInstance(addr); got != nil { - t.Errorf( - "instance %s is in the prior state after planning; should've been removed\n%s", - addr, spew.Sdump(got), - ) - } - - // However, the object should still be in the PrevRunState, because - // that reflects what we believed to exist before refreshing. - if got := plan.PrevRunState.ResourceInstance(addr); got == nil { - t.Errorf( - "instance %s is missing from the previous run state after planning; should've been preserved", - addr, - ) - } else { - if !bytes.Contains(got.Current.AttrsJSON, []byte("upgraded")) { - t.Fatalf("previous run state has non-upgraded object\n%s", got.Current.AttrsJSON) - } - } - - // This situation should result in a drifted resource change. 
- var drifted *plans.ResourceInstanceChangeSrc - for _, dr := range plan.DriftedResources { - if dr.Addr.Equal(addr) { - drifted = dr - break - } - } - - if drifted == nil { - t.Errorf("instance %s is missing from the drifted resource changes", addr) - } else { - if got, want := drifted.Action, plans.Delete; got != want { - t.Errorf("unexpected instance %s drifted resource change action. got: %s, want: %s", addr, got, want) - } - } - - // Because the configuration still mentions test_object.a, we should've - // planned to recreate it in order to fix the drift. - for _, c := range plan.Changes.Resources { - if c.Action != plans.Create { - t.Fatalf("expected Create action for missing %s, got %s", c.Addr, c.Action) - } - } -} - -func TestContext2Plan_noChangeDataSourceSensitiveNestedSet(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "bar" { - sensitive = true - default = "baz" -} - -data "test_data_source" "foo" { - foo { - bar = var.bar - } -} -`, - }) - - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - DataSources: map[string]*configschema.Block{ - "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }) - - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_id"), - "foo": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")})}), - }), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("data.test_data_source.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"data_id", "foo":[{"bar":"baz"}]}`), - AttrSensitivePaths: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("foo"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - assertNoErrors(t, diags) - - for _, res := range plan.Changes.Resources { - if res.Action != plans.NoOp { - t.Fatalf("expected NoOp, got: %q %s", res.Addr, res.Action) - } - } -} - -func TestContext2Plan_orphanDataInstance(t *testing.T) { - // ensure the planned replacement of the data source is evaluated properly - m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "test_object" "a" { - for_each = { new = "ok" } -} - -output "out" { - value = [ for k, _ in data.test_object.a: k ] -} -`, - }) - - p := simpleMockProvider() - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = req.Config - return resp - } - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`data.test_object.a["old"]`), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"test_string":"foo"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - change, err := 
plan.Changes.Outputs[0].Decode() - if err != nil { - t.Fatal(err) - } - - expected := cty.TupleVal([]cty.Value{cty.StringVal("new")}) - - if change.After.Equals(expected).False() { - t.Fatalf("expected %#v, got %#v\n", expected, change.After) - } -} - -func TestContext2Plan_basicConfigurationAliases(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -provider "test" { - alias = "z" - test_string = "config" -} - -module "mod" { - source = "./mod" - providers = { - test.x = test.z - } -} -`, - - "mod/main.tf": ` -terraform { - required_providers { - test = { - source = "registry.terraform.io/hashicorp/test" - configuration_aliases = [ test.x ] - } - } -} - -resource "test_object" "a" { - provider = test.x -} - -`, - }) - - p := simpleMockProvider() - - // The resource within the module should be using the provider configured - // from the root module. We should never see an empty configuration. - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - if req.Config.GetAttr("test_string").IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing test_string value")) - } - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) -} - -func TestContext2Plan_dataReferencesResourceInModules(t *testing.T) { - p := testProvider("test") - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - cfg := req.Config.AsValueMap() - cfg["id"] = cty.StringVal("d") - resp.State = cty.ObjectVal(cfg) - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - things = { - old = "first" - new = "second" - } -} - -module "mod" { - source = "./mod" - for_each = local.things -} -`, - - 
"./mod/main.tf": ` -resource "test_resource" "a" { -} - -data "test_data_source" "d" { - depends_on = [test_resource.a] -} - -resource "test_resource" "b" { - value = data.test_data_source.d.id -} -`}) - - oldDataAddr := mustResourceInstanceAddr(`module.mod["old"].data.test_data_source.d`) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`module.mod["old"].test_resource.a`), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"a"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - s.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`module.mod["old"].test_resource.b`), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"b","value":"d"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - s.SetResourceInstanceCurrent( - oldDataAddr, - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"d"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - oldMod := oldDataAddr.Module - - for _, c := range plan.Changes.Resources { - // there should be no changes from the old module instance - if c.Addr.Module.Equal(oldMod) && c.Action != plans.NoOp { - t.Errorf("unexpected change %s for %s\n", c.Action, c.Addr) - } - } -} - -func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) { - // When a resource is in a nested module we have two levels of expansion - // to do: first expand the module the resource is declared in, and then - // expand the resource itself. 
- // - // In earlier versions of Terraform we did that expansion as two levels - // of DynamicExpand, which led to a bug where we didn't have any central - // location from which to register all of the instances of a checkable - // resource. - // - // We now handle the full expansion all in one graph node and one dynamic - // subgraph, which avoids the problem. This is a regression test for the - // earlier bug. If this test is panicking with "duplicate checkable objects - // report" then that suggests the bug is reintroduced and we're now back - // to reporting each module instance separately again, which is incorrect. - - p := testProvider("test") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{}, - }, - ResourceTypes: map[string]providers.Schema{ - "test": { - Block: &configschema.Block{}, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - resp.NewState = req.PriorState - return resp - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = cty.EmptyObjectVal - return resp - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - resp.NewState = req.PlannedState - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` - module "child" { - source = "./child" - count = 2 # must be at least 2 for this test to be valid - } - `, - "child/child.tf": ` - locals { - a = "a" - } - - resource "test" "test1" { - lifecycle { - postcondition { - # It doesn't matter what this checks as long as it - # passes, because if we don't handle expansion properly - # then we'll crash before we even get to evaluating this. - condition = local.a == local.a - error_message = "Postcondition failed." 
- } - } - } - - resource "test" "test2" { - count = 2 - - lifecycle { - postcondition { - # It doesn't matter what this checks as long as it - # passes, because if we don't handle expansion properly - # then we'll crash before we even get to evaluating this. - condition = local.a == local.a - error_message = "Postcondition failed." - } - } - } - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - priorState := states.NewState() - plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts) - assertNoErrors(t, diags) - - resourceInsts := []addrs.AbsResourceInstance{ - mustResourceInstanceAddr("module.child[0].test.test1"), - mustResourceInstanceAddr("module.child[0].test.test2[0]"), - mustResourceInstanceAddr("module.child[0].test.test2[1]"), - mustResourceInstanceAddr("module.child[1].test.test1"), - mustResourceInstanceAddr("module.child[1].test.test2[0]"), - mustResourceInstanceAddr("module.child[1].test.test2[1]"), - } - - for _, instAddr := range resourceInsts { - t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) { - if rc := plan.Changes.ResourceInstance(instAddr); rc != nil { - if got, want := rc.Action, plans.Create; got != want { - t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want) - } - if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want) - } - } else { - t.Errorf("no planned change for %s", instAddr) - } - - if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil { - if got, want := checkResult.Status, checks.StatusPass; got != want { - t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want) - } - } else { - t.Errorf("no check result for %s", instAddr) - } - }) - } -} - -func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) { - 
// This tests the situation where the remote system contains data that - // isn't valid per a data resource postcondition, but that the - // configuration is destined to make the remote system valid during apply - // and so we must defer reading the data resource and checking its - // conditions until the apply step. - // - // This is an exception to the rule tested in - // TestContext2Plan_dataReferencesResourceIndirectly which is relevant - // whenever there's at least one precondition or postcondition attached - // to a data resource. - // - // See TestContext2Plan_managedResourceChecksOtherManagedResourceChange for - // an incorrect situation where a data resource is used only indirectly - // to drive a precondition elsewhere, which therefore doesn't achieve this - // special exception. - - p := testProvider("test") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{}, - }, - ResourceTypes: map[string]providers.Schema{ - "test_resource": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "valid": { - Type: cty.Bool, - Required: true, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Required: true, - }, - "valid": { - Type: cty.Bool, - Computed: true, - }, - }, - }, - }, - }, - } - var mu sync.Mutex - validVal := cty.False - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - // NOTE: This assumes that the prior state declared below will have - // "valid" set to false already, and thus will match validVal above. 
- resp.NewState = req.PriorState - return resp - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - cfg := req.Config.AsValueMap() - mu.Lock() - cfg["valid"] = validVal - mu.Unlock() - resp.State = cty.ObjectVal(cfg) - return resp - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - cfg := req.Config.AsValueMap() - prior := req.PriorState.AsValueMap() - resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ - "id": prior["id"], - "valid": cfg["valid"], - }) - return resp - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - planned := req.PlannedState.AsValueMap() - - mu.Lock() - validVal = planned["valid"] - mu.Unlock() - - resp.NewState = req.PlannedState - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` - -resource "test_resource" "a" { - valid = true -} - -locals { - # NOTE: We intentionally read through a local value here to make sure - # that this behavior still works even if there isn't a direct dependency - # between the data resource and the managed resource. - object_id = test_resource.a.id -} - -data "test_data_source" "a" { - id = local.object_id - - lifecycle { - postcondition { - condition = self.valid - error_message = "Not valid!" - } - } -} -`}) - - managedAddr := mustResourceInstanceAddr(`test_resource.a`) - dataAddr := mustResourceInstanceAddr(`data.test_data_source.a`) - - // This state is intended to represent the outcome of a previous apply that - // failed due to postcondition failure but had already updated the - // relevant object to be invalid. 
- // - // It could also potentially represent a similar situation where the - // previous apply succeeded but there has been a change outside of - // Terraform that made it invalid, although technically in that scenario - // the state data would become invalid only during the planning step. For - // our purposes here that's close enough because we don't have a real - // remote system in place anyway. - priorState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - managedAddr, - &states.ResourceInstanceObjectSrc{ - // NOTE: "valid" is false here but is true in the configuration - // above, which is intended to represent that applying the - // configuration change would make this object become valid. - AttrsJSON: []byte(`{"id":"boop","valid":false}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts) - assertNoErrors(t, diags) - - if rc := plan.Changes.ResourceInstance(dataAddr); rc != nil { - if got, want := rc.Action, plans.Read; got != want { - t.Errorf("wrong action for %s\ngot: %s\nwant: %s", dataAddr, got, want) - } - if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseDependencyPending; got != want { - t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", dataAddr, got, want) - } - } else { - t.Fatalf("no planned change for %s", dataAddr) - } - - if rc := plan.Changes.ResourceInstance(managedAddr); rc != nil { - if got, want := rc.Action, plans.Update; got != want { - t.Errorf("wrong action for %s\ngot: %s\nwant: %s", managedAddr, got, want) - } - if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", managedAddr, got, want) - } - } else 
{ - t.Fatalf("no planned change for %s", managedAddr) - } - - // This is primarily a plan-time test, since the special handling of - // data resources is a plan-time concern, but we'll still try applying the - // plan here just to make sure it's valid. - newState, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - if rs := newState.ResourceInstance(dataAddr); rs != nil { - if !rs.HasCurrent() { - t.Errorf("no final state for %s", dataAddr) - } - } else { - t.Errorf("no final state for %s", dataAddr) - } - - if rs := newState.ResourceInstance(managedAddr); rs != nil { - if !rs.HasCurrent() { - t.Errorf("no final state for %s", managedAddr) - } - } else { - t.Errorf("no final state for %s", managedAddr) - } - - if got, want := validVal, cty.True; got != want { - t.Errorf("wrong final valid value\ngot: %#v\nwant: %#v", got, want) - } - -} - -func TestContext2Plan_managedResourceChecksOtherManagedResourceChange(t *testing.T) { - // This tests the incorrect situation where a managed resource checks - // another managed resource indirectly via a data resource. - // This doesn't work because Terraform can't tell that the data resource - // outcome will be updated by a separate managed resource change and so - // we expect it to fail. - // This would ideally have worked except that we previously included a - // special case in the rules for data resources where they only consider - // direct dependencies when deciding whether to defer (except when the - // data resource itself has conditions) and so they can potentially - // read "too early" if the user creates the explicitly-not-recommended - // situation of a data resource and a managed resource in the same - // configuration both representing the same remote object. 
- - p := testProvider("test") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{}, - }, - ResourceTypes: map[string]providers.Schema{ - "test_resource": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "valid": { - Type: cty.Bool, - Required: true, - }, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "test_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Required: true, - }, - "valid": { - Type: cty.Bool, - Computed: true, - }, - }, - }, - }, - }, - } - var mu sync.Mutex - validVal := cty.False - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - // NOTE: This assumes that the prior state declared below will have - // "valid" set to false already, and thus will match validVal above. 
- resp.NewState = req.PriorState - return resp - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - cfg := req.Config.AsValueMap() - if cfg["id"].AsString() == "main" { - mu.Lock() - cfg["valid"] = validVal - mu.Unlock() - } - resp.State = cty.ObjectVal(cfg) - return resp - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - cfg := req.Config.AsValueMap() - prior := req.PriorState.AsValueMap() - resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ - "id": prior["id"], - "valid": cfg["valid"], - }) - return resp - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - planned := req.PlannedState.AsValueMap() - - if planned["id"].AsString() == "main" { - mu.Lock() - validVal = planned["valid"] - mu.Unlock() - } - - resp.NewState = req.PlannedState - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` - -resource "test_resource" "a" { - valid = true -} - -locals { - # NOTE: We intentionally read through a local value here because a - # direct reference from data.test_data_source.a to test_resource.a would - # cause Terraform to defer the data resource to the apply phase due to - # there being a pending change for the managed resource. We're explicitly - # testing the failure case where the data resource read happens too - # eagerly, which is what results from the reference being only indirect - # so Terraform can't "see" that the data resource result might be affected - # by changes to the managed resource. - object_id = test_resource.a.id -} - -data "test_data_source" "a" { - id = local.object_id -} - -resource "test_resource" "b" { - valid = true - - lifecycle { - precondition { - condition = data.test_data_source.a.valid - error_message = "Not valid!" 
- } - } -} -`}) - - managedAddrA := mustResourceInstanceAddr(`test_resource.a`) - managedAddrB := mustResourceInstanceAddr(`test_resource.b`) - - // This state is intended to represent the outcome of a previous apply that - // failed due to postcondition failure but had already updated the - // relevant object to be invalid. - // - // It could also potentially represent a similar situation where the - // previous apply succeeded but there has been a change outside of - // Terraform that made it invalid, although technically in that scenario - // the state data would become invalid only during the planning step. For - // our purposes here that's close enough because we don't have a real - // remote system in place anyway. - priorState := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - managedAddrA, - &states.ResourceInstanceObjectSrc{ - // NOTE: "valid" is false here but is true in the configuration - // above, which is intended to represent that applying the - // configuration change would make this object become valid. 
- AttrsJSON: []byte(`{"id":"main","valid":false}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - s.SetResourceInstanceCurrent( - managedAddrB, - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"checker","valid":true}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, priorState, DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("unexpected successful plan; should've failed with non-passing precondition") - } - - if got, want := diags.Err().Error(), "Resource precondition failed: Not valid!"; !strings.Contains(got, want) { - t.Errorf("Missing expected error message\ngot: %s\nwant substring: %s", got, want) - } -} - -func TestContext2Plan_destroyWithRefresh(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -`, - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - // This is called from the first instance of this provider, so we can't - // check p.ReadResourceCalled after plan. 
- readResourceCalled := false - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - readResourceCalled = true - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) { - return cty.StringVal("current"), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - - upgradeResourceStateCalled := false - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - upgradeResourceStateCalled = true - t.Logf("UpgradeResourceState %s", req.RawStateJSON) - - // In the destroy-with-refresh codepath we end up calling - // UpgradeResourceState twice, because we do so once during refreshing - // (as part making a normal plan) and then again during the plan-destroy - // walk. The second call recieves the result of the earlier refresh, - // so we need to tolerate both "before" and "current" as possible - // inputs here. - if !bytes.Contains(req.RawStateJSON, []byte("before")) { - if !bytes.Contains(req.RawStateJSON, []byte("current")) { - t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object or the 'current' object\n%s", req.RawStateJSON) - } - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. 
- resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - addr := mustResourceInstanceAddr("test_object.a") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - SkipRefresh: false, // the default - }) - assertNoErrors(t, diags) - - if !upgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if !readResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - - if plan.PriorState == nil { - t.Fatal("missing plan state") - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.Delete { - t.Errorf("unexpected %s change for %s", c.Action, c.Addr) - } - } - - if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no previous run state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object in the previous run state", addr) - } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { - t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - if instState := plan.PriorState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object in the prior state", addr) - } else if got, want := instState.Current.AttrsJSON, `"current"`; 
!bytes.Contains(got, []byte(want)) { - t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } -} - -func TestContext2Plan_destroySkipRefresh(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -`, - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - t.Helper() - t.Errorf("unexpected call to ReadResource") - resp.NewState = req.PriorState - return resp - } - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - t.Logf("UpgradeResourceState %s", req.RawStateJSON) - // We should've been given the prior state JSON as our input to upgrade. - if !bytes.Contains(req.RawStateJSON, []byte("before")) { - t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. 
- resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - addr := mustResourceInstanceAddr("test_object.a") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - SkipRefresh: true, - }) - assertNoErrors(t, diags) - - if !p.UpgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if p.ReadResourceCalled { - t.Errorf("Provider's ReadResource was called; shouldn't have been") - } - - if plan.PriorState == nil { - t.Fatal("missing plan state") - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.Delete { - t.Errorf("unexpected %s change for %s", c.Action, c.Addr) - } - } - - if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no previous run state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object in the previous run state", addr) - } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { - t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - if instState := plan.PriorState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object in the prior state", addr) - } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, 
[]byte(want)) { - // NOTE: The prior state should still have been _upgraded_, even - // though we skipped running refresh after upgrading it. - t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } -} - -func TestContext2Plan_unmarkingSensitiveAttributeForOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_resource" "foo" { -} - -output "result" { - value = nonsensitive(test_resource.foo.sensitive_attr) -} -`, - }) - - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "sensitive_attr": { - Type: cty.String, - Computed: true, - Sensitive: true, - }, - }, - }, - }, - }) - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: cty.UnknownVal(cty.Object(map[string]cty.Type{ - "id": cty.String, - "sensitive_attr": cty.String, - })), - } - } - - state := states.NewState() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected create, got: %q %s", res.Addr, res.Action) - } - } -} - -func TestContext2Plan_destroyNoProviderConfig(t *testing.T) { - // providers do not need to be configured during a destroy plan - p := simpleMockProvider() - p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - v := req.Config.GetAttr("test_string") - if v.IsNull() || 
!v.IsKnown() || v.AsString() != "ok" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid provider configuration: %#v", req.Config)) - } - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - value = "ok" -} - -provider "test" { - test_string = local.value -} -`, - }) - - addr := mustResourceInstanceAddr("test_object.a") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"test_string":"foo"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) -} - -func TestContext2Plan_movedResourceBasic(t *testing.T) { - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr("test_object.b") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "b" { - } - - moved { - from = test_object.a - to = test_object.b - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - // The prior state tracks test_object.a, which we should treat as - // test_object.b because of the "moved" block in the config. 
- s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - ForceReplace: []addrs.AbsResourceInstance{ - addrA, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - t.Run(addrA.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrA) - if instPlan != nil { - t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB) - } - }) - t.Run(addrB.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrB) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrB) - } - - if got, want := instPlan.Addr, addrB; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrA; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_movedResourceCollision(t *testing.T) { - addrNoKey := mustResourceInstanceAddr("test_object.a") - addrZeroKey := mustResourceInstanceAddr("test_object.a[0]") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - # No "count" set, so test_object.a[0] will want - # to implicitly move to test_object.a, but will get - # blocked by the existing object at that 
address. - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addrZeroKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - // We should have a warning, though! We'll lightly abuse the "for RPC" - // feature of diagnostics to get some more-readily-comparable diagnostic - // values. - gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Warning, - "Unresolved resource instance address changes", - `Terraform tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses: - - test_object.a[0] could not move to test_object.a - -Terraform has planned to destroy these objects. 
If Terraform's proposed changes aren't appropriate, you must first resolve the conflicts using the "terraform state" subcommands and then create a new plan.`, - ), - }.ForRPC() - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - t.Errorf("wrong diagnostics\n%s", diff) - } - - t.Run(addrNoKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrNoKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrNoKey) - } - - if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) - t.Run(addrZeroKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrZeroKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrZeroKey) - } - - if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.Delete; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_movedResourceCollisionDestroy(t *testing.T) { - // This is like TestContext2Plan_movedResourceCollision but intended to - // ensure we still produce the 
expected warning (and produce it only once) - // when we're creating a destroy plan, rather than a normal plan. - // (This case is interesting at the time of writing because we happen to - // use a normal plan as a trick to refresh before creating a destroy plan. - // This test will probably become uninteresting if a future change to - // the destroy-time planning behavior handles refreshing in a different - // way, which avoids this pre-processing step of running a normal plan - // first.) - - addrNoKey := mustResourceInstanceAddr("test_object.a") - addrZeroKey := mustResourceInstanceAddr("test_object.a[0]") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - # No "count" set, so test_object.a[0] will want - # to implicitly move to test_object.a, but will get - # blocked by the existing object at that address. - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addrZeroKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - // We should have a warning, though! We'll lightly abuse the "for RPC" - // feature of diagnostics to get some more-readily-comparable diagnostic - // values. 
- gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Warning, - "Unresolved resource instance address changes", - // NOTE: This message is _lightly_ confusing in the destroy case, - // because it says "Terraform has planned to destroy these objects" - // but this is a plan to destroy all objects, anyway. We expect the - // conflict situation to be pretty rare though, and even rarer in - // a "terraform destroy", so we'll just live with that for now - // unless we see evidence that lots of folks are being confused by - // it in practice. - `Terraform tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses: - - test_object.a[0] could not move to test_object.a - -Terraform has planned to destroy these objects. If Terraform's proposed changes aren't appropriate, you must first resolve the conflicts using the "terraform state" subcommands and then create a new plan.`, - ), - }.ForRPC() - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - // If we get here with a diff that makes it seem like the above warning - // is being reported twice, the likely cause is not correctly handling - // the warnings from the hidden normal plan we run as part of preparing - // for a destroy plan, unless that strategy has changed in the meantime - // since we originally wrote this test. 
- t.Errorf("wrong diagnostics\n%s", diff) - } - - t.Run(addrNoKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrNoKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrNoKey) - } - - if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.Delete; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) - t.Run(addrZeroKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrZeroKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrZeroKey) - } - - if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.Delete; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_movedResourceUntargeted(t *testing.T) { - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr("test_object.b") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "b" { - } - - moved { - from = test_object.a - to = test_object.b - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - // The prior state tracks 
test_object.a, which we should treat as - // test_object.b because of the "moved" block in the config. - s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - t.Run("without targeting instance A", func(t *testing.T) { - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - // NOTE: addrA isn't included here, but it's pending move to addrB - // and so this plan request is invalid. - addrB, - }, - }) - diags.Sort() - - // We're semi-abusing "ForRPC" here just to get diagnostics that are - // more easily comparable than the various different diagnostics types - // tfdiags uses internally. The RPC-friendly diagnostics are also - // comparison-friendly, by discarding all of the dynamic type information. - gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - ), - tfdiags.Sourceless( - tfdiags.Error, - "Moved resource instances excluded by targeting", - `Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... 
options to not fully cover all of those resource instances. - -To create a valid plan, either remove your -target=... options altogether or add the following additional target options: - -target="test_object.a" - -Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, - ), - }.ForRPC() - - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - t.Errorf("wrong diagnostics\n%s", diff) - } - }) - t.Run("without targeting instance B", func(t *testing.T) { - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrA, - // NOTE: addrB isn't included here, but it's pending move from - // addrA and so this plan request is invalid. - }, - }) - diags.Sort() - - // We're semi-abusing "ForRPC" here just to get diagnostics that are - // more easily comparable than the various different diagnostics types - // tfdiags uses internally. The RPC-friendly diagnostics are also - // comparison-friendly, by discarding all of the dynamic type information. - gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - ), - tfdiags.Sourceless( - tfdiags.Error, - "Moved resource instances excluded by targeting", - `Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... options to not fully cover all of those resource instances. 
- -To create a valid plan, either remove your -target=... options altogether or add the following additional target options: - -target="test_object.b" - -Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, - ), - }.ForRPC() - - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - t.Errorf("wrong diagnostics\n%s", diff) - } - }) - t.Run("without targeting either instance", func(t *testing.T) { - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - mustResourceInstanceAddr("test_object.unrelated"), - // NOTE: neither addrA nor addrB are included here, but there's - // a pending move between them and so this is invalid. - }, - }) - diags.Sort() - - // We're semi-abusing "ForRPC" here just to get diagnostics that are - // more easily comparable than the various different diagnostics types - // tfdiags uses internally. The RPC-friendly diagnostics are also - // comparison-friendly, by discarding all of the dynamic type information. - gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - ), - tfdiags.Sourceless( - tfdiags.Error, - "Moved resource instances excluded by targeting", - `Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... options to not fully cover all of those resource instances. 
- -To create a valid plan, either remove your -target=... options altogether or add the following additional target options: - -target="test_object.a" - -target="test_object.b" - -Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, - ), - }.ForRPC() - - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - t.Errorf("wrong diagnostics\n%s", diff) - } - }) - t.Run("with both addresses in the target set", func(t *testing.T) { - // The error messages in the other subtests above suggest adding - // addresses to the set of targets. This additional test makes sure that - // following that advice actually leads to a valid result. - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - // This time we're including both addresses in the target, - // to get the same effect an end-user would get if following - // the advice in our error message in the other subtests. - addrA, - addrB, - }, - }) - diags.Sort() - - // We're semi-abusing "ForRPC" here just to get diagnostics that are - // more easily comparable than the various different diagnostics types - // tfdiags uses internally. The RPC-friendly diagnostics are also - // comparison-friendly, by discarding all of the dynamic type information. - gotDiags := diags.ForRPC() - wantDiags := tfdiags.Diagnostics{ - // Still get the warning about the -target option... - tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. 
- -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - ), - // ...but now we have no error about test_object.a - }.ForRPC() - - if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { - t.Errorf("wrong diagnostics\n%s", diff) - } - }) -} - -func TestContext2Plan_untargetedResourceSchemaChange(t *testing.T) { - // an untargeted resource which requires a schema migration should not - // block planning due external changes in the plan. - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr("test_object.b") - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -resource "test_object" "b" { -}`, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{ - // old_list is no longer in the schema - AttrsJSON: []byte(`{"old_list":["used to be","a list here"]}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - - // external changes trigger a "drift report", but because test_object.b was - // not targeted, the state was not fixed to match the schema and cannot be - // deocded for the report. 
- p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - obj := req.PriorState.AsValueMap() - // test_number changed externally - obj["test_number"] = cty.NumberIntVal(1) - resp.NewState = cty.ObjectVal(obj) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrA, - }, - }) - // - assertNoErrors(t, diags) -} - -func TestContext2Plan_movedResourceRefreshOnly(t *testing.T) { - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr("test_object.b") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "b" { - } - - moved { - from = test_object.a - to = test_object.b - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - // The prior state tracks test_object.a, which we should treat as - // test_object.b because of the "moved" block in the config. 
- s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - t.Run(addrA.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrA) - if instPlan != nil { - t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB) - } - }) - t.Run(addrB.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrB) - if instPlan != nil { - t.Fatalf("unexpected plan for %s", addrB) - } - }) - t.Run("drift", func(t *testing.T) { - var drifted *plans.ResourceInstanceChangeSrc - for _, dr := range plan.DriftedResources { - if dr.Addr.Equal(addrB) { - drifted = dr - break - } - } - - if drifted == nil { - t.Fatalf("instance %s is missing from the drifted resource changes", addrB) - } - - if got, want := drifted.PrevRunAddr, addrA; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := drifted.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_refreshOnlyMode(t *testing.T) { - addr := mustResourceInstanceAddr("test_object.a") - - // The configuration, the prior state, and the refresh result intentionally - // have different values for "test_string" so we can observe that the - // refresh took effect but the configuration change wasn't considered. 
- m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - arg = "after" - } - - output "out" { - value = test_object.a.arg - } - `, - }) - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) { - return cty.StringVal("current"), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - // We should've been given the prior state JSON as our input to upgrade. - if !bytes.Contains(req.RawStateJSON, []byte("before")) { - t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. 
- resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - if !p.UpgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - - if got, want := len(plan.Changes.Resources), 0; got != want { - t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources)) - } - - if instState := plan.PriorState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object after plan", addr) - } else if got, want := instState.Current.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of refreshing - t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no previous run state at all after plan", addr) - } else { - if instState.Current == nil { - t.Errorf("%s has no current object in the previous run state", addr) - } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of upgrading - t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - - // The output value should also have updated. 
If not, it's likely that we - // skipped updating the working state to match the refreshed state when we - // were evaluating the resource. - if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil { - t.Errorf("no change planned for output value 'out'") - } else { - outChange, err := outChangeSrc.Decode() - if err != nil { - t.Fatalf("failed to decode output value 'out': %s", err) - } - got := outChange.After - want := cty.StringVal("current") - if !want.RawEquals(got) { - t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want) - } - } -} - -func TestContext2Plan_refreshOnlyMode_deposed(t *testing.T) { - addr := mustResourceInstanceAddr("test_object.a") - deposedKey := states.DeposedKey("byebye") - - // The configuration, the prior state, and the refresh result intentionally - // have different values for "test_string" so we can observe that the - // refresh took effect but the configuration change wasn't considered. - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - arg = "after" - } - - output "out" { - value = test_object.a.arg - } - `, - }) - state := states.BuildState(func(s *states.SyncState) { - // Note that we're intentionally recording a _deposed_ object here, - // and not including a current object, so a normal (non-refresh) - // plan would normally plan to create a new object _and_ destroy - // the deposed one, but refresh-only mode should prevent that. 
- s.SetResourceInstanceDeposed(addr, deposedKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) { - return cty.StringVal("current"), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - // We should've been given the prior state JSON as our input to upgrade. - if !bytes.Contains(req.RawStateJSON, []byte("before")) { - t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. 
- resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - if !p.UpgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - - if got, want := len(plan.Changes.Resources), 0; got != want { - t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources)) - } - - if instState := plan.PriorState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - if obj := instState.Deposed[deposedKey]; obj == nil { - t.Errorf("%s has no deposed object after plan", addr) - } else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of refreshing - t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { - t.Errorf("%s has no previous run state at all after plan", addr) - } else { - if obj := instState.Deposed[deposedKey]; obj == nil { - t.Errorf("%s has no deposed object in the previous run state", addr) - } else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of upgrading - t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) - } - } - - // The output value should also have updated. 
If not, it's likely that we - // skipped updating the working state to match the refreshed state when we - // were evaluating the resource. - if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil { - t.Errorf("no change planned for output value 'out'") - } else { - outChange, err := outChangeSrc.Decode() - if err != nil { - t.Fatalf("failed to decode output value 'out': %s", err) - } - got := outChange.After - want := cty.UnknownVal(cty.String) - if !want.RawEquals(got) { - t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want) - } - } - - // Deposed objects should not be represented in drift. - if len(plan.DriftedResources) > 0 { - t.Errorf("unexpected drifted resources (%d)", len(plan.DriftedResources)) - } -} - -func TestContext2Plan_refreshOnlyMode_orphan(t *testing.T) { - addr := mustAbsResourceAddr("test_object.a") - - // The configuration, the prior state, and the refresh result intentionally - // have different values for "test_string" so we can observe that the - // refresh took effect but the configuration change wasn't considered. 
- m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - arg = "after" - count = 1 - } - - output "out" { - value = test_object.a.*.arg - } - `, - }) - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addr.Instance(addrs.IntKey(1)), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"arg":"before"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "arg": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) { - return cty.StringVal("current"), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - // We should've been given the prior state JSON as our input to upgrade. 
- if !bytes.Contains(req.RawStateJSON, []byte("before")) { - t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) - } - - // We'll put something different in "arg" as part of upgrading, just - // so that we can verify below that PrevRunState contains the upgraded - // (but NOT refreshed) version of the object. - resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ - "arg": cty.StringVal("upgraded"), - }) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - if !p.UpgradeResourceStateCalled { - t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - - if got, want := len(plan.Changes.Resources), 0; got != want { - t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources)) - } - - if rState := plan.PriorState.Resource(addr); rState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - for i := 0; i < 2; i++ { - instKey := addrs.IntKey(i) - if obj := rState.Instance(instKey).Current; obj == nil { - t.Errorf("%s%s has no object after plan", addr, instKey) - } else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of refreshing - t.Errorf("%s%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, instKey, got, want) - } - } - } - if rState := plan.PrevRunState.Resource(addr); rState == nil { - t.Errorf("%s has no prior state at all after plan", addr) - } else { - for i := 0; i < 2; i++ { - instKey := addrs.IntKey(i) - if obj := 
rState.Instance(instKey).Current; obj == nil { - t.Errorf("%s%s has no object after plan", addr, instKey) - } else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { - // Should've saved the result of upgrading - t.Errorf("%s%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, instKey, got, want) - } - } - } - - // The output value should also have updated. If not, it's likely that we - // skipped updating the working state to match the refreshed state when we - // were evaluating the resource. - if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil { - t.Errorf("no change planned for output value 'out'") - } else { - outChange, err := outChangeSrc.Decode() - if err != nil { - t.Fatalf("failed to decode output value 'out': %s", err) - } - got := outChange.After - want := cty.TupleVal([]cty.Value{cty.StringVal("current"), cty.StringVal("current")}) - if !want.RawEquals(got) { - t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want) - } - } -} - -func TestContext2Plan_invalidSensitiveModuleOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "child/main.tf": ` -output "out" { - value = sensitive("xyz") -}`, - "main.tf": ` -module "child" { - source = "./child" -} - -output "root" { - value = module.child.out -}`, - }) - - ctx := testContext2(t, &ContextOpts{}) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Plan_planDataSourceSensitiveNested(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "bar" { -} - -data "test_data_source" "foo" { - foo { - bar = test_instance.bar.sensitive 
- } -} -`, - }) - - p := new(MockProvider) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ - "sensitive": cty.UnknownVal(cty.String), - }) - return resp - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "sensitive": { - Type: cty.String, - Computed: true, - Sensitive: true, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("data.test_data_source.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"string":"data_id", "foo":[{"bar":"old"}]}`), - AttrSensitivePaths: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("foo"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"sensitive":"old"}`), - AttrSensitivePaths: []cty.PathValueMarks{ - { - Path: cty.GetAttrPath("sensitive"), - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - 
- ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - for _, res := range plan.Changes.Resources { - switch res.Addr.String() { - case "test_instance.bar": - if res.Action != plans.Update { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - case "data.test_data_source.foo": - if res.Action != plans.Read { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - default: - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - } -} - -func TestContext2Plan_forceReplace(t *testing.T) { - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr("test_object.b") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - } - resource "test_object" "b" { - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - ForceReplace: []addrs.AbsResourceInstance{ - addrA, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - t.Run(addrA.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrA) - if instPlan == nil { - 
t.Fatalf("no plan for %s at all", addrA) - } - - if got, want := instPlan.Action, plans.DeleteThenCreate; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceReplaceByRequest; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) - t.Run(addrB.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrB) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrB) - } - - if got, want := instPlan.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_forceReplaceIncompleteAddr(t *testing.T) { - addr0 := mustResourceInstanceAddr("test_object.a[0]") - addr1 := mustResourceInstanceAddr("test_object.a[1]") - addrBare := mustResourceInstanceAddr("test_object.a") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test_object" "a" { - count = 2 - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(addr0, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - s.SetResourceInstanceCurrent(addr1, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - ForceReplace: []addrs.AbsResourceInstance{ - addrBare, - }, - }) - if 
diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - diagsErr := diags.ErrWithWarnings() - if diagsErr == nil { - t.Fatalf("no warnings were returned") - } - if got, want := diagsErr.Error(), "Incompletely-matched force-replace resource instance"; !strings.Contains(got, want) { - t.Errorf("missing expected warning\ngot:\n%s\n\nwant substring: %s", got, want) - } - - t.Run(addr0.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addr0) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addr0) - } - - if got, want := instPlan.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) - t.Run(addr1.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addr1) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addr1) - } - - if got, want := instPlan.Action, plans.NoOp; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -// Verify that adding a module instance does force existing module data sources -// to be deferred -func TestContext2Plan_noChangeDataSourceAddingModuleInstance(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - data = { - a = "a" - b = "b" - } -} - -module "one" { - source = "./mod" - for_each = local.data - input = each.value -} - -module "two" { - source = "./mod" - for_each = module.one - input = each.value.output -} -`, - "mod/main.tf": ` -variable "input" { -} - -resource "test_resource" "x" { - value = var.input -} - -data "test_data_source" "d" { - foo = test_resource.x.id -} - -output "output" { - value = 
test_resource.x.id -} -`, - }) - - p := testProvider("test") - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data"), - "foo": cty.StringVal("foo"), - }), - } - state := states.NewState() - modOne := addrs.RootModuleInstance.Child("one", addrs.StringKey("a")) - modTwo := addrs.RootModuleInstance.Child("two", addrs.StringKey("a")) - one := state.EnsureModule(modOne) - two := state.EnsureModule(modTwo) - one.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`test_resource.x`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","value":"a"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - one.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`data.test_data_source.d`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"data"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - two.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`test_resource.x`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","value":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - two.SetResourceInstanceCurrent( - mustResourceInstanceAddr(`data.test_data_source.d`).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"data"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - for _, res := range plan.Changes.Resources { - // both existing data sources should be read during plan - if 
res.Addr.Module[0].InstanceKey == addrs.StringKey("b") { - continue - } - - if res.Addr.Resource.Resource.Mode == addrs.DataResourceMode && res.Action != plans.NoOp { - t.Errorf("unexpected %s plan for %s", res.Action, res.Addr) - } - } -} - -func TestContext2Plan_moduleExpandOrphansResourceInstance(t *testing.T) { - // This test deals with the situation where a user has changed the - // repetition/expansion mode for a module call while there are already - // resource instances from the previous declaration in the state. - // - // This is conceptually just the same as removing the resources - // from the module configuration only for that instance, but the - // implementation of it ends up a little different because it's - // an entry in the resource address's _module path_ that we'll find - // missing, rather than the resource's own instance key, and so - // our analyses need to handle that situation by indicating that all - // of the resources under the missing module instance have zero - // instances, regardless of which resource in that module we might - // be asking about, and do so without tripping over any missing - // registrations in the instance expander that might lead to panics - // if we aren't careful. 
- // - // (For some history here, see https://github.com/hashicorp/terraform/issues/30110 ) - - addrNoKey := mustResourceInstanceAddr("module.child.test_object.a[0]") - addrZeroKey := mustResourceInstanceAddr("module.child[0].test_object.a[0]") - m := testModuleInline(t, map[string]string{ - "main.tf": ` - module "child" { - source = "./child" - count = 1 - } - `, - "child/main.tf": ` - resource "test_object" "a" { - count = 1 - } - `, - }) - - state := states.BuildState(func(s *states.SyncState) { - // Notice that addrNoKey is the address which lacks any instance key - // for module.child, and so that module instance doesn't match the - // call declared above with count = 1, and therefore the resource - // inside is "orphaned" even though the resource block actually - // still exists there. - s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - - t.Run(addrNoKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrNoKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrNoKey) - } - - if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.Delete; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := 
instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoModule; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) - - t.Run(addrZeroKey.String(), func(t *testing.T) { - instPlan := plan.Changes.ResourceInstance(addrZeroKey) - if instPlan == nil { - t.Fatalf("no plan for %s at all", addrZeroKey) - } - - if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) { - t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.Action, plans.Create; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - }) -} - -func TestContext2Plan_resourcePreconditionPostcondition(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "boop" { - type = string -} - -resource "test_resource" "a" { - value = var.boop - lifecycle { - precondition { - condition = var.boop == "boop" - error_message = "Wrong boop." - } - postcondition { - condition = self.output != "" - error_message = "Output must not be blank." 
- } - } -} - -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - "output": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - t.Run("conditions pass", func(t *testing.T) { - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - m := req.ProposedNewState.AsValueMap() - m["output"] = cty.StringVal("bar") - - resp.PlannedState = cty.ObjectVal(m) - resp.LegacyTypeSystem = true - return resp - } - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - for _, res := range plan.Changes.Resources { - switch res.Addr.String() { - case "test_resource.a": - if res.Action != plans.Create { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - default: - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - } - }) - - t.Run("precondition fail", func(t *testing.T) { - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Resource precondition failed: Wrong boop."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - if p.PlanResourceChangeCalled { - t.Errorf("Provider's PlanResourceChange 
was called; should'nt've been") - } - }) - - t.Run("precondition fail refresh-only", func(t *testing.T) { - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(diags) == 0 { - t.Fatalf("no diags, but should have warnings") - } - if got, want := diags.ErrWithWarnings().Error(), "Resource precondition failed: Wrong boop."; got != want { - t.Fatalf("wrong warning:\ngot: %s\nwant: %q", got, want) - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - }) - - t.Run("postcondition fail", func(t *testing.T) { - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - m := req.ProposedNewState.AsValueMap() - m["output"] = cty.StringVal("") - - resp.PlannedState = cty.ObjectVal(m) - resp.LegacyTypeSystem = true - return resp - } - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - if !p.PlanResourceChangeCalled { - t.Errorf("Provider's PlanResourceChange wasn't called; should've been") - } - }) - - t.Run("postcondition fail refresh-only", func(t 
*testing.T) { - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "output"}) { - return cty.StringVal(""), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(diags) == 0 { - t.Fatalf("no diags, but should have warnings") - } - if got, want := diags.ErrWithWarnings().Error(), "Resource postcondition failed: Output must not be blank."; got != want { - t.Fatalf("wrong warning:\ngot: %s\nwant: %q", got, want) - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - if p.PlanResourceChangeCalled { - t.Errorf("Provider's PlanResourceChange was called; should'nt've been") - } - }) - - t.Run("precondition and postcondition fail refresh-only", func(t *testing.T) { - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), - Status: states.ObjectReady, - }, 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "output"}) { - return cty.StringVal(""), nil - } - return v, nil - }) - if err != nil { - // shouldn't get here - t.Fatalf("ReadResourceFn transform failed") - return providers.ReadResourceResponse{} - } - return providers.ReadResourceResponse{ - NewState: newVal, - } - } - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if got, want := len(diags), 2; got != want { - t.Errorf("wrong number of warnings, got %d, want %d", got, want) - } - warnings := diags.ErrWithWarnings().Error() - wantWarnings := []string{ - "Resource precondition failed: Wrong boop.", - "Resource postcondition failed: Output must not be blank.", - } - for _, want := range wantWarnings { - if !strings.Contains(warnings, want) { - t.Errorf("missing warning:\ngot: %s\nwant to contain: %q", warnings, want) - } - } - if !p.ReadResourceCalled { - t.Errorf("Provider's ReadResource wasn't called; should've been") - } - if p.PlanResourceChangeCalled { - t.Errorf("Provider's PlanResourceChange was called; should'nt've been") - } - }) -} - -func TestContext2Plan_dataSourcePreconditionPostcondition(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "boop" { - type = string -} - -data "test_data_source" "a" { - foo = var.boop - lifecycle { - precondition { - condition = var.boop == "boop" - error_message = "Wrong boop." - } - postcondition { - condition = length(self.results) > 0 - error_message = "Results cannot be empty." 
- } - } -} - -resource "test_resource" "a" { - value = data.test_data_source.a.results[0] -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "value": { - Type: cty.String, - Required: true, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - }, - "results": { - Type: cty.List(cty.String), - Computed: true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - t.Run("conditions pass", func(t *testing.T) { - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("boop"), - "results": cty.ListVal([]cty.Value{cty.StringVal("boop")}), - }), - } - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - for _, res := range plan.Changes.Resources { - switch res.Addr.String() { - case "test_resource.a": - if res.Action != plans.Create { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - case "data.test_data_source.a": - if res.Action != plans.Read { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - default: - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - } - - addr := mustResourceInstanceAddr("data.test_data_source.a") - if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { - t.Errorf("no check result for %s", addr) - } else { - wantResult := 
&states.CheckResultObject{ - Status: checks.StatusPass, - } - if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { - t.Errorf("wrong check result for %s\n%s", addr, diff) - } - } - }) - - t.Run("precondition fail", func(t *testing.T) { - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Resource precondition failed: Wrong boop."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - if p.ReadDataSourceCalled { - t.Errorf("Provider's ReadResource was called; should'nt've been") - } - }) - - t.Run("precondition fail refresh-only", func(t *testing.T) { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(diags) == 0 { - t.Fatalf("no diags, but should have warnings") - } - if got, want := diags.ErrWithWarnings().Error(), "Resource precondition failed: Wrong boop."; got != want { - t.Fatalf("wrong warning:\ngot: %s\nwant: %q", got, want) - } - for _, res := range plan.Changes.Resources { - switch res.Addr.String() { - case "test_resource.a": - if res.Action != plans.Create { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - case "data.test_data_source.a": - if res.Action != plans.Read { - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - default: - t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) - } - } - }) - - t.Run("postcondition fail", func(t *testing.T) { - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("boop"), - "results": 
cty.ListValEmpty(cty.String), - }), - } - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Resource postcondition failed: Results cannot be empty."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - if !p.ReadDataSourceCalled { - t.Errorf("Provider's ReadDataSource wasn't called; should've been") - } - }) - - t.Run("postcondition fail refresh-only", func(t *testing.T) { - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("boop"), - "results": cty.ListValEmpty(cty.String), - }), - } - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if got, want := diags.ErrWithWarnings().Error(), "Resource postcondition failed: Results cannot be empty."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - addr := mustResourceInstanceAddr("data.test_data_source.a") - if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { - t.Errorf("no check result for %s", addr) - } else { - wantResult := &states.CheckResultObject{ - Status: checks.StatusFail, - FailureMessages: []string{ - "Results cannot be empty.", - }, - } - if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { - t.Errorf("wrong check result\n%s", diff) - } - } - }) - - t.Run("precondition and postcondition fail refresh-only", func(t *testing.T) { - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("nope"), - "results": 
cty.ListValEmpty(cty.String), - }), - } - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if got, want := len(diags), 2; got != want { - t.Errorf("wrong number of warnings, got %d, want %d", got, want) - } - warnings := diags.ErrWithWarnings().Error() - wantWarnings := []string{ - "Resource precondition failed: Wrong boop.", - "Resource postcondition failed: Results cannot be empty.", - } - for _, want := range wantWarnings { - if !strings.Contains(warnings, want) { - t.Errorf("missing warning:\ngot: %s\nwant to contain: %q", warnings, want) - } - } - }) -} - -func TestContext2Plan_outputPrecondition(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "boop" { - type = string -} - -output "a" { - value = var.boop - precondition { - condition = var.boop == "boop" - error_message = "Wrong boop." 
- } -} -`, - }) - - p := testProvider("test") - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - t.Run("condition pass", func(t *testing.T) { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - addr := addrs.RootModuleInstance.OutputValue("a") - outputPlan := plan.Changes.OutputValue(addr) - if outputPlan == nil { - t.Fatalf("no plan for %s at all", addr) - } - if got, want := outputPlan.Addr, addr; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := outputPlan.Action, plans.Create; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { - t.Errorf("no check result for %s", addr) - } else { - wantResult := &states.CheckResultObject{ - Status: checks.StatusPass, - } - if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { - t.Errorf("wrong check result\n%s", diff) - } - } - }) - - t.Run("condition fail", func(t *testing.T) { - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), "Module output value precondition failed: Wrong boop."; got != want { - t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) - } - }) - - t.Run("condition fail refresh-only", func(t *testing.T) { - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.RefreshOnlyMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: 
cty.StringVal("nope"), - SourceType: ValueFromCLIArg, - }, - }, - }) - assertNoErrors(t, diags) - if len(diags) == 0 { - t.Fatalf("no diags, but should have warnings") - } - if got, want := diags.ErrWithWarnings().Error(), "Module output value precondition failed: Wrong boop."; got != want { - t.Errorf("wrong warning:\ngot: %s\nwant: %q", got, want) - } - addr := addrs.RootModuleInstance.OutputValue("a") - outputPlan := plan.Changes.OutputValue(addr) - if outputPlan == nil { - t.Fatalf("no plan for %s at all", addr) - } - if got, want := outputPlan.Addr, addr; !got.Equal(want) { - t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) - } - if got, want := outputPlan.Action, plans.Create; got != want { - t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) - } - if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { - t.Errorf("no condition result for %s", addr) - } else { - wantResult := &states.CheckResultObject{ - Status: checks.StatusFail, - FailureMessages: []string{"Wrong boop."}, - } - if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { - t.Errorf("wrong condition result\n%s", diff) - } - } - }) -} - -func TestContext2Plan_preconditionErrors(t *testing.T) { - testCases := []struct { - condition string - wantSummary string - wantDetail string - }{ - { - "data.test_data_source", - "Invalid reference", - `The "data" object must be followed by two attribute names`, - }, - { - "self.value", - `Invalid "self" reference`, - "only in resource provisioner, connection, and postcondition blocks", - }, - { - "data.foo.bar", - "Reference to undeclared resource", - `A data resource "foo" "bar" has not been declared in the root module`, - }, - { - "test_resource.b.value", - "Invalid condition result", - "Condition expression must return either true or false", - }, - { - "test_resource.c.value", - "Invalid condition result", - "Invalid condition result value: a bool is required", - }, - } - - p := testProvider("test") - 
ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - for _, tc := range testCases { - t.Run(tc.condition, func(t *testing.T) { - main := fmt.Sprintf(` - resource "test_resource" "a" { - value = var.boop - lifecycle { - precondition { - condition = %s - error_message = "Not relevant." - } - } - } - - resource "test_resource" "b" { - value = null - } - - resource "test_resource" "c" { - value = "bar" - } - `, tc.condition) - m := testModuleInline(t, map[string]string{"main.tf": main}) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - - if !plan.Errored { - t.Fatal("plan failed to record error") - } - - diag := diags[0] - if got, want := diag.Description().Summary, tc.wantSummary; got != want { - t.Errorf("unexpected summary\n got: %s\nwant: %s", got, want) - } - if got, want := diag.Description().Detail, tc.wantDetail; !strings.Contains(got, want) { - t.Errorf("unexpected summary\ngot: %s\nwant to contain %q", got, want) - } - - for _, kv := range plan.Checks.ConfigResults.Elements() { - // All these are configuration or evaluation errors - if kv.Value.Status != checks.StatusError { - t.Errorf("incorrect status, got %s", kv.Value.Status) - } - } - }) - } -} - -func TestContext2Plan_preconditionSensitiveValues(t *testing.T) { - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "boop" { - sensitive = true - type = string -} - -output "a" { - sensitive = true - value = var.boop - - precondition { - condition = length(var.boop) <= 4 - error_message = "Boop is too long, ${length(var.boop)} > 4" - } -} -`, - }) - - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - 
Mode: plans.NormalMode, - SetVariables: InputValues{ - "boop": &InputValue{ - Value: cty.StringVal("bleep"), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := len(diags), 2; got != want { - t.Errorf("wrong number of diags, got %d, want %d", got, want) - } - for _, diag := range diags { - desc := diag.Description() - if desc.Summary == "Module output value precondition failed" { - if got, want := desc.Detail, "This check failed, but has an invalid error message as described in the other accompanying messages."; !strings.Contains(got, want) { - t.Errorf("unexpected detail\ngot: %s\nwant to contain %q", got, want) - } - } else if desc.Summary == "Error message refers to sensitive values" { - if got, want := desc.Detail, "The error expression used to explain this condition refers to sensitive values, so Terraform will not display the resulting message."; !strings.Contains(got, want) { - t.Errorf("unexpected detail\ngot: %s\nwant to contain %q", got, want) - } - } else { - t.Errorf("unexpected summary\ngot: %s", desc.Summary) - } - } -} - -func TestContext2Plan_triggeredBy(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { - count = 1 - test_string = "new" -} -resource "test_object" "b" { - count = 1 - test_string = test_object.a[count.index].test_string - lifecycle { - # the change to test_string in the other resource should trigger replacement - replace_triggered_by = [ test_object.a[count.index].test_string ] - } -} -`, - }) - - p := simpleMockProvider() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a[0]"), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"test_string":"old"}`), 
- Status: states.ObjectReady, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - s.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.b[0]"), - &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{}`), - Status: states.ObjectReady, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors\n%s", diags.Err().Error()) - } - for _, c := range plan.Changes.Resources { - switch c.Addr.String() { - case "test_object.a[0]": - if c.Action != plans.Update { - t.Fatalf("unexpected %s change for %s\n", c.Action, c.Addr) - } - case "test_object.b[0]": - if c.Action != plans.DeleteThenCreate { - t.Fatalf("unexpected %s change for %s\n", c.Action, c.Addr) - } - if c.ActionReason != plans.ResourceInstanceReplaceByTriggers { - t.Fatalf("incorrect reason for change: %s\n", c.ActionReason) - } - default: - t.Fatal("unexpected change", c.Addr, c.Action) - } - } -} - -func TestContext2Plan_dataSchemaChange(t *testing.T) { - // We can't decode the prior state when a data source upgrades the schema - // in an incompatible way. Since prior state for data sources is purely - // informational, decoding should be skipped altogether. 
- m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "test_object" "a" { - obj { - # args changes from a list to a map - args = { - val = "string" - } - } -} -`, - }) - - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - DataSources: map[string]*configschema.Block{ - "test_object": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "obj": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "args": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }) - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = req.Config - return resp - } - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`data.test_object.a`), &states.ResourceInstanceObjectSrc{ - AttrsJSON: []byte(`{"id":"old","obj":[{"args":["string"]}]}`), - Status: states.ObjectReady, - }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) -} - -func TestContext2Plan_applyGraphError(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} -resource "test_object" "b" { - depends_on = [test_object.a] -} -`, - }) - - p := simpleMockProvider() - - // Here we introduce a cycle via state which only shows up in the apply - // graph where the actual destroy instances are connected in the graph. 
- // This could happen for example when a user has an existing state with - // stored dependencies, and changes the config in such a way that - // contradicts the stored dependencies. - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"a"}`), - Dependencies: []addrs.ConfigResource{mustResourceInstanceAddr("test_object.b").ContainingResource().Config()}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"b"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - }) - if !diags.HasErrors() { - t.Fatal("cycle error not detected") - } - - msg := diags.ErrWithWarnings().Error() - if !strings.Contains(msg, "Cycle") { - t.Fatalf("no cycle error found:\n got: %s\n", msg) - } -} - -// plan a destroy with no state where configuration could fail to evaluate -// expansion indexes. -func TestContext2Plan_emptyDestroy(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - enable = true - value = local.enable ? module.example[0].out : null -} - -module "example" { - count = local.enable ? 
1 : 0 - source = "./example" -} -`, - "example/main.tf": ` -resource "test_resource" "x" { -} - -output "out" { - value = test_resource.x -} -`, - }) - - p := testProvider("test") - state := states.NewState() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - - assertNoErrors(t, diags) - - // ensure that the given states are valid and can be serialized - if plan.PrevRunState == nil { - t.Fatal("nil plan.PrevRunState") - } - if plan.PriorState == nil { - t.Fatal("nil plan.PriorState") - } -} - -// A deposed instances which no longer exists during ReadResource creates NoOp -// change, which should not effect the plan. -func TestContext2Plan_deposedNoLongerExists(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "b" { - count = 1 - test_string = "updated" - lifecycle { - create_before_destroy = true - } -} -`, - }) - - p := simpleMockProvider() - p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - s := req.PriorState.GetAttr("test_string").AsString() - if s == "current" { - resp.NewState = req.PriorState - return resp - } - // pretend the non-current instance has been deleted already - resp.NewState = cty.NullVal(req.PriorState.Type()) - return resp - } - - // Here we introduce a cycle via state which only shows up in the apply - // graph where the actual destroy instances are connected in the graph. - // This could happen for example when a user has an existing state with - // stored dependencies, and changes the config in such a way that - // contradicts the stored dependencies. 
- state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("test_object.a[0]").Resource, - states.DeposedKey("deposed"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"old"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"current"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - }) - assertNoErrors(t, diags) -} - -// make sure there are no cycles with changes around a provider configured via -// managed resources. 
-func TestContext2Plan_destroyWithResourceConfiguredProvider(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { - in = "a" -} - -provider "test" { - alias = "other" - in = test_object.a.out -} - -resource "test_object" "b" { - provider = test.other - in = "a" -} -`}) - - testProvider := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "in": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "test_object": providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "in": { - Type: cty.String, - Optional: true, - }, - "out": { - Type: cty.Number, - Computed: true, - }, - }, - }, - }, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), - }, - }) - - // plan+apply to create the initial state - opts := SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)) - plan, diags := ctx.Plan(m, states.NewState(), opts) - assertNoErrors(t, diags) - state, diags := ctx.Apply(plan, m) - assertNoErrors(t, diags) - - // Resource changes which have dependencies across providers which - // themselves depend on resources can result in cycles. - // Because other_object transitively depends on the module resources - // through its provider, we trigger changes on both sides of this boundary - // to ensure we can create a valid plan. 
- // - // Try to replace both instances - addrA := mustResourceInstanceAddr("test_object.a") - addrB := mustResourceInstanceAddr(`test_object.b`) - opts.ForceReplace = []addrs.AbsResourceInstance{addrA, addrB} - - _, diags = ctx.Plan(m, state, opts) - assertNoErrors(t, diags) -} - -func TestContext2Plan_destroyPartialState(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_object" "a" { -} - -output "out" { - value = module.mod.out -} - -module "mod" { - source = "./mod" -} -`, - - "./mod/main.tf": ` -resource "test_object" "a" { - count = 2 - - lifecycle { - precondition { - # test_object_b has already been destroyed, so referencing the first - # instance must not fail during a destroy plan. - condition = test_object.b[0].test_string == "invalid" - error_message = "should not block destroy" - } - precondition { - # this failing condition should bot block a destroy plan - condition = !local.continue - error_message = "should not block destroy" - } - } -} - -resource "test_object" "b" { - count = 2 -} - -locals { - continue = true -} - -output "out" { - # the reference to test_object.b[0] may not be valid during a destroy plan, - # but should not fail. - value = local.continue ? test_object.a[1].test_string != "invalid" && test_object.b[0].test_string != "invalid" : false - - precondition { - # test_object_b has already been destroyed, so referencing the first - # instance must not fail during a destroy plan. - condition = test_object.b[0].test_string == "invalid" - error_message = "should not block destroy" - } - precondition { - # this failing condition should bot block a destroy plan - condition = test_object.a[0].test_string == "invalid" - error_message = "should not block destroy" - } -} -`}) - - p := simpleMockProvider() - - // This state could be the result of a failed destroy, leaving only 2 - // remaining instances. 
We want to be able to continue the destroy to - // remove everything without blocking on invalid references or failing - // conditions. - state := states.NewState() - mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey)) - mod.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"current"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - mod.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"test_string":"current"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - assertNoErrors(t, diags) -} - -// Make sure the data sources in the prior state are serializeable even if -// there were an error in the plan. 
-func TestContext2Plan_dataSourceReadPlanError(t *testing.T) { - m, snap := testModuleWithSnapshot(t, "data-source-read-with-plan-error") - awsProvider := testProvider("aws") - testProvider := testProvider("test") - - testProvider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops")) - return resp - } - - state := states.NewState() - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(awsProvider), - addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("expected plan error") - } - - // make sure we can serialize the plan even if there were an error - _, _, _, err := contextOptsForPlanViaFile(t, snap, plan) - if err != nil { - t.Fatalf("failed to round-trip through planfile: %s", err) - } -} diff --git a/internal/terraform/context_plan_test.go b/internal/terraform/context_plan_test.go deleted file mode 100644 index 9a2eb5f9cb60..000000000000 --- a/internal/terraform/context_plan_test.go +++ /dev/null @@ -1,6931 +0,0 @@ -package terraform - -import ( - "bytes" - "errors" - "fmt" - "os" - "reflect" - "sort" - "strings" - "sync" - "sync/atomic" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - 
"github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestContext2Plan_basic(t *testing.T) { - m := testModule(t, "plan-good") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if l := len(plan.Changes.Resources); l < 2 { - t.Fatalf("wrong number of resources %d; want fewer than two\n%s", l, spew.Sdump(plan.Changes.Resources)) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - for _, r := range plan.Changes.Resources { - ric, err := r.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - foo := ric.After.GetAttr("foo").AsString() - if foo != "2" { - t.Fatalf("incorrect plan for 'bar': %#v", ric.After) - } - case "aws_instance.foo": - num, _ := ric.After.GetAttr("num").AsBigFloat().Int64() - if num != 2 { - t.Fatalf("incorrect plan for 'foo': %#v", ric.After) - } - default: - t.Fatal("unknown instance:", i) - } - } - - if !p.ValidateProviderConfigCalled { - t.Fatal("provider config was not checked before Configure") - } - -} - -func TestContext2Plan_createBefore_deposed(t *testing.T) { - m := testModule(t, "plan-cbd") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - 
mustResourceInstanceAddr("aws_instance.foo").Resource, - states.DeposedKey("00000001"), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - // the state should still show one deposed - expectedState := strings.TrimSpace(` - aws_instance.foo: (1 deposed) - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - Deposed ID 1 = foo`) - - if plan.PriorState.String() != expectedState { - t.Fatalf("\nexpected: %q\ngot: %q\n", expectedState, plan.PriorState.String()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - type InstanceGen struct { - Addr string - DeposedKey states.DeposedKey - } - want := map[InstanceGen]bool{ - { - Addr: "aws_instance.foo", - }: true, - { - Addr: "aws_instance.foo", - DeposedKey: states.DeposedKey("00000001"), - }: true, - } - got := make(map[InstanceGen]bool) - changes := make(map[InstanceGen]*plans.ResourceInstanceChangeSrc) - - for _, change := range plan.Changes.Resources { - k := InstanceGen{ - Addr: change.Addr.String(), - DeposedKey: change.DeposedKey, - } - got[k] = true - changes[k] = change - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("wrong resource instance object changes in plan\ngot: %s\nwant: %s", spew.Sdump(got), spew.Sdump(want)) - } - - { - ric, err := changes[InstanceGen{Addr: "aws_instance.foo"}].Decode(ty) - if err != nil { - t.Fatal(err) - } - - if got, want := ric.Action, plans.NoOp; got != want { - t.Errorf("current object change action is %s; want %s", got, want) - } - - // the 
existing instance should only have an unchanged id - expected, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("baz"), - "type": cty.StringVal("aws_instance"), - })) - if err != nil { - t.Fatal(err) - } - - checkVals(t, expected, ric.After) - } - - { - ric, err := changes[InstanceGen{Addr: "aws_instance.foo", DeposedKey: states.DeposedKey("00000001")}].Decode(ty) - if err != nil { - t.Fatal(err) - } - - if got, want := ric.Action, plans.Delete; got != want { - t.Errorf("deposed object change action is %s; want %s", got, want) - } - } -} - -func TestContext2Plan_createBefore_maintainRoot(t *testing.T) { - m := testModule(t, "plan-cbd-maintain-root") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !plan.PriorState.Empty() { - t.Fatal("expected empty prior state, got:", plan.PriorState) - } - - if len(plan.Changes.Resources) != 4 { - t.Error("expected 4 resource in plan, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - // these should all be creates - if res.Action != plans.Create { - t.Fatalf("unexpected action %s for %s", res.Action, res.Addr.String()) - } - } -} - -func TestContext2Plan_emptyDiff(t *testing.T) { - m := testModule(t, "plan-empty") - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - 
t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !plan.PriorState.Empty() { - t.Fatal("expected empty state, got:", plan.PriorState) - } - - if len(plan.Changes.Resources) != 2 { - t.Error("expected 2 resource in plan, got", len(plan.Changes.Resources)) - } - - actions := map[string]plans.Action{} - - for _, res := range plan.Changes.Resources { - actions[res.Addr.String()] = res.Action - } - - expected := map[string]plans.Action{ - "aws_instance.foo": plans.Create, - "aws_instance.bar": plans.Create, - } - if !cmp.Equal(expected, actions) { - t.Fatal(cmp.Diff(expected, actions)) - } -} - -func TestContext2Plan_escapedVar(t *testing.T) { - m := testModule(t, "plan-escaped-var") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) != 1 { - t.Error("expected 1 resource in plan, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - expected := objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("bar-${baz}"), - "type": cty.UnknownVal(cty.String), - }) - - checkVals(t, expected, ric.After) -} - -func TestContext2Plan_minimal(t *testing.T) { - m := testModule(t, "plan-empty") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, 
diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !plan.PriorState.Empty() { - t.Fatal("expected empty state, got:", plan.PriorState) - } - - if len(plan.Changes.Resources) != 2 { - t.Error("expected 2 resource in plan, got", len(plan.Changes.Resources)) - } - - actions := map[string]plans.Action{} - - for _, res := range plan.Changes.Resources { - actions[res.Addr.String()] = res.Action - } - - expected := map[string]plans.Action{ - "aws_instance.foo": plans.Create, - "aws_instance.bar": plans.Create, - } - if !cmp.Equal(expected, actions) { - t.Fatal(cmp.Diff(expected, actions)) - } -} - -func TestContext2Plan_modules(t *testing.T) { - m := testModule(t, "plan-modules") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) != 3 { - t.Error("expected 3 resource in plan, got", len(plan.Changes.Resources)) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - expectFoo := objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }) - - expectNum := objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }) - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - switch i := ric.Addr.String(); i { - case 
"aws_instance.bar": - expected = expectFoo - case "aws_instance.foo": - expected = expectNum - case "module.child.aws_instance.foo": - expected = expectNum - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - } -} -func TestContext2Plan_moduleExpand(t *testing.T) { - // Test a smattering of plan expansion behavior - m := testModule(t, "plan-modules-expand") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - expected := map[string]struct{}{ - `aws_instance.foo["a"]`: {}, - `module.count_child[1].aws_instance.foo[0]`: {}, - `module.count_child[1].aws_instance.foo[1]`: {}, - `module.count_child[0].aws_instance.foo[0]`: {}, - `module.count_child[0].aws_instance.foo[1]`: {}, - `module.for_each_child["a"].aws_instance.foo[1]`: {}, - `module.for_each_child["a"].aws_instance.foo[0]`: {}, - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - _, ok := expected[ric.Addr.String()] - if !ok { - t.Fatal("unexpected resource:", ric.Addr.String()) - } - delete(expected, ric.Addr.String()) - } - for addr := range expected { - t.Error("missing resource", addr) - } -} - -// GH-1475 -func TestContext2Plan_moduleCycle(t *testing.T) { - m := testModule(t, "plan-module-cycle") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - 
"aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "some_input": {Type: cty.String, Optional: true}, - "type": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - switch i := ric.Addr.String(); i { - case "aws_instance.b": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }) - case "aws_instance.c": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "some_input": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }) - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - } -} - -func TestContext2Plan_moduleDeadlock(t *testing.T) { - testCheckDeadlock(t, func() { - m := testModule(t, "plan-module-deadlock") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if err != nil { - t.Fatalf("err: %s", err) - } - - schema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - expected := objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }) - switch i := ric.Addr.String(); i { - case "module.child.aws_instance.foo[0]": - case "module.child.aws_instance.foo[1]": - case "module.child.aws_instance.foo[2]": - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - } - }) -} - -func TestContext2Plan_moduleInput(t *testing.T) { - m := testModule(t, "plan-module-input") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }) - case "module.child.aws_instance.foo": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - 
"foo": cty.StringVal("42"), - "type": cty.UnknownVal(cty.String), - }) - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - } -} - -func TestContext2Plan_moduleInputComputed(t *testing.T) { - m := testModule(t, "plan-module-input-computed") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - "compute": cty.StringVal("foo"), - }), ric.After) - case "module.child.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleInputFromVar(t *testing.T) { - m := testModule(t, "plan-module-input-var") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags 
:= ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("52"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("52"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleMultiVar(t *testing.T) { - m := testModule(t, "plan-module-multi-var") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "baz": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - 
t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 5 { - t.Fatal("expected 5 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.parent[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.parent[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.bar[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "baz": cty.StringVal("baz"), - }), ric.After) - case "module.child.aws_instance.bar[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "baz": cty.StringVal("baz"), - }), ric.After) - case "module.child.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("baz,baz"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleOrphans(t *testing.T) { - m := testModule(t, "plan-modules-remove") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := 
testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo": - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.foo": - if res.Action != plans.Delete { - t.Fatalf("expected resource delete, got %s", res.Action) - } - default: - t.Fatal("unknown instance:", i) - } - } - - expectedState := ` -module.child: - aws_instance.foo: - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"]` - - if plan.PriorState.String() != expectedState { - t.Fatalf("\nexpected state: %q\n\ngot: %q", expectedState, plan.PriorState.String()) - } -} - -// https://github.com/hashicorp/terraform/issues/3114 -func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { - m := testModule(t, "plan-modules-remove-provisioners") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - pr := testProvisioner() - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.top").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: 
[]byte(`{"id":"top","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child1 := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child1", addrs.NoKey)) - child1.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child2 := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child2", addrs.NoKey)) - child2.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 3 { - t.Error("expected 3 planned resources, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.parent.module.child1.aws_instance.foo": - if res.Action != plans.Delete { - t.Fatalf("expected resource Delete, got %s", res.Action) - } - case "module.parent.module.child2.aws_instance.foo": - if res.Action != plans.Delete { - t.Fatalf("expected 
resource Delete, got %s", res.Action) - } - case "aws_instance.top": - if res.Action != plans.NoOp { - t.Fatalf("expected no changes, got %s", res.Action) - } - default: - t.Fatalf("unknown instance: %s\nafter: %#v", i, hcl2shim.ConfigValueFromHCL2(ric.After)) - } - } - - expectedState := `aws_instance.top: - ID = top - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - -module.parent.child1: - aws_instance.foo: - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -module.parent.child2: - aws_instance.foo: - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance` - - if expectedState != plan.PriorState.String() { - t.Fatalf("\nexpect state:\n%s\n\ngot state:\n%s\n", expectedState, plan.PriorState.String()) - } -} - -func TestContext2Plan_moduleProviderInherit(t *testing.T) { - var l sync.Mutex - var calls []string - - m := testModule(t, "plan-module-provider-inherit") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { - l.Lock() - defer l.Unlock() - - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "from": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "from": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - from := req.Config.GetAttr("from") - if from.IsNull() || from.AsString() != "root" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("not root")) - } - - return - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp 
providers.PlanResourceChangeResponse) { - from := req.Config.GetAttr("from").AsString() - - l.Lock() - defer l.Unlock() - calls = append(calls, from) - return testDiffFn(req) - } - return p, nil - }, - }, - }) - - _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if err != nil { - t.Fatalf("err: %s", err) - } - - actual := calls - sort.Strings(actual) - expected := []string{"child", "root"} - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -// This tests (for GH-11282) that deeply nested modules properly inherit -// configuration. -func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) { - var l sync.Mutex - - m := testModule(t, "plan-module-provider-inherit-deep") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { - l.Lock() - defer l.Unlock() - - var from string - p := testProvider("aws") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "from": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }) - - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - v := req.Config.GetAttr("from") - if v.IsNull() || v.AsString() != "root" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("not root")) - } - from = v.AsString() - - return - } - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - if from != "root" { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("bad resource")) - return - } - - return testDiffFn(req) - } - return p, nil - }, - }, - }) - - _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if 
err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { - var l sync.Mutex - var calls []string - - m := testModule(t, "plan-module-provider-defaults-var") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { - l.Lock() - defer l.Unlock() - - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "to": {Type: cty.String, Optional: true}, - "from": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "from": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - var buf bytes.Buffer - from := req.Config.GetAttr("from") - if !from.IsNull() { - buf.WriteString(from.AsString() + "\n") - } - to := req.Config.GetAttr("to") - if !to.IsNull() { - buf.WriteString(to.AsString() + "\n") - } - - l.Lock() - defer l.Unlock() - calls = append(calls, buf.String()) - return - } - - return p, nil - }, - }, - }) - - _, err := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("root"), - SourceType: ValueFromCaller, - }, - }, - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := []string{ - "child\nchild\n", - "root\n", - } - sort.Strings(calls) - if !reflect.DeepEqual(calls, expected) { - t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls) - } -} - -func TestContext2Plan_moduleProviderVar(t *testing.T) { - m := testModule(t, "plan-module-provider-var") - p := testProvider("aws") - p.GetProviderSchemaResponse = 
getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.child.aws_instance.test": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "value": cty.StringVal("hello"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleVar(t *testing.T) { - m := testModule(t, "plan-module-var") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := 
schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }) - case "module.child.aws_instance.foo": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }) - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - } -} - -func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) { - m := testModule(t, "plan-module-wrong-var-type") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("succeeded; want errors") - } -} - -func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) { - m := testModule(t, "plan-module-wrong-var-type-nested") - p := testProvider("null") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("succeeded; want errors") - } -} - -func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) { - m := testModule(t, "plan-module-var-with-default-value") - p := testProvider("null") - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } -} - -func TestContext2Plan_moduleVarComputed(t *testing.T) { - m := testModule(t, "plan-module-var-computed") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - "compute": cty.StringVal("foo"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_preventDestroy_bad(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-bad") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, err := ctx.Plan(m, state, DefaultPlanOpts) - - expectedErr := "aws_instance.foo has lifecycle.prevent_destroy" - if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { - if plan != nil { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, err) - } -} - -func TestContext2Plan_preventDestroy_good(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-good") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !plan.Changes.Empty() { - t.Fatalf("expected no changes, got %#v\n", plan.Changes) - } -} - -func TestContext2Plan_preventDestroy_countBad(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-count-bad") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, err := ctx.Plan(m, state, DefaultPlanOpts) - - expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy" - if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { - if plan != nil { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, err) - } -} - -func TestContext2Plan_preventDestroy_countGood(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-count-good") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "current": {Type: cty.String, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc345"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if plan.Changes.Empty() { - t.Fatalf("Expected non-empty plan, got %s", legacyDiffComparisonString(plan.Changes)) - } -} - -func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-count-good") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "current": {Type: cty.String, Optional: true}, - "type": {Type: cty.String, Optional: true, Computed: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123","current":"0","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !plan.Changes.Empty() { - 
t.Fatalf("Expected empty plan, got %s", legacyDiffComparisonString(plan.Changes)) - } -} - -func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) { - m := testModule(t, "plan-prevent-destroy-good") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - - expectedErr := "aws_instance.foo has lifecycle.prevent_destroy" - if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) { - if plan != nil { - t.Logf(legacyDiffComparisonString(plan.Changes)) - } - t.Fatalf("expected diagnostics would contain %q\nactual diags: %s", expectedErr, diags.Err()) - } -} - -func TestContext2Plan_provisionerCycle(t *testing.T) { - m := testModule(t, "plan-provisioner-cycle") - p := testProvider("aws") - pr := testProvisioner() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "local-exec": testProvisionerFuncFixed(pr), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("succeeded; want errors") - } -} - -func TestContext2Plan_computed(t *testing.T) { - m := testModule(t, "plan-computed") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - "compute": cty.StringVal("foo"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_blockNestingGroup(t *testing.T) { - m := testModule(t, "plan-block-nesting-group") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test": { - BlockTypes: map[string]*configschema.NestedBlock{ - "blah": { - Nesting: configschema.NestingGroup, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": {Type: cty.String, Required: true}, - }, - }, - }, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - ctx := testContext2(t, &ContextOpts{ - 
Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if got, want := 1, len(plan.Changes.Resources); got != want { - t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources)) - } - - if !p.PlanResourceChangeCalled { - t.Fatalf("PlanResourceChange was not called at all") - } - - got := p.PlanResourceChangeRequest - want := providers.PlanResourceChangeRequest{ - TypeName: "test", - - // Because block type "blah" is defined as NestingGroup, we get a non-null - // value for it with null nested attributes, rather than the "blah" object - // itself being null, when there's no "blah" block in the config at all. - // - // This represents the situation where the remote service _always_ creates - // a single "blah", regardless of whether the block is present, but when - // the block _is_ present the user can override some aspects of it. The - // absense of the block means "use the defaults", in that case. 
- Config: cty.ObjectVal(map[string]cty.Value{ - "blah": cty.ObjectVal(map[string]cty.Value{ - "baz": cty.NullVal(cty.String), - }), - }), - ProposedNewState: cty.ObjectVal(map[string]cty.Value{ - "blah": cty.ObjectVal(map[string]cty.Value{ - "baz": cty.NullVal(cty.String), - }), - }), - } - if !cmp.Equal(got, want, valueTrans) { - t.Errorf("wrong PlanResourceChange request\n%s", cmp.Diff(got, want, valueTrans)) - } -} - -func TestContext2Plan_computedDataResource(t *testing.T) { - m := testModule(t, "plan-computed-data-resource") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "num": {Type: cty.String, Optional: true}, - "compute": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Computed: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.DataSources["aws_vpc"].Block - ty := schema.ImpliedType() - - if rc := plan.Changes.ResourceInstance(addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)); rc == nil { - t.Fatalf("missing diff for aws_instance.foo") - } - rcs := plan.Changes.ResourceInstance(addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "aws_vpc", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - if rcs == nil { - t.Fatalf("missing diff for 
data.aws_vpc.bar") - } - - rc, err := rcs.Decode(ty) - if err != nil { - t.Fatal(err) - } - - checkVals(t, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), - rc.After, - ) - if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseConfigUnknown; got != want { - t.Errorf("wrong ActionReason\ngot: %s\nwant: %s", got, want) - } -} - -func TestContext2Plan_computedInFunction(t *testing.T) { - m := testModule(t, "plan-computed-in-function") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.Number, Optional: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_data_source": { - Attributes: map[string]*configschema.Attribute{ - "computed": {Type: cty.List(cty.String), Computed: true}, - }, - }, - }, - }) - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "computed": cty.ListVal([]cty.Value{ - cty.StringVal("foo"), - }), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - assertNoErrors(t, diags) - - _, diags = ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - if !p.ReadDataSourceCalled { - t.Fatalf("ReadDataSource was not called on provider during plan; should've been called") - } -} - -func TestContext2Plan_computedDataCountResource(t *testing.T) { - m := testModule(t, "plan-computed-data-count") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "num": {Type: cty.String, 
Optional: true}, - "compute": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Computed: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - // make sure we created 3 "bar"s - for i := 0; i < 3; i++ { - addr := addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "aws_vpc", - Name: "bar", - }.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance) - - if rcs := plan.Changes.ResourceInstance(addr); rcs == nil { - t.Fatalf("missing changes for %s", addr) - } - } -} - -func TestContext2Plan_localValueCount(t *testing.T) { - m := testModule(t, "plan-local-value-count") - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - // make sure we created 3 "foo"s - for i := 0; i < 3; i++ { - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance) - - if rcs := plan.Changes.ResourceInstance(addr); rcs == nil { - t.Fatalf("missing changes for %s", addr) - } - } -} - -func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { - m := testModule(t, "plan-data-resource-becomes-computed") - p := testProvider("aws") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - 
ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "computed": {Type: cty.String, Computed: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - fooVal := req.ProposedNewState.GetAttr("foo") - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "foo": fooVal, - "computed": cty.UnknownVal(cty.String), - }), - PlannedPrivate: req.PriorPrivate, - } - } - - schema := p.GetProviderSchemaResponse.DataSources["aws_data_source"].Block - ty := schema.ImpliedType() - - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - // This should not be called, because the configuration for the - // data resource contains an unknown value for "foo". 
- Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("data.aws_data_source.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123","foo":"baz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors during plan: %s", diags.Err()) - } - - rcs := plan.Changes.ResourceInstance(addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "aws_data_source", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - if rcs == nil { - t.Logf("full changeset: %s", spew.Sdump(plan.Changes)) - t.Fatalf("missing diff for data.aws_data_resource.foo") - } - - rc, err := rcs.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseConfigUnknown; got != want { - t.Errorf("wrong ActionReason\ngot: %s\nwant: %s", got, want) - } - - // foo should now be unknown - foo := rc.After.GetAttr("foo") - if foo.IsKnown() { - t.Fatalf("foo should be unknown, got %#v", foo) - } -} - -func TestContext2Plan_computedList(t *testing.T) { - m := testModule(t, "plan-computed-list") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "foo": {Type: cty.String, Optional: true}, - "num": 
{Type: cty.String, Optional: true}, - "list": {Type: cty.List(cty.String), Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "list": cty.UnknownVal(cty.List(cty.String)), - "num": cty.NumberIntVal(2), - "compute": cty.StringVal("list.#"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// GH-8695. This tests that you can index into a computed list on a -// splatted resource. 
-func TestContext2Plan_computedMultiIndex(t *testing.T) { - m := testModule(t, "plan-computed-multi-index") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "foo": {Type: cty.List(cty.String), Optional: true}, - "ip": {Type: cty.List(cty.String), Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 3 { - t.Fatal("expected 3 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "ip": cty.UnknownVal(cty.List(cty.String)), - "foo": cty.NullVal(cty.List(cty.String)), - "compute": cty.StringVal("ip.#"), - }), ric.After) - case "aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "ip": cty.UnknownVal(cty.List(cty.String)), - "foo": cty.NullVal(cty.List(cty.String)), - "compute": cty.StringVal("ip.#"), - }), ric.After) - case "aws_instance.bar[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.UnknownVal(cty.List(cty.String)), - }), ric.After) - default: - t.Fatal("unknown 
instance:", i) - } - } -} - -func TestContext2Plan_count(t *testing.T) { - m := testModule(t, "plan-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 6 { - t.Fatal("expected 6 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo,foo,foo,foo,foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[2]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[3]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - 
}), ric.After) - case "aws_instance.foo[4]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countComputed(t *testing.T) { - m := testModule(t, "plan-count-computed") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if err == nil { - t.Fatal("should error") - } -} - -func TestContext2Plan_countComputedModule(t *testing.T) { - m := testModule(t, "plan-count-computed-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - - expectedErr := `The "count" value depends on resource attributes` - if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { - t.Fatalf("expected err would contain %q\nerr: %s\n", - expectedErr, err) - } -} - -func TestContext2Plan_countModuleStatic(t *testing.T) { - m := testModule(t, "plan-count-module-static") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 3 { - 
t.Fatal("expected 3 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.child.aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.aws_instance.foo[2]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) { - m := testModule(t, "plan-count-module-static-grandchild") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 3 { - t.Fatal("expected 3 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case 
"module.child.module.child.aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.module.child.aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.child.module.child.aws_instance.foo[2]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countIndex(t *testing.T) { - m := testModule(t, "plan-count-index") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("0"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("1"), - "type": cty.UnknownVal(cty.String), 
- }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countVar(t *testing.T) { - m := testModule(t, "plan-count-var") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "instance_count": &InputValue{ - Value: cty.StringVal("3"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 4 { - t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo,foo,foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[2]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), 
ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countZero(t *testing.T) { - m := testModule(t, "plan-count-zero") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - }, - }) - - // This schema contains a DynamicPseudoType, and therefore can't go through any shim functions - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - expected := cty.TupleVal(nil) - - foo := ric.After.GetAttr("foo") - - if !cmp.Equal(expected, foo, valueComparer) { - t.Fatal(cmp.Diff(expected, foo, valueComparer)) - } -} - -func TestContext2Plan_countOneIndex(t *testing.T) { - m := testModule(t, "plan-count-one-index") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[0]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countDecreaseToOne(t *testing.T) { - m := testModule(t, "plan-count-dec") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 4 { - t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("bar"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should be unchanged", i) - } - case "aws_instance.foo[1]": - if res.Action != plans.Delete { - t.Fatalf("expected resource delete, got %s", res.Action) - } - case "aws_instance.foo[2]": - if res.Action != plans.Delete { - t.Fatalf("expected resource delete, got %s", res.Action) - } - default: - t.Fatal("unknown instance:", i) - } - } - - expectedState := `aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo.1: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] -aws_instance.foo.2: - ID = bar - provider = 
provider["registry.terraform.io/hashicorp/aws"]` - - if plan.PriorState.String() != expectedState { - t.Fatalf("epected state:\n%q\n\ngot state:\n%q\n", expectedState, plan.PriorState.String()) - } -} - -func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) { - m := testModule(t, "plan-count-inc") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","type":"aws_instance","foo":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 4 { - t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("bar"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[0]": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should be unchanged", i) - } - case "aws_instance.foo[1]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, 
objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[2]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_countIncreaseFromOne(t *testing.T) { - m := testModule(t, "plan-count-inc") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 4 { - t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": 
cty.StringVal("bar"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[0]": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should be unchanged", i) - } - case "aws_instance.foo[1]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[2]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// https://github.com/PeoplePerHour/terraform/pull/11 -// -// This tests a case where both a "resource" and "resource.0" are in -// the state file, which apparently is a reasonable backwards compatibility -// concern found in the above 3rd party repo. 
-func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { - m := testModule(t, "plan-count-inc") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 5 { - t.Fatal("expected 5 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("bar"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - case 
"aws_instance.foo[0]": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should be unchanged", i) - } - case "aws_instance.foo[1]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[2]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// A common pattern in TF configs is to have a set of resources with the same -// count and to use count.index to create correspondences between them: -// -// foo_id = "${foo.bar.*.id[count.index]}" -// -// This test is for the situation where some instances already exist and the -// count is increased. In that case, we should see only the create diffs -// for the new instances and not any update diffs for the existing ones. 
-func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { - m := testModule(t, "plan-count-splat-reference") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "name": {Type: cty.String, Optional: true}, - "foo_name": {Type: cty.String, Optional: true}, - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","name":"foo 0"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","name":"foo 1"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo_name":"foo 0"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","foo_name":"foo 1"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, 
DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 6 { - t.Fatal("expected 6 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar[0]", "aws_instance.bar[1]", "aws_instance.foo[0]", "aws_instance.foo[1]": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should be unchanged", i) - } - case "aws_instance.bar[2]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - // The instance ID changed, so just check that the name updated - if ric.After.GetAttr("foo_name") != cty.StringVal("foo 2") { - t.Fatalf("resource %s attr \"foo_name\" should be changed", i) - } - case "aws_instance.foo[2]": - if res.Action != plans.Create { - t.Fatalf("expected resource create, got %s", res.Action) - } - // The instance ID changed, so just check that the name updated - if ric.After.GetAttr("name") != cty.StringVal("foo 2") { - t.Fatalf("resource %s attr \"name\" should be changed", i) - } - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_forEach(t *testing.T) { - m := testModule(t, "plan-for-each") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 8 { - t.Fatal("expected 8 changes, got", len(plan.Changes.Resources)) - } - - for _, 
res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - _, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - } -} - -func TestContext2Plan_forEachUnknownValue(t *testing.T) { - // This module has a variable defined, but it's value is unknown. We - // expect this to produce an error, but not to panic. - m := testModule(t, "plan-for-each-unknown-value") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": { - Value: cty.UnknownVal(cty.String), - SourceType: ValueFromCLIArg, - }, - }, - }) - if !diags.HasErrors() { - // Should get this error: - // Invalid for_each argument: The "for_each" value depends on resource attributes that cannot be determined until apply... - t.Fatal("succeeded; want errors") - } - - gotErrStr := diags.Err().Error() - wantErrStr := "Invalid for_each argument" - if !strings.Contains(gotErrStr, wantErrStr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } - - // We should have a diagnostic that is marked as being caused by unknown - // values. 
- for _, diag := range diags { - if tfdiags.DiagnosticCausedByUnknown(diag) { - return // don't fall through to the error below - } - } - t.Fatalf("no diagnostic is marked as being caused by unknown\n%s", diags.Err().Error()) -} - -func TestContext2Plan_destroy(t *testing.T) { - m := testModule(t, "plan-destroy") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.one").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.two").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.one", "aws_instance.two": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleDestroy(t *testing.T) { - m := testModule(t, "plan-module-destroy") - p := testProvider("aws") - - 
state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo", "module.child.aws_instance.foo": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - - default: - t.Fatal("unknown instance:", i) - } - } -} - -// GH-1835 -func TestContext2Plan_moduleDestroyCycle(t *testing.T) { - m := testModule(t, "plan-module-destroy-gh-1835") - p := testProvider("aws") - - state := states.NewState() - aModule := state.EnsureModule(addrs.RootModuleInstance.Child("a_module", addrs.NoKey)) - aModule.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.a").Resource, - 
&states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - bModule := state.EnsureModule(addrs.RootModuleInstance.Child("b_module", addrs.NoKey)) - bModule.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.a_module.aws_instance.a", "module.b_module.aws_instance.b": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { - m := testModule(t, "plan-module-destroy-multivar") - p := testProvider("aws") - - state := states.NewState() - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar0"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - 
) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar1"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.child.aws_instance.foo[0]", "module.child.aws_instance.foo[1]": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_pathVar(t *testing.T) { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - - m := testModule(t, "plan-path-var") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "cwd": {Type: cty.String, Optional: true}, - "module": {Type: cty.String, Optional: true}, - "root": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if 
diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", i) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "cwd": cty.StringVal(cwd + "/barpath"), - "module": cty.StringVal(m.Module.SourceDir + "/foopath"), - "root": cty.StringVal(m.Module.SourceDir + "/barpath"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_diffVar(t *testing.T) { - m := testModule(t, "plan-diffvar") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","num":"2","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - 
t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", i) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(3), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - if res.Action != plans.Update { - t.Fatalf("resource %s should be updated", i) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "num": cty.NumberIntVal(2), - "type": cty.StringVal("aws_instance"), - }), ric.Before) - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "num": cty.NumberIntVal(3), - "type": cty.StringVal("aws_instance"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_hook(t *testing.T) { - m := testModule(t, "plan-good") - h := new(MockHook) - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !h.PreDiffCalled { - t.Fatal("should be called") - } - if !h.PostDiffCalled { - t.Fatal("should be called") - } -} - -func TestContext2Plan_closeProvider(t *testing.T) { - // this fixture only has an aliased provider located in the module, to make - // sure that the provier name contains a path more complex than - // "provider.aws". 
- m := testModule(t, "plan-close-module-provider") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if !p.CloseCalled { - t.Fatal("provider not closed") - } -} - -func TestContext2Plan_orphan(t *testing.T) { - m := testModule(t, "plan-orphan") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.baz").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.baz": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be removed", i) - } - if got, want := ric.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - case "aws_instance.foo": - if res.Action != plans.Create { - t.Fatalf("resource %s should 
be created", i) - } - if got, want := ric.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// This tests that configurations with UUIDs don't produce errors. -// For shadows, this would produce errors since a UUID changes every time. -func TestContext2Plan_shadowUuid(t *testing.T) { - m := testModule(t, "plan-shadow-uuid") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } -} - -func TestContext2Plan_state(t *testing.T) { - m := testModule(t, "plan-good") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) < 2 { - t.Fatalf("bad: %#v", plan.Changes.Resources) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if 
len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", i) - } - if got, want := ric.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - if res.Action != plans.Update { - t.Fatalf("resource %s should be updated", i) - } - if got, want := ric.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "num": cty.NullVal(cty.Number), - "type": cty.NullVal(cty.String), - }), ric.Before) - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_requiresReplace(t *testing.T) { - m := testModule(t, "plan-requires-replace") - p := testProvider("test") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{}, - }, - ResourceTypes: map[string]providers.Schema{ - "test_thing": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "v": { - Type: cty.String, - Required: true, - }, - }, - }, - }, - }, - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return 
providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - RequiresReplace: []cty.Path{ - cty.GetAttrPath("v"), - }, - } - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_thing.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"v":"hello"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["test_thing"].Block - ty := schema.ImpliedType() - - if got, want := len(plan.Changes.Resources), 1; got != want { - t.Fatalf("got %d changes; want %d", got, want) - } - - for _, res := range plan.Changes.Resources { - t.Run(res.Addr.String(), func(t *testing.T) { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "test_thing.foo": - if got, want := ric.Action, plans.DeleteThenCreate; got != want { - t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) - } - if got, want := ric.ActionReason, plans.ResourceInstanceReplaceBecauseCannotUpdate; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "v": cty.StringVal("goodbye"), - }), ric.After) - default: - t.Fatalf("unexpected resource instance %s", i) - } - }) - } -} - -func TestContext2Plan_taint(t *testing.T) { - m := testModule(t, "plan-taint") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","num":"2","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"baz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - t.Run(res.Addr.String(), func(t *testing.T) { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.bar": - if got, want := res.Action, plans.DeleteThenCreate; got != want { - t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) - } - if got, want := res.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.StringVal("2"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo": - if got, want := res.Action, plans.NoOp; got != want { - t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) - } - if got, want := res.ActionReason, 
plans.ResourceInstanceChangeNoReason; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - default: - t.Fatal("unknown instance:", i) - } - }) - } -} - -func TestContext2Plan_taintIgnoreChanges(t *testing.T) { - m := testModule(t, "plan-taint-ignore-changes") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "vars": {Type: cty.String, Optional: true}, - "type": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"foo","vars":"foo","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo": - if got, want := res.Action, plans.DeleteThenCreate; got != want { - t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) - } - if got, want := res.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != 
want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("foo"), - "vars": cty.StringVal("foo"), - "type": cty.StringVal("aws_instance"), - }), ric.Before) - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "vars": cty.StringVal("foo"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// Fails about 50% of the time before the fix for GH-4982, covers the fix. -func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) { - m := testModule(t, "plan-taint-interpolated-count") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - for i := 0; i < 100; i++ { - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state.DeepCopy(), 
SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 3 { - t.Fatal("expected 3 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo[0]": - if got, want := ric.Action, plans.DeleteThenCreate; got != want { - t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) - } - if got, want := ric.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != want { - t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "type": cty.StringVal("aws_instance"), - }), ric.Before) - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "aws_instance.foo[1]", "aws_instance.foo[2]": - if res.Action != plans.NoOp { - t.Fatalf("resource %s should not be changed", i) - } - default: - t.Fatal("unknown instance:", i) - } - } - } -} - -func TestContext2Plan_targeted(t *testing.T) { - m := testModule(t, "plan-targeted") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", i) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// Test that targeting a module properly plans any inputs that depend -// on another module. -func TestContext2Plan_targetedCrossModule(t *testing.T) { - m := testModule(t, "plan-targeted-cross-module") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("B", addrs.NoKey), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", ric.Addr) - } - switch i := ric.Addr.String(); i { - case "module.A.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": 
cty.StringVal("bar"), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.B.aws_instance.bar": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "foo": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_targetedModuleWithProvider(t *testing.T) { - m := testModule(t, "plan-targeted-module-with-provider") - p := testProvider("null") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "key": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "null_resource": { - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child2", addrs.NoKey), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["null_resource"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if ric.Addr.String() != "module.child2.null_resource.foo" { - t.Fatalf("unexpcetd resource: %s", ric.Addr) - } -} - -func TestContext2Plan_targetedOrphan(t *testing.T) { - m := testModule(t, "plan-targeted-orphan") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - 
mustResourceInstanceAddr("aws_instance.orphan").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-789xyz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.nottargeted").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "orphan", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.orphan": - if res.Action != plans.Delete { - t.Fatalf("resource %s should be destroyed", ric.Addr) - } - default: - t.Fatal("unknown instance:", i) - } - } -} - -// https://github.com/hashicorp/terraform/issues/2538 -func TestContext2Plan_targetedModuleOrphan(t *testing.T) { - m := testModule(t, "plan-targeted-module-orphan") - p := testProvider("aws") - - state := states.NewState() - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.orphan").Resource, - &states.ResourceInstanceObjectSrc{ - 
Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-789xyz"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.nottargeted").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.DestroyMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( - addrs.ManagedResourceMode, "aws_instance", "orphan", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if ric.Addr.String() != "module.child.aws_instance.orphan" { - t.Fatalf("unexpected resource :%s", ric.Addr) - } - if res.Action != plans.Delete { - t.Fatalf("resource %s should be deleted", ric.Addr) - } -} - -func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { - m := testModule(t, "plan-targeted-module-untargeted-variable") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", 
"blue", - ), - addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", ric.Addr) - } - switch i := ric.Addr.String(); i { - case "aws_instance.blue": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - case "module.blue_mod.aws_instance.mod": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "value": cty.UnknownVal(cty.String), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -// ensure that outputs missing references due to targetting are removed from -// the graph. 
-func TestContext2Plan_outputContainsTargetedResource(t *testing.T) { - m := testModule(t, "plan-untargeted-resource-output") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource( - addrs.ManagedResourceMode, "aws_instance", "a", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("err: %s", diags) - } - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want 1", diags) - } - if got, want := diags[0].Severity(), tfdiags.Warning; got != want { - t.Errorf("wrong diagnostic severity %#v; want %#v", got, want) - } - if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want { - t.Errorf("wrong diagnostic summary %#v; want %#v", got, want) - } -} - -// https://github.com/hashicorp/terraform/issues/4515 -func TestContext2Plan_targetedOverTen(t *testing.T) { - m := testModule(t, "plan-targeted-over-ten") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - for i := 0; i < 13; i++ { - key := fmt.Sprintf("aws_instance.foo[%d]", i) - id := fmt.Sprintf("i-abc%d", i) - attrs := fmt.Sprintf(`{"id":"%s","type":"aws_instance"}`, id) - - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr(key).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(attrs), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Targets: []addrs.Targetable{ - 
addrs.RootModuleInstance.ResourceInstance( - addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1), - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - if res.Action != plans.NoOp { - t.Fatalf("unexpected action %s for %s", res.Action, ric.Addr) - } - } -} - -func TestContext2Plan_provider(t *testing.T) { - m := testModule(t, "plan-provider") - p := testProvider("aws") - - var value interface{} - p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - value = req.Config.GetAttr("foo").AsString() - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - opts := &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCaller, - }, - }, - } - - if _, err := ctx.Plan(m, states.NewState(), opts); err != nil { - t.Fatalf("err: %s", err) - } - - if value != "bar" { - t.Fatalf("bad: %#v", value) - } -} - -func TestContext2Plan_varListErr(t *testing.T) { - m := testModule(t, "plan-var-list-err") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - - if err == nil { - t.Fatal("should error") - } -} - -func TestContext2Plan_ignoreChanges(t *testing.T) { - m := testModule(t, "plan-ignore-changes") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("ami-1234abcd"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if ric.Addr.String() != "aws_instance.foo" { - t.Fatalf("unexpected resource: %s", ric.Addr) - } - - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.StringVal("ami-abcd1234"), - "type": cty.StringVal("aws_instance"), - }), ric.After) -} - -func TestContext2Plan_ignoreChangesWildcard(t *testing.T) { - m := testModule(t, "plan-ignore-changes-wildcard") - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - // computed attributes should not be set in config - id := req.Config.GetAttr("id") - if !id.IsNull() { - t.Error("computed id set in plan config") - } - - foo := req.Config.GetAttr("foo") - if foo.IsNull() { - t.Error(`missing "foo" during plan, was set to "bar" in state and config`) - } - - return testDiffFn(req) - } - - state := 
states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","instance":"t2.micro","type":"aws_instance","foo":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("ami-1234abcd"), - SourceType: ValueFromCaller, - }, - "bar": &InputValue{ - Value: cty.StringVal("t2.small"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.NoOp { - t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Changes.Resources)) - } - } -} - -func TestContext2Plan_ignoreChangesInMap(t *testing.T) { - p := testProvider("test") - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_ignore_changes_map": { - Attributes: map[string]*configschema.Attribute{ - "tags": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - s := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_ignore_changes_map", - Name: "foo", - 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","tags":{"ignored":"from state","other":"from state"},"type":"aws_instance"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - m := testModule(t, "plan-ignore-changes-in-map") - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, s, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["test_ignore_changes_map"].Block - ty := schema.ImpliedType() - - if got, want := len(plan.Changes.Resources), 1; got != want { - t.Fatalf("wrong number of changes %d; want %d", got, want) - } - - res := plan.Changes.Resources[0] - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - if res.Action != plans.Update { - t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action) - } - - if got, want := ric.Addr.String(), "test_ignore_changes_map.foo"; got != want { - t.Fatalf("unexpected resource address %s; want %s", got, want) - } - - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "tags": cty.MapVal(map[string]cty.Value{ - "ignored": cty.StringVal("from state"), - "other": cty.StringVal("from config"), - }), - }), ric.After) -} - -func TestContext2Plan_ignoreChangesSensitive(t *testing.T) { - m := testModule(t, "plan-ignore-changes-sensitive") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: 
[]byte(`{"id":"bar","ami":"ami-abcd1234","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("ami-1234abcd"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if ric.Addr.String() != "aws_instance.foo" { - t.Fatalf("unexpected resource: %s", ric.Addr) - } - - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.StringVal("bar"), - "ami": cty.StringVal("ami-abcd1234"), - "type": cty.StringVal("aws_instance"), - }), ric.After) -} - -func TestContext2Plan_moduleMapLiteral(t *testing.T) { - m := testModule(t, "plan-module-map-literal") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "meta": {Type: cty.Map(cty.String), Optional: true}, - "tags": {Type: cty.Map(cty.String), Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - s := req.ProposedNewState.AsValueMap() - m := s["tags"].AsValueMap() - - if m["foo"].AsString() != "bar" { - t.Fatalf("Bad value in tags attr: %#v", m) - } - - meta := 
s["meta"].AsValueMap() - if len(meta) != 0 { - t.Fatalf("Meta attr not empty: %#v", meta) - } - return testDiffFn(req) - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } -} - -func TestContext2Plan_computedValueInMap(t *testing.T) { - m := testModule(t, "plan-computed-value-in-map") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "looked_up": {Type: cty.String, Optional: true}, - }, - }, - "aws_computed_source": { - Attributes: map[string]*configschema.Attribute{ - "computed_read_only": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp = testDiffFn(req) - - if req.TypeName != "aws_computed_source" { - return - } - - planned := resp.PlannedState.AsValueMap() - planned["computed_read_only"] = cty.UnknownVal(cty.String) - resp.PlannedState = cty.ObjectVal(planned) - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block - - ric, err := res.Decode(schema.ImpliedType()) - if err != nil { 
- t.Fatal(err) - } - - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", ric.Addr) - } - - switch i := ric.Addr.String(); i { - case "aws_computed_source.intermediates": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "computed_read_only": cty.UnknownVal(cty.String), - }), ric.After) - case "module.test_mod.aws_instance.inner2": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "looked_up": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { - m := testModule(t, "plan-module-variable-from-splat") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "thing": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) != 4 { - t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block - - ric, err := res.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", ric.Addr) - } - - switch i := ric.Addr.String(); i { - case "module.mod1.aws_instance.test[0]", - "module.mod1.aws_instance.test[1]", - "module.mod2.aws_instance.test[0]", - "module.mod2.aws_instance.test[1]": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "thing": 
cty.StringVal("doesnt"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { - m := testModule(t, "plan-cbd-depends-datasource") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "num": {Type: cty.String, Optional: true}, - "computed": {Type: cty.String, Optional: true, Computed: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.Number, Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - computedVal := req.ProposedNewState.GetAttr("computed") - if computedVal.IsNull() { - computedVal = cty.UnknownVal(cty.String) - } - return providers.PlanResourceChangeResponse{ - PlannedState: cty.ObjectVal(map[string]cty.Value{ - "num": req.ProposedNewState.GetAttr("num"), - "computed": computedVal, - }), - } - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - cfg := req.Config.AsValueMap() - cfg["id"] = cty.StringVal("data_id") - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(cfg), - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - seenAddrs := make(map[string]struct{}) - for _, res := range plan.Changes.Resources { - var schema *configschema.Block - switch res.Addr.Resource.Resource.Mode { - case 
addrs.DataResourceMode: - schema = p.GetProviderSchemaResponse.DataSources[res.Addr.Resource.Resource.Type].Block - case addrs.ManagedResourceMode: - schema = p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block - } - - ric, err := res.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - seenAddrs[ric.Addr.String()] = struct{}{} - - t.Run(ric.Addr.String(), func(t *testing.T) { - switch i := ric.Addr.String(); i { - case "aws_instance.foo[0]": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "num": cty.StringVal("2"), - "computed": cty.StringVal("data_id"), - }), ric.After) - case "aws_instance.foo[1]": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "num": cty.StringVal("2"), - "computed": cty.StringVal("data_id"), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - }) - } - - wantAddrs := map[string]struct{}{ - "aws_instance.foo[0]": {}, - "aws_instance.foo[1]": {}, - } - if !cmp.Equal(seenAddrs, wantAddrs) { - t.Errorf("incorrect addresses in changeset:\n%s", cmp.Diff(wantAddrs, seenAddrs)) - } -} - -// interpolated lists need to be stored in the original order. 
-func TestContext2Plan_listOrder(t *testing.T) { - m := testModule(t, "plan-list-order") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.List(cty.String), Optional: true}, - }, - }, - }, - }) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - changes := plan.Changes - rDiffA := changes.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "a", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - rDiffB := changes.ResourceInstance(addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "b", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - - if !cmp.Equal(rDiffA.After, rDiffB.After, valueComparer) { - t.Fatal(cmp.Diff(rDiffA.After, rDiffB.After, valueComparer)) - } -} - -// Make sure ignore-changes doesn't interfere with set/list/map diffs. -// If a resource was being replaced by a RequiresNew attribute that gets -// ignored, we need to filter the diff properly to properly update rather than -// replace. 
-func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) { - m := testModule(t, "plan-ignore-changes-with-flatmaps") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "user_data": {Type: cty.String, Optional: true}, - "require_new": {Type: cty.String, Optional: true}, - - // This test predates the 0.12 work to integrate cty and - // HCL, and so it was ported as-is where its expected - // test output was clearly expecting a list of maps here - // even though it is named "set". - "set": {Type: cty.List(cty.Map(cty.String)), Optional: true}, - "lst": {Type: cty.List(cty.String), Optional: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{ - "user_data":"x","require_new":"", - "set":[{"a":"1"}], - "lst":["j"] - }`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - res := plan.Changes.Resources[0] - schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block - - ric, err := res.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if res.Action != plans.Update { - t.Fatalf("resource %s should be updated, got %s", ric.Addr, ric.Action) - } - - if 
ric.Addr.String() != "aws_instance.foo" { - t.Fatalf("unknown resource: %s", ric.Addr) - } - - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "lst": cty.ListVal([]cty.Value{ - cty.StringVal("j"), - cty.StringVal("k"), - }), - "require_new": cty.StringVal(""), - "user_data": cty.StringVal("x"), - "set": cty.ListVal([]cty.Value{cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("1"), - "b": cty.StringVal("2"), - })}), - }), ric.After) -} - -// TestContext2Plan_resourceNestedCount ensures resource sets that depend on -// the count of another resource set (ie: count of a data source that depends -// on another data source's instance count - data.x.foo.*.id) get properly -// normalized to the indexes they should be. This case comes up when there is -// an existing state (after an initial apply). -func TestContext2Plan_resourceNestedCount(t *testing.T) { - m := testModule(t, "nested-resource-count-plan") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo0","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo1","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: 
states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar0","type":"aws_instance"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar1","type":"aws_instance"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.baz[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz0","type":"aws_instance"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.baz[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"baz1","type":"aws_instance"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("validate errors: %s", diags.Err()) - } - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("plan errors: %s", diags.Err()) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.NoOp { - t.Fatalf("resource %s should not change, plan returned %s", res.Addr, res.Action) - } - } -} - -// Higher level 
test at TestResource_dataSourceListApplyPanic -func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) { - m := testModule(t, "plan-computed-attr-ref-type-mismatch") - p := testProvider("aws") - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - var diags tfdiags.Diagnostics - if req.TypeName == "aws_instance" { - amiVal := req.Config.GetAttr("ami") - if amiVal.Type() != cty.String { - diags = diags.Append(fmt.Errorf("Expected ami to be cty.String, got %#v", amiVal)) - } - } - return providers.ValidateResourceConfigResponse{ - Diagnostics: diags, - } - } - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - if req.TypeName != "aws_ami_list" { - t.Fatalf("Reached apply for unexpected resource type! %s", req.TypeName) - } - // Pretend like we make a thing and the computed list "ids" is populated - s := req.PlannedState.AsValueMap() - s["id"] = cty.StringVal("someid") - s["ids"] = cty.ListVal([]cty.Value{ - cty.StringVal("ami-abc123"), - cty.StringVal("ami-bcd345"), - }) - - resp.NewState = cty.ObjectVal(s) - return - } - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("Succeeded; want type mismatch error for 'ami' argument") - } - - expected := `Inappropriate value for attribute "ami"` - if errStr := diags.Err().Error(); !strings.Contains(errStr, expected) { - t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, expected) - } -} - -func TestContext2Plan_selfRef(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: 
map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - m := testModule(t, "plan-self-ref") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected validation failure: %s", diags.Err()) - } - - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("plan succeeded; want error") - } - - gotErrStr := diags.Err().Error() - wantErrStr := "Self-referential block" - if !strings.Contains(gotErrStr, wantErrStr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } -} - -func TestContext2Plan_selfRefMulti(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - m := testModule(t, "plan-self-ref-multi") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected validation failure: %s", diags.Err()) - } - - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("plan succeeded; want error") - } - - gotErrStr := diags.Err().Error() - wantErrStr := "Self-referential block" - if !strings.Contains(gotErrStr, wantErrStr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } -} - -func TestContext2Plan_selfRefMultiAll(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ 
- ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.List(cty.String), Optional: true}, - }, - }, - }, - }) - - m := testModule(t, "plan-self-ref-multi-all") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected validation failure: %s", diags.Err()) - } - - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("plan succeeded; want error") - } - - gotErrStr := diags.Err().Error() - - // The graph is checked for cycles before we can walk it, so we don't - // encounter the self-reference check. - //wantErrStr := "Self-referential block" - wantErrStr := "Cycle" - if !strings.Contains(gotErrStr, wantErrStr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } -} - -func TestContext2Plan_invalidOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "aws_data_source" "name" {} - -output "out" { - value = data.aws_data_source.name.missing -}`, - }) - - p := testProvider("aws") - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_id"), - "foo": cty.StringVal("foo"), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - t.Fatal("succeeded; want errors") - } - - gotErrStr := diags.Err().Error() - wantErrStr := "Unsupported attribute" - if !strings.Contains(gotErrStr, wantErrStr) { - 
t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } -} - -func TestContext2Plan_invalidModuleOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "child/main.tf": ` -data "aws_data_source" "name" {} - -output "out" { - value = "${data.aws_data_source.name.missing}" -}`, - "main.tf": ` -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - foo = "${module.child.out}" -}`, - }) - - p := testProvider("aws") - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_id"), - "foo": cty.StringVal("foo"), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - t.Fatal("succeeded; want errors") - } - - gotErrStr := diags.Err().Error() - wantErrStr := "Unsupported attribute" - if !strings.Contains(gotErrStr, wantErrStr) { - t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) - } -} - -func TestContext2Plan_variableValidation(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "x" { - default = "bar" -} - -resource "aws_instance" "foo" { - foo = var.x -}`, - }) - - p := testProvider("aws") - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - foo := req.Config.GetAttr("foo").AsString() - if foo == "bar" { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo cannot be bar")) - } - return - } - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = 
req.ProposedNewState - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - t.Fatal("succeeded; want errors") - } -} - -func TestContext2Plan_variableSensitivity(t *testing.T) { - m := testModule(t, "plan-variable-sensitivity") - - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.StringVal("foo").Mark(marks.Sensitive), - }), ric.After) - if len(res.ChangeSrc.BeforeValMarks) != 0 { - t.Errorf("unexpected BeforeValMarks: %#v", res.ChangeSrc.BeforeValMarks) - } - if len(res.ChangeSrc.AfterValMarks) != 1 { - t.Errorf("unexpected AfterValMarks: %#v", res.ChangeSrc.AfterValMarks) - 
continue - } - pvm := res.ChangeSrc.AfterValMarks[0] - if got, want := pvm.Path, cty.GetAttrPath("foo"); !got.Equals(want) { - t.Errorf("unexpected path for mark\n got: %#v\nwant: %#v", got, want) - } - if got, want := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !got.Equal(want) { - t.Errorf("unexpected value for mark\n got: %#v\nwant: %#v", got, want) - } - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_variableSensitivityModule(t *testing.T) { - m := testModule(t, "plan-variable-sensitivity-module") - - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - SetVariables: InputValues{ - "sensitive_var": {Value: cty.NilVal}, - "another_var": &InputValue{ - Value: cty.StringVal("boop"), - SourceType: ValueFromCaller, - }, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.child.aws_instance.foo": - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.StringVal("foo").Mark(marks.Sensitive), - "value": cty.StringVal("boop").Mark(marks.Sensitive), - }), ric.After) - if len(res.ChangeSrc.BeforeValMarks) != 0 { - 
t.Errorf("unexpected BeforeValMarks: %#v", res.ChangeSrc.BeforeValMarks) - } - if len(res.ChangeSrc.AfterValMarks) != 2 { - t.Errorf("expected AfterValMarks to contain two elements: %#v", res.ChangeSrc.AfterValMarks) - continue - } - // validate that the after marks have "foo" and "value" - contains := func(pvmSlice []cty.PathValueMarks, stepName string) bool { - for _, pvm := range pvmSlice { - if pvm.Path.Equals(cty.GetAttrPath(stepName)) { - if pvm.Marks.Equal(cty.NewValueMarks(marks.Sensitive)) { - return true - } - } - } - return false - } - if !contains(res.ChangeSrc.AfterValMarks, "foo") { - t.Error("unexpected AfterValMarks to contain \"foo\" with sensitive mark") - } - if !contains(res.ChangeSrc.AfterValMarks, "value") { - t.Error("unexpected AfterValMarks to contain \"value\" with sensitive mark") - } - default: - t.Fatal("unknown instance:", i) - } - } -} - -func checkVals(t *testing.T, expected, got cty.Value) { - t.Helper() - // The GoStringer format seems to result in the closest thing to a useful - // diff for values with marks. - // TODO: if we want to continue using cmp.Diff on cty.Values, we should - // make a transformer that creates a more comparable structure. 
- valueTrans := cmp.Transformer("gostring", func(v cty.Value) string { - return fmt.Sprintf("%#v\n", v) - }) - if !cmp.Equal(expected, got, valueComparer, typeComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, got, valueTrans, equateEmpty)) - } -} - -func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) cty.Value { - t.Helper() - v, err := schema.CoerceValue( - cty.ObjectVal(m), - ) - if err != nil { - t.Fatal(err) - } - return v -} - -func TestContext2Plan_requiredModuleOutput(t *testing.T) { - m := testModule(t, "plan-required-output") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "required": {Type: cty.String, Required: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - switch i := ric.Addr.String(); i { - case "test_resource.root": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.UnknownVal(cty.String), - }) - case 
"module.mod.test_resource.for_output": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.StringVal("val"), - }) - default: - t.Fatal("unknown instance:", i) - } - - checkVals(t, expected, ric.After) - }) - } -} - -func TestContext2Plan_requiredModuleObject(t *testing.T) { - m := testModule(t, "plan-required-whole-mod") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "required": {Type: cty.String, Required: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - - schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 2 { - t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - var expected cty.Value - switch i := ric.Addr.String(); i { - case "test_resource.root": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.UnknownVal(cty.String), - }) - case "module.mod.test_resource.for_output": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.StringVal("val"), - }) - default: - t.Fatal("unknown 
instance:", i) - } - - checkVals(t, expected, ric.After) - }) - } -} - -func TestContext2Plan_expandOrphan(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - count = 1 - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { -} -`, - }) - - state := states.NewState() - state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(0))).SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"child","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(1))).SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"child","type":"aws_instance"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - expected := map[string]plans.Action{ - `module.mod[1].aws_instance.foo`: plans.Delete, - `module.mod[0].aws_instance.foo`: plans.NoOp, - } - - for _, res := range plan.Changes.Resources { - want := expected[res.Addr.String()] - if res.Action != want { - t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) - } - delete(expected, res.Addr.String()) - } - - for res, action := range expected { - t.Errorf("missing %s change for %s", action, res) - } -} - -func TestContext2Plan_indexInVar(t *testing.T) { - m := testModuleInline(t, map[string]string{ - 
"main.tf": ` -module "a" { - count = 1 - source = "./mod" - in = "test" -} - -module "b" { - count = 1 - source = "./mod" - in = length(module.a) -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { - foo = var.in -} - -variable "in" { -} - -output"out" { - value = aws_instance.foo.id -} -`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Plan_targetExpandedAddress(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - count = 3 - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { - count = 2 -} -`, - }) - - p := testProvider("aws") - - targets := []addrs.Targetable{} - target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo[0]") - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - targets = append(targets, target.Subject) - - target, diags = addrs.ParseTargetStr("module.mod[2]") - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - targets = append(targets, target.Subject) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: targets, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - expected := map[string]plans.Action{ - // the single targeted mod[1] instances - `module.mod[1].aws_instance.foo[0]`: plans.Create, - // the whole mode[2] - `module.mod[2].aws_instance.foo[0]`: plans.Create, - `module.mod[2].aws_instance.foo[1]`: plans.Create, - } - - for _, res := range plan.Changes.Resources { - want := expected[res.Addr.String()] - if 
res.Action != want { - t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) - } - delete(expected, res.Addr.String()) - } - - for res, action := range expected { - t.Errorf("missing %s change for %s", action, res) - } -} - -func TestContext2Plan_targetResourceInModuleInstance(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - count = 3 - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { -} -`, - }) - - p := testProvider("aws") - - target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo") - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - targets := []addrs.Targetable{target.Subject} - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: targets, - }) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - expected := map[string]plans.Action{ - // the single targeted mod[1] instance - `module.mod[1].aws_instance.foo`: plans.Create, - } - - for _, res := range plan.Changes.Resources { - want := expected[res.Addr.String()] - if res.Action != want { - t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) - } - delete(expected, res.Addr.String()) - } - - for res, action := range expected { - t.Errorf("missing %s change for %s", action, res) - } -} - -func TestContext2Plan_moduleRefIndex(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod" { - for_each = { - a = "thing" - } - in = null - source = "./mod" -} - -module "single" { - source = "./mod" - in = module.mod["a"] -} -`, - "mod/main.tf": ` -variable "in" { -} - -output "out" { - value = "foo" -} - -resource "aws_instance" "foo" { -} -`, - }) - - p := testProvider("aws") - - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Plan_noChangeDataPlan(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "test_data_source" "foo" {} -`, - }) - - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - DataSources: map[string]*configschema.Block{ - "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("data_id"), - "foo": cty.StringVal("foo"), - }), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("data.test_data_source.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"data_id", "foo":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - for _, res := range plan.Changes.Resources { - if res.Action != plans.NoOp { - t.Fatalf("expected NoOp, got: %q %s", res.Addr, res.Action) - } - } -} - -// for_each can reference a resource with 0 instances -func TestContext2Plan_scaleInForEach(t *testing.T) { - p := testProvider("test") - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - m 
= {} -} - -resource "test_instance" "a" { - for_each = local.m -} - -resource "test_instance" "b" { - for_each = test_instance.a -} -`}) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a0"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_instance.a")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - assertNoErrors(t, diags) - - t.Run("test_instance.a[0]", func(t *testing.T) { - instAddr := mustResourceInstanceAddr("test_instance.a[0]") - change := plan.Changes.ResourceInstance(instAddr) - if change == nil { - t.Fatalf("no planned change for %s", instAddr) - } - if got, want := change.PrevRunAddr, instAddr; !want.Equal(got) { - t.Errorf("wrong previous run address for %s %s; want %s", instAddr, got, want) - } - if got, want := change.Action, plans.Delete; got != want { - t.Errorf("wrong action for %s %s; want %s", instAddr, got, want) - } - if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { - t.Errorf("wrong action reason for %s %s; want %s", instAddr, got, want) - } - }) - t.Run("test_instance.b", func(t *testing.T) { - instAddr := mustResourceInstanceAddr("test_instance.b") - change := 
plan.Changes.ResourceInstance(instAddr) - if change == nil { - t.Fatalf("no planned change for %s", instAddr) - } - if got, want := change.PrevRunAddr, instAddr; !want.Equal(got) { - t.Errorf("wrong previous run address for %s %s; want %s", instAddr, got, want) - } - if got, want := change.Action, plans.Delete; got != want { - t.Errorf("wrong action for %s %s; want %s", instAddr, got, want) - } - if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { - t.Errorf("wrong action reason for %s %s; want %s", instAddr, got, want) - } - }) -} - -func TestContext2Plan_targetedModuleInstance(t *testing.T) { - m := testModule(t, "plan-targeted") - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("mod", addrs.IntKey(0)), - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - if len(plan.Changes.Resources) != 1 { - t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) - } - - for _, res := range plan.Changes.Resources { - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } - - switch i := ric.Addr.String(); i { - case "module.mod[0].aws_instance.foo": - if res.Action != plans.Create { - t.Fatalf("resource %s should be created", i) - } - checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "num": cty.NumberIntVal(2), - "type": cty.UnknownVal(cty.String), - }), ric.After) - default: - t.Fatal("unknown instance:", i) - } - } -} - -func TestContext2Plan_dataRefreshedInPlan(t *testing.T) { - m := 
testModuleInline(t, map[string]string{ - "main.tf": ` -data "test_data_source" "d" { -} -`}) - - p := testProvider("test") - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("this"), - "foo": cty.NullVal(cty.String), - }), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } - - d := plan.PriorState.ResourceInstance(mustResourceInstanceAddr("data.test_data_source.d")) - if d == nil || d.Current == nil { - t.Fatal("data.test_data_source.d not found in state:", plan.PriorState) - } - - if d.Current.Status != states.ObjectReady { - t.Fatal("expected data.test_data_source.d to be fully read in refreshed state, got status", d.Current.Status) - } -} - -func TestContext2Plan_dataReferencesResourceDirectly(t *testing.T) { - // When a data resource refers to a managed resource _directly_, any - // pending change for the managed resource will cause the data resource - // to be deferred to the apply step. - // See also TestContext2Plan_dataReferencesResourceIndirectly for the - // other case, where the reference is indirect. - - p := testProvider("test") - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source should not be read")) - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - x = "value" -} - -resource "test_resource" "a" { - value = local.x -} - -// test_resource.a.value can be resolved during plan, but the reference implies -// that the data source should wait until the resource is created. 
-data "test_data_source" "d" { - foo = test_resource.a.value -} - -// ensure referencing an indexed instance that has not yet created will also -// delay reading the data source -resource "test_resource" "b" { - count = 2 - value = local.x -} - -data "test_data_source" "e" { - foo = test_resource.b[0].value -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - rc := plan.Changes.ResourceInstance(addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_source", - Name: "d", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - if rc != nil { - if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseDependencyPending; got != want { - t.Errorf("wrong ActionReason\ngot: %s\nwant: %s", got, want) - } - } else { - t.Error("no change for test_data_source.e") - } -} - -func TestContext2Plan_dataReferencesResourceIndirectly(t *testing.T) { - // When a data resource refers to a managed resource indirectly, pending - // changes for the managed resource _do not_ cause the data resource to - // be deferred to apply. This is a pragmatic special case added for - // backward compatibility with the old situation where we would _always_ - // eagerly read data resources with known configurations, regardless of - // the plans for their dependencies. - // This test creates an indirection through a local value, but the same - // principle would apply for both input variable and output value - // indirection. - // - // See also TestContext2Plan_dataReferencesResourceDirectly for the - // other case, where the reference is direct. - // This special exception doesn't apply for a data resource that has - // custom conditions; see - // TestContext2Plan_dataResourceChecksManagedResourceChange for that - // situation. 
- - p := testProvider("test") - var applyCount int64 - p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - atomic.AddInt64(&applyCount, 1) - resp.NewState = req.PlannedState - return resp - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - if atomic.LoadInt64(&applyCount) == 0 { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source read before managed resource apply")) - } else { - resp.State = req.Config - } - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - x = "value" -} - -resource "test_resource" "a" { - value = local.x -} - -locals { - y = test_resource.a.value -} - -// test_resource.a.value would ideally cause a pending change for -// test_resource.a to defer this to the apply step, but we intentionally don't -// do that when it's indirect (through a local value, here) as a concession -// to backward compatibility. 
-data "test_data_source" "d" { - foo = local.y -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatalf("successful plan; want an error") - } - - if got, want := diags.Err().Error(), "data source read before managed resource apply"; !strings.Contains(got, want) { - t.Errorf("Missing expected error message\ngot: %s\nwant substring: %s", got, want) - } -} - -func TestContext2Plan_skipRefresh(t *testing.T) { - p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { -} -`}) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","type":"test_instance"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{ - Mode: plans.NormalMode, - SkipRefresh: true, - }) - assertNoErrors(t, diags) - - if p.ReadResourceCalled { - t.Fatal("Resource should not have been refreshed") - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) - } - } -} - -func TestContext2Plan_dataInModuleDependsOn(t *testing.T) { - p := testProvider("test") - - readDataSourceB := false - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - cfg := 
req.Config.AsValueMap() - foo := cfg["foo"].AsString() - - cfg["id"] = cty.StringVal("ID") - cfg["foo"] = cty.StringVal("new") - - if foo == "b" { - readDataSourceB = true - } - - resp.State = cty.ObjectVal(cfg) - return resp - } - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "a" { - source = "./mod_a" -} - -module "b" { - source = "./mod_b" - depends_on = [module.a] -}`, - "mod_a/main.tf": ` -data "test_data_source" "a" { - foo = "a" -}`, - "mod_b/main.tf": ` -data "test_data_source" "b" { - foo = "b" -}`, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - assertNoErrors(t, diags) - - // The change to data source a should not prevent data source b from being - // read. - if !readDataSourceB { - t.Fatal("data source b was not read during plan") - } -} - -func TestContext2Plan_rpcDiagnostics(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - resp := testDiffFn(req) - resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) - return resp - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if len(diags) == 0 { - 
t.Fatal("expected warnings") - } - - for _, d := range diags { - des := d.Description().Summary - if !strings.Contains(des, "frobble") { - t.Fatalf(`expected frobble, got %q`, des) - } - } -} - -// ignore_changes needs to be re-applied to the planned value for provider -// using the LegacyTypeSystem -func TestContext2Plan_legacyProviderIgnoreChanges(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { - lifecycle { - ignore_changes = [data] - } -} -`, - }) - - p := testProvider("test") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - m := req.ProposedNewState.AsValueMap() - // this provider "hashes" the data attribute as bar - m["data"] = cty.StringVal("bar") - - resp.PlannedState = cty.ObjectVal(m) - resp.LegacyTypeSystem = true - return resp - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "data": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","data":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Fatalf("expected no 
changes, got %s for %q", c.Action, c.Addr) - } - } -} - -func TestContext2Plan_validateIgnoreAll(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { - lifecycle { - ignore_changes = all - } -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "data": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - var diags tfdiags.Diagnostics - if req.TypeName == "test_instance" { - if !req.Config.GetAttr("id").IsNull() { - diags = diags.Append(errors.New("id cannot be set in config")) - } - } - return providers.ValidateResourceConfigResponse{ - Diagnostics: diags, - } - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","data":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - _, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } -} - -func TestContext2Plan_legacyProviderIgnoreAll(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { - lifecycle { - ignore_changes = all - } - data = "foo" -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = 
getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "data": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - plan := req.ProposedNewState.AsValueMap() - // Update both the computed id and the configured data. - // Legacy providers expect terraform to be able to ignore these. - - plan["id"] = cty.StringVal("updated") - plan["data"] = cty.StringVal("updated") - resp.PlannedState = cty.ObjectVal(plan) - resp.LegacyTypeSystem = true - return resp - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"orig","data":"orig"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Fatalf("expected NoOp plan, got %s\n", c.Action) - } - } -} - -func TestContext2Plan_dataRemovalNoProvider(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { -} -`, - }) - - p := testProvider("test") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_instance.a").Resource, - &states.ResourceInstanceObjectSrc{ - 
Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a","data":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - // the provider for this data source is no longer in the config, but that - // should not matter for state removal. - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("data.test_data_source.d").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"d"}`), - Dependencies: []addrs.ConfigResource{}, - }, - mustProviderConfig(`provider["registry.terraform.io/local/test"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - // We still need to be able to locate the provider to decode the - // state, since we do not know during init that this provider is - // only used for an orphaned data source. - addrs.NewProvider("registry.terraform.io", "local", "test"): testProviderFuncFixed(p), - }, - }) - _, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } -} - -func TestContext2Plan_noSensitivityChange(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "sensitive_var" { - default = "hello" - sensitive = true -} - -resource "test_resource" "foo" { - value = var.sensitive_var - sensitive_value = var.sensitive_var -}`, - }) - - p := testProvider("test") - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - 
AttrsJSON: []byte(`{"id":"foo", "value":"hello", "sensitive_value":"hello"}`), - AttrSensitivePaths: []cty.PathValueMarks{ - {Path: cty.Path{cty.GetAttrStep{Name: "value"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - {Path: cty.Path{cty.GetAttrStep{Name: "sensitive_value"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - for _, c := range plan.Changes.Resources { - if c.Action != plans.NoOp { - t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) - } - } -} - -func TestContext2Plan_variableCustomValidationsSensitive(t *testing.T) { - m := testModule(t, "validate-variable-custom-validations-child-sensitive") - - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Plan_nullOutputNoOp(t *testing.T) { - // this should always plan a NoOp change for the output - m := testModuleInline(t, map[string]string{ - "main.tf": ` -output "planned" { - value = false ? 
1 : null -} -`, - }) - - ctx := testContext2(t, &ContextOpts{}) - state := states.BuildState(func(s *states.SyncState) { - r := s.Module(addrs.RootModuleInstance) - r.SetOutputValue("planned", cty.NullVal(cty.DynamicPseudoType), false) - }) - plan, diags := ctx.Plan(m, state, DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - for _, c := range plan.Changes.Outputs { - if c.Action != plans.NoOp { - t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) - } - } -} - -func TestContext2Plan_createOutput(t *testing.T) { - // this should always plan a NoOp change for the output - m := testModuleInline(t, map[string]string{ - "main.tf": ` -output "planned" { - value = 1 -} -`, - }) - - ctx := testContext2(t, &ContextOpts{}) - plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - for _, c := range plan.Changes.Outputs { - if c.Action != plans.Create { - t.Fatalf("expected Create change, got %s for %q", c.Action, c.Addr) - } - } -} - -//////////////////////////////////////////////////////////////////////////////// -// NOTE: Due to the size of this file, new tests should be added to -// context_plan2_test.go. 
-//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/terraform/context_plugins.go b/internal/terraform/context_plugins.go deleted file mode 100644 index 4b3071cf6d84..000000000000 --- a/internal/terraform/context_plugins.go +++ /dev/null @@ -1,209 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" -) - -// contextPlugins represents a library of available plugins (providers and -// provisioners) which we assume will all be used with the same -// terraform.Context, and thus it'll be safe to cache certain information -// about the providers for performance reasons. -type contextPlugins struct { - providerFactories map[addrs.Provider]providers.Factory - provisionerFactories map[string]provisioners.Factory - - // We memoize the schemas we've previously loaded in here, to avoid - // repeatedly paying the cost of activating the same plugins to access - // their schemas in various different spots. We use schemas for many - // purposes in Terraform, so there isn't a single choke point where - // it makes sense to preload all of them. 
- providerSchemas map[addrs.Provider]*ProviderSchema - provisionerSchemas map[string]*configschema.Block - schemasLock sync.Mutex -} - -func newContextPlugins(providerFactories map[addrs.Provider]providers.Factory, provisionerFactories map[string]provisioners.Factory) *contextPlugins { - ret := &contextPlugins{ - providerFactories: providerFactories, - provisionerFactories: provisionerFactories, - } - ret.init() - return ret -} - -func (cp *contextPlugins) init() { - cp.providerSchemas = make(map[addrs.Provider]*ProviderSchema, len(cp.providerFactories)) - cp.provisionerSchemas = make(map[string]*configschema.Block, len(cp.provisionerFactories)) -} - -func (cp *contextPlugins) HasProvider(addr addrs.Provider) bool { - _, ok := cp.providerFactories[addr] - return ok -} - -func (cp *contextPlugins) NewProviderInstance(addr addrs.Provider) (providers.Interface, error) { - f, ok := cp.providerFactories[addr] - if !ok { - return nil, fmt.Errorf("unavailable provider %q", addr.String()) - } - - return f() - -} - -func (cp *contextPlugins) HasProvisioner(typ string) bool { - _, ok := cp.provisionerFactories[typ] - return ok -} - -func (cp *contextPlugins) NewProvisionerInstance(typ string) (provisioners.Interface, error) { - f, ok := cp.provisionerFactories[typ] - if !ok { - return nil, fmt.Errorf("unavailable provisioner %q", typ) - } - - return f() -} - -// ProviderSchema uses a temporary instance of the provider with the given -// address to obtain the full schema for all aspects of that provider. -// -// ProviderSchema memoizes results by unique provider address, so it's fine -// to repeatedly call this method with the same address if various different -// parts of Terraform all need the same schema information. 
-func (cp *contextPlugins) ProviderSchema(addr addrs.Provider) (*ProviderSchema, error) { - cp.schemasLock.Lock() - defer cp.schemasLock.Unlock() - - if schema, ok := cp.providerSchemas[addr]; ok { - return schema, nil - } - - log.Printf("[TRACE] terraform.contextPlugins: Initializing provider %q to read its schema", addr) - - provider, err := cp.NewProviderInstance(addr) - if err != nil { - return nil, fmt.Errorf("failed to instantiate provider %q to obtain schema: %s", addr, err) - } - defer provider.Close() - - resp := provider.GetProviderSchema() - if resp.Diagnostics.HasErrors() { - return nil, fmt.Errorf("failed to retrieve schema from provider %q: %s", addr, resp.Diagnostics.Err()) - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. 
- return nil, fmt.Errorf("provider %s has invalid negative schema version for its configuration blocks,which is a bug in the provider ", addr) - } - - for t, r := range resp.ResourceTypes { - if err := r.Block.InternalValidate(); err != nil { - return nil, fmt.Errorf("provider %s has invalid schema for managed resource type %q, which is a bug in the provider: %q", addr, t, err) - } - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - return nil, fmt.Errorf("provider %s has invalid negative schema version for managed resource type %q, which is a bug in the provider", addr, t) - } - } - - for t, d := range resp.DataSources { - if err := d.Block.InternalValidate(); err != nil { - return nil, fmt.Errorf("provider %s has invalid schema for data resource type %q, which is a bug in the provider: %q", addr, t, err) - } - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - return nil, fmt.Errorf("provider %s has invalid negative schema version for data resource type %q, which is a bug in the provider", addr, t) - } - } - - if resp.ProviderMeta.Block != nil { - s.ProviderMeta = resp.ProviderMeta.Block - } - - cp.providerSchemas[addr] = s - return s, nil -} - -// ProviderConfigSchema is a helper wrapper around ProviderSchema which first -// reads the full schema of the given provider and then extracts just the -// provider's configuration schema, which defines what's expected in a -// "provider" block in the configuration when configuring this provider. 
-func (cp *contextPlugins) ProviderConfigSchema(providerAddr addrs.Provider) (*configschema.Block, error) { - providerSchema, err := cp.ProviderSchema(providerAddr) - if err != nil { - return nil, err - } - - return providerSchema.Provider, nil -} - -// ResourceTypeSchema is a helper wrapper around ProviderSchema which first -// reads the schema of the given provider and then tries to find the schema -// for the resource type of the given resource mode in that provider. -// -// ResourceTypeSchema will return an error if the provider schema lookup -// fails, but will return nil if the provider schema lookup succeeds but then -// the provider doesn't have a resource of the requested type. -// -// Managed resource types have versioned schemas, so the second return value -// is the current schema version number for the requested resource. The version -// is irrelevant for other resource modes. -func (cp *contextPlugins) ResourceTypeSchema(providerAddr addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (*configschema.Block, uint64, error) { - providerSchema, err := cp.ProviderSchema(providerAddr) - if err != nil { - return nil, 0, err - } - - schema, version := providerSchema.SchemaForResourceType(resourceMode, resourceType) - return schema, version, nil -} - -// ProvisionerSchema uses a temporary instance of the provisioner with the -// given type name to obtain the schema for that provisioner's configuration. -// -// ProvisionerSchema memoizes results by provisioner type name, so it's fine -// to repeatedly call this method with the same name if various different -// parts of Terraform all need the same schema information. 
-func (cp *contextPlugins) ProvisionerSchema(typ string) (*configschema.Block, error) { - cp.schemasLock.Lock() - defer cp.schemasLock.Unlock() - - if schema, ok := cp.provisionerSchemas[typ]; ok { - return schema, nil - } - - log.Printf("[TRACE] terraform.contextPlugins: Initializing provisioner %q to read its schema", typ) - provisioner, err := cp.NewProvisionerInstance(typ) - if err != nil { - return nil, fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %s", typ, err) - } - defer provisioner.Close() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - return nil, fmt.Errorf("failed to retrieve schema from provisioner %q: %s", typ, resp.Diagnostics.Err()) - } - - cp.provisionerSchemas[typ] = resp.Provisioner - return resp.Provisioner, nil -} diff --git a/internal/terraform/context_plugins_test.go b/internal/terraform/context_plugins_test.go deleted file mode 100644 index 26813ceb0dfa..000000000000 --- a/internal/terraform/context_plugins_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package terraform - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" -) - -// simpleMockPluginLibrary returns a plugin library pre-configured with -// one provider and one provisioner, both called "test". -// -// The provider is built with simpleMockProvider and the provisioner with -// simpleMockProvisioner, and all schemas used in both are as built by -// function simpleTestSchema. -// -// Each call to this function produces an entirely-separate set of objects, -// so the caller can feel free to modify the returned value to further -// customize the mocks contained within. 
-func simpleMockPluginLibrary() *contextPlugins { - // We create these out here, rather than in the factory functions below, - // because we want each call to the factory to return the _same_ instance, - // so that test code can customize it before passing this component - // factory into real code under test. - provider := simpleMockProvider() - provisioner := simpleMockProvisioner() - ret := &contextPlugins{ - providerFactories: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): func() (providers.Interface, error) { - return provider, nil - }, - }, - provisionerFactories: map[string]provisioners.Factory{ - "test": func() (provisioners.Interface, error) { - return provisioner, nil - }, - }, - } - ret.init() // prepare the internal cache data structures - return ret -} - -// simpleTestSchema returns a block schema that contains a few optional -// attributes for use in tests. -// -// The returned schema contains the following optional attributes: -// -// - test_string, of type string -// - test_number, of type number -// - test_bool, of type bool -// - test_list, of type list(string) -// - test_map, of type map(string) -// -// Each call to this function produces an entirely new schema instance, so -// callers can feel free to modify it once returned. 
-func simpleTestSchema() *configschema.Block { - return &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "test_string": { - Type: cty.String, - Optional: true, - }, - "test_number": { - Type: cty.Number, - Optional: true, - }, - "test_bool": { - Type: cty.Bool, - Optional: true, - }, - "test_list": { - Type: cty.List(cty.String), - Optional: true, - }, - "test_map": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - } -} diff --git a/internal/terraform/context_refresh.go b/internal/terraform/context_refresh.go deleted file mode 100644 index cac5232b0d0f..000000000000 --- a/internal/terraform/context_refresh.go +++ /dev/null @@ -1,37 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Refresh is a vestigial operation that is equivalent to call to Plan and -// then taking the prior state of the resulting plan. -// -// We retain this only as a measure of semi-backward-compatibility for -// automation relying on the "terraform refresh" subcommand. The modern way -// to get this effect is to create and then apply a plan in the refresh-only -// mode. -func (c *Context) Refresh(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*states.State, tfdiags.Diagnostics) { - if opts == nil { - // This fallback is only here for tests, not for real code. 
- opts = &PlanOpts{ - Mode: plans.NormalMode, - } - } - if opts.Mode != plans.NormalMode { - panic("can only Refresh in the normal planning mode") - } - - log.Printf("[DEBUG] Refresh is really just plan now, so creating a %s plan", opts.Mode) - p, diags := c.Plan(config, prevRunState, opts) - if diags.HasErrors() { - return nil, diags - } - - return p.PriorState, diags -} diff --git a/internal/terraform/context_refresh_test.go b/internal/terraform/context_refresh_test.go deleted file mode 100644 index 9b4f4dab5566..000000000000 --- a/internal/terraform/context_refresh_test.go +++ /dev/null @@ -1,1685 +0,0 @@ -package terraform - -import ( - "reflect" - "sort" - "strings" - "sync" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -func TestContext2Refresh(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-basic") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.web").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - readState, err := hcl2shim.HCL2ValueFromFlatmap(map[string]string{"id": "foo", "foo": "baz"}, ty) - if err != nil { - t.Fatal(err) - } - 
- p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: readState, - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - - mod := s.RootModule() - fromState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(ty) - if err != nil { - t.Fatal(err) - } - - newState, err := schema.CoerceValue(fromState.Value) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(readState, newState, valueComparer) { - t.Fatal(cmp.Diff(readState, newState, valueComparer, equateEmpty)) - } -} - -func TestContext2Refresh_dynamicAttr(t *testing.T) { - m := testModule(t, "refresh-dynamic") - - startingState := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"dynamic":{"type":"string","value":"hello"}}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - readStateVal := cty.ObjectVal(map[string]cty.Value{ - "dynamic": cty.EmptyTupleVal, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "dynamic": {Type: cty.DynamicPseudoType, Optional: true}, - }, - }, - }, - }) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{ - NewState: readStateVal, - } - } - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - 
resp.PlannedState = req.ProposedNewState - return resp - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - schema := p.GetProviderSchemaResponse.ResourceTypes["test_instance"].Block - ty := schema.ImpliedType() - - s, diags := ctx.Refresh(m, startingState, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if !p.ReadResourceCalled { - t.Fatal("ReadResource should be called") - } - - mod := s.RootModule() - newState, err := mod.Resources["test_instance.foo"].Instances[addrs.NoKey].Current.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(readStateVal, newState.Value, valueComparer) { - t.Error(cmp.Diff(newState.Value, readStateVal, valueComparer, equateEmpty)) - } -} - -func TestContext2Refresh_dataComputedModuleVar(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-data-module-var") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - obj := req.ProposedNewState.AsValueMap() - obj["id"] = cty.UnknownVal(cty.String) - resp.PlannedState = cty.ObjectVal(obj) - return resp - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = req.Config - return resp - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "aws_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - "output": { - Type: cty.String, - Computed: 
true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{Mode: plans.RefreshOnlyMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - checkStateString(t, plan.PriorState, ` - -`) -} - -func TestContext2Refresh_targeted(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_elb": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "instances": { - Type: cty.Set(cty.String), - Optional: true, - }, - }, - }, - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "vpc_id": { - Type: cty.String, - Optional: true, - }, - }, - }, - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_vpc.metoo", `{"id":"vpc-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me", `{"id":"i-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - m := testModule(t, "refresh-targeted") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), 
- }, - }) - - refreshedResources := make([]string, 0, 2) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - _, diags := ctx.Refresh(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "me", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - expected := []string{"vpc-abc123", "i-abc123"} - if !reflect.DeepEqual(refreshedResources, expected) { - t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources) - } -} - -func TestContext2Refresh_targetedCount(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_elb": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "instances": { - Type: cty.Set(cty.String), - Optional: true, - }, - }, - }, - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "vpc_id": { - Type: cty.String, - Optional: true, - }, - }, - }, - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_vpc.metoo", `{"id":"vpc-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[0]", 
`{"id":"i-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[1]", `{"id":"i-cde567"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[2]", `{"id":"i-cde789"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - m := testModule(t, "refresh-targeted-count") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - refreshedResources := make([]string, 0, 2) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - _, diags := ctx.Refresh(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "me", - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - // Target didn't specify index, so we should get all our instances - expected := []string{ - "vpc-abc123", - "i-abc123", - "i-cde567", - "i-cde789", - } - sort.Strings(expected) - sort.Strings(refreshedResources) - if !reflect.DeepEqual(refreshedResources, expected) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", refreshedResources, expected) - } -} - -func TestContext2Refresh_targetedCountIndex(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_elb": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - 
Computed: true, - }, - "instances": { - Type: cty.Set(cty.String), - Optional: true, - }, - }, - }, - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "vpc_id": { - Type: cty.String, - Optional: true, - }, - }, - }, - "aws_vpc": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_vpc.metoo", `{"id":"vpc-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[0]", `{"id":"i-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[1]", `{"id":"i-cde567"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.me[2]", `{"id":"i-cde789"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - m := testModule(t, "refresh-targeted-count") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - refreshedResources := make([]string, 0, 2) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - _, diags := ctx.Refresh(m, state, &PlanOpts{ - Mode: plans.NormalMode, - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.ResourceInstance( - 
addrs.ManagedResourceMode, "aws_instance", "me", addrs.IntKey(0), - ), - }, - }) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - expected := []string{"vpc-abc123", "i-abc123"} - if !reflect.DeepEqual(refreshedResources, expected) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", refreshedResources, expected) - } -} - -func TestContext2Refresh_moduleComputedVar(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - m := testModule(t, "refresh-module-computed-var") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - // This was failing (see GH-2188) at some point, so this test just - // verifies that the failure goes away. 
- if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { - t.Fatalf("refresh errs: %s", diags.Err()) - } -} - -func TestContext2Refresh_delete(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-basic") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - mod := s.RootModule() - if len(mod.Resources) > 0 { - t.Fatal("resources should be empty") - } -} - -func TestContext2Refresh_ignoreUncreated(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-basic") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - } - - _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - if p.ReadResourceCalled { - t.Fatal("refresh should not be called") - } -} - -func TestContext2Refresh_hook(t *testing.T) { - h := new(MockHook) - p := testProvider("aws") - m := testModule(t, "refresh-basic") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - 
testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Hooks: []Hook{h}, - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if _, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { - t.Fatalf("refresh errs: %s", diags.Err()) - } - if !h.PreRefreshCalled { - t.Fatal("should be called") - } - if !h.PostRefreshCalled { - t.Fatal("should be called") - } -} - -func TestContext2Refresh_modules(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-modules") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceTainted(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - testSetResourceInstanceCurrent(child, "aws_instance.web", `{"id":"baz"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - if !req.PriorState.GetAttr("id").RawEquals(cty.StringVal("baz")) { - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - new, _ := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if len(path) == 1 && path[0].(cty.GetAttrStep).Name == "id" { - return cty.StringVal("new"), nil - } - return v, nil - }) - return providers.ReadResourceResponse{ - NewState: new, - } - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - actual := 
strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testContextRefreshModuleStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) { - m := testModule(t, "refresh-module-input-computed-output") - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "compute": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { - t.Fatalf("refresh errs: %s", diags.Err()) - } -} - -func TestContext2Refresh_moduleVarModule(t *testing.T) { - m := testModule(t, "refresh-module-var-module") - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { - t.Fatalf("refresh errs: %s", diags.Err()) - } -} - -// GH-70 -func TestContext2Refresh_noState(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-no-state") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - } - - if _, diags := 
ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { - t.Fatalf("refresh errs: %s", diags.Err()) - } -} - -func TestContext2Refresh_output(t *testing.T) { - p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }) - - m := testModule(t, "refresh-output") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo","foo":"bar"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - root.SetOutputValue("foo", cty.StringVal("foo"), false) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testContextRefreshOutputStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%q\n\nwant:\n%q", actual, expected) - } -} - -func TestContext2Refresh_outputPartial(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-output-partial") - - // Refresh creates a partial plan for any instances that don't have - // remote objects yet, to get stub values for interpolation. Therefore - // we need to make DiffFn available to let that complete. 
- - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.foo", `{}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testContextRefreshOutputPartialStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Refresh_stateBasic(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-basic") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block - ty := schema.ImpliedType() - - readStateVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - })) - if err != nil { - t.Fatal(err) - } - - 
p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: readStateVal, - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - if !p.ReadResourceCalled { - t.Fatal("read resource should be called") - } - - mod := s.RootModule() - newState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(ty) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) - } -} - -func TestContext2Refresh_dataCount(t *testing.T) { - p := testProvider("test") - m := testModule(t, "refresh-data-count") - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - m := req.ProposedNewState.AsValueMap() - m["things"] = cty.ListVal([]cty.Value{cty.StringVal("foo")}) - resp.PlannedState = cty.ObjectVal(m) - return resp - } - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "things": {Type: cty.List(cty.String), Computed: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test": {}, - }, - }) - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - return providers.ReadDataSourceResponse{ - State: req.Config, - } - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - s, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) - - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - checkStateString(t, s, ``) -} - -func 
TestContext2Refresh_dataState(t *testing.T) { - m := testModule(t, "refresh-data-resource-basic") - state := states.NewState() - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "inputs": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - } - - p := testProvider("null") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - DataSources: map[string]*configschema.Block{ - "null_data_source": schema, - }, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - var readStateVal cty.Value - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - m := req.Config.AsValueMap() - readStateVal = cty.ObjectVal(m) - - return providers.ReadDataSourceResponse{ - State: readStateVal, - } - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - if !p.ReadDataSourceCalled { - t.Fatal("ReadDataSource should have been called") - } - - mod := s.RootModule() - - newState, err := mod.Resources["data.null_data_source.testing"].Instances[addrs.NoKey].Current.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) - } -} - -func TestContext2Refresh_dataStateRefData(t *testing.T) { - p := testProvider("null") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - DataSources: map[string]*configschema.Block{ - "null_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, 
- }, - "bar": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }) - - m := testModule(t, "refresh-data-ref-data") - state := states.NewState() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), - }, - }) - - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - // add the required id - m := req.Config.AsValueMap() - m["id"] = cty.StringVal("foo") - - return providers.ReadDataSourceResponse{ - State: cty.ObjectVal(m), - } - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - actual := strings.TrimSpace(s.String()) - expected := strings.TrimSpace(testTerraformRefreshDataRefDataStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestContext2Refresh_tainted(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-basic") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceTainted(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - // add the required id - m := req.PriorState.AsValueMap() - m["id"] = cty.StringVal("foo") - - return providers.ReadResourceResponse{ - NewState: cty.ObjectVal(m), - } - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - if !p.ReadResourceCalled { - t.Fatal("ReadResource was not called; should have been") - } - - actual := strings.TrimSpace(s.String()) - 
expected := strings.TrimSpace(testContextRefreshTaintedStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// Doing a Refresh (or any operation really, but Refresh usually -// happens first) with a config with an unknown provider should result in -// an error. The key bug this found was that this wasn't happening if -// Providers was _empty_. -func TestContext2Refresh_unknownProvider(t *testing.T) { - m := testModule(t, "refresh-unknown-provider") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - c, diags := NewContext(&ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{}, - }) - assertNoDiagnostics(t, diags) - - _, diags = c.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) - if !diags.HasErrors() { - t.Fatal("successfully refreshed; want error") - } - - if got, want := diags.Err().Error(), "Missing required provider"; !strings.Contains(got, want) { - t.Errorf("missing expected error\nwant substring: %s\ngot:\n%s", want, got) - } -} - -func TestContext2Refresh_vars(t *testing.T) { - p := testProvider("aws") - - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "ami": { - Type: cty.String, - Optional: true, - }, - "id": { - Type: cty.String, - Computed: true, - }, - }, - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{"aws_instance": schema}, - }) - - m := testModule(t, "refresh-vars") - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - readStateVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - })) - if err != nil { - t.Fatal(err) - } - - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: readStateVal, - } - - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - return providers.PlanResourceChangeResponse{ - PlannedState: req.ProposedNewState, - } - } - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - if !p.ReadResourceCalled { - t.Fatal("read resource should be called") - } - - mod := s.RootModule() - - newState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) - } - - for _, r := range mod.Resources { - if r.Addr.Resource.Type == "" { - t.Fatalf("no type: %#v", r) - } - } -} - -func TestContext2Refresh_orphanModule(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "refresh-module-orphan") - - // Create a custom refresh function to track the order they were visited - var order []string - var orderLock sync.Mutex - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - orderLock.Lock() - defer orderLock.Unlock() - - order = append(order, req.PriorState.GetAttr("id").AsString()) - return providers.ReadResourceResponse{ - NewState: req.PriorState, - } - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo").Resource, - &states.ResourceInstanceObjectSrc{ - 
Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - Dependencies: []addrs.ConfigResource{ - {Module: addrs.Module{"module.child"}}, - {Module: addrs.Module{"module.child"}}, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.bar").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-bcd23"}`), - Dependencies: []addrs.ConfigResource{{Module: addrs.Module{"module.grandchild"}}}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - grandchild := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey).Child("grandchild", addrs.NoKey)) - testSetResourceInstanceCurrent(grandchild, "aws_instance.baz", `{"id":"i-cde345"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - testCheckDeadlock(t, func() { - _, err := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if err != nil { - t.Fatalf("err: %s", err.Err()) - } - - // TODO: handle order properly for orphaned modules / resources - // expected := []string{"i-abc123", "i-bcd234", "i-cde345"} - // if !reflect.DeepEqual(order, expected) { - // t.Fatalf("expected: %#v, got: %#v", expected, order) - // } - }) -} - -func TestContext2Validate(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - "num": { - Type: cty.String, - Optional: true, - }, - }, - }, 
- }, - }) - - m := testModule(t, "validate-good") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if len(diags) != 0 { - t.Fatalf("unexpected error: %#v", diags.ErrWithWarnings()) - } -} - -func TestContext2Refresh_updateProviderInState(t *testing.T) { - m := testModule(t, "update-resource-provider") - p := testProvider("aws") - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.bar", `{"id":"foo"}`, `provider["registry.terraform.io/hashicorp/aws"].baz`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - expected := strings.TrimSpace(` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"].foo`) - - s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - actual := s.String() - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestContext2Refresh_schemaUpgradeFlatmap(t *testing.T) { - m := testModule(t, "refresh-schema-upgrade") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "name": { // imagining we renamed this from "id" - Type: cty.String, - Optional: true, - }, - }, - }, - }, - ResourceTypeSchemaVersions: map[string]uint64{ - "test_thing": 5, - }, - }) - p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ - UpgradedState: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("foo"), - }), - } - - s := states.BuildState(func(s *states.SyncState) { - 
s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 3, - AttrsFlat: map[string]string{ - "id": "foo", - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Refresh(m, s, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - { - got := p.UpgradeResourceStateRequest - want := providers.UpgradeResourceStateRequest{ - TypeName: "test_thing", - Version: 3, - RawStateFlatmap: map[string]string{ - "id": "foo", - }, - } - if !cmp.Equal(got, want) { - t.Errorf("wrong upgrade request\n%s", cmp.Diff(want, got)) - } - } - - { - got := state.String() - want := strings.TrimSpace(` -test_thing.bar: - ID = - provider = provider["registry.terraform.io/hashicorp/test"] - name = foo -`) - if got != want { - t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", got, want) - } - } -} - -func TestContext2Refresh_schemaUpgradeJSON(t *testing.T) { - m := testModule(t, "refresh-schema-upgrade") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "name": { // imagining we renamed this from "id" - Type: cty.String, - Optional: true, - }, - }, - }, - }, - ResourceTypeSchemaVersions: map[string]uint64{ - "test_thing": 5, - }, - }) - p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ - UpgradedState: cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("foo"), - }), - } - - s := 
states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - SchemaVersion: 3, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Refresh(m, s, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - { - got := p.UpgradeResourceStateRequest - want := providers.UpgradeResourceStateRequest{ - TypeName: "test_thing", - Version: 3, - RawStateJSON: []byte(`{"id":"foo"}`), - } - if !cmp.Equal(got, want) { - t.Errorf("wrong upgrade request\n%s", cmp.Diff(want, got)) - } - } - - { - got := state.String() - want := strings.TrimSpace(` -test_thing.bar: - ID = - provider = provider["registry.terraform.io/hashicorp/test"] - name = foo -`) - if got != want { - t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", got, want) - } - } -} - -func TestContext2Refresh_dataValidation(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -data "aws_data_source" "foo" { - foo = "bar" -} -`, - }) - - p := testProvider("aws") - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - resp.PlannedState = req.ProposedNewState - return - } - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = req.Config - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - _, 
diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - t.Fatal(diags.Err()) - } - - if !p.ValidateDataResourceConfigCalled { - t.Fatal("ValidateDataSourceConfig not called during plan") - } -} - -func TestContext2Refresh_dataResourceDependsOn(t *testing.T) { - m := testModule(t, "plan-data-depends-on") - p := testProvider("test") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - "test_data": { - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Computed: true}, - }, - }, - }, - }) - p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ - State: cty.ObjectVal(map[string]cty.Value{ - "compute": cty.StringVal("value"), - }), - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "test_resource.a", `{"id":"a"}`, `provider["registry.terraform.io/hashicorp/test"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } -} - -// verify that create_before_destroy is updated in the state during refresh -func TestRefresh_updateLifecycle(t *testing.T) { - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: 
"aws_instance", - Name: "bar", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "bar" { - lifecycle { - create_before_destroy = true - } -} -`, - }) - - p := testProvider("aws") - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - state, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatalf("plan errors: %s", diags.Err()) - } - - r := state.ResourceInstance(mustResourceInstanceAddr("aws_instance.bar")) - if !r.Current.CreateBeforeDestroy { - t.Fatal("create_before_destroy not updated in instance state") - } -} - -func TestContext2Refresh_dataSourceOrphan(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ``, - }) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_source", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - Dependencies: []addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - p := testProvider("test") - p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - resp.State = cty.NullVal(req.Config.Type()) - return - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - _, diags := ctx.Refresh(m, state, 
&PlanOpts{Mode: plans.NormalMode}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if p.ReadResourceCalled { - t.Fatal("there are no managed resources to read") - } - - if p.ReadDataSourceCalled { - t.Fatal("orphaned data source instance should not be read") - } -} - -// Legacy providers may return invalid null values for blocks, causing noise in -// the diff output and unexpected behavior with ignore_changes. Make sure -// refresh fixes these up before storing the state. -func TestContext2Refresh_reifyNullBlock(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_resource" "foo" { -} -`, - }) - - p := new(MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - // incorrectly return a null _set_block value - v := req.PriorState.AsValueMap() - v["set_block"] = cty.NullVal(v["set_block"].Type()) - return providers.ReadResourceResponse{NewState: cty.ObjectVal(v)} - } - - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "set_block": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "a": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - }, - }, - }, - }) - p.PlanResourceChangeFn = testDiffFn - - fooAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - fooAddr, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo", "network_interface":[]}`), - Dependencies: 
[]addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - plan, diags := ctx.Plan(m, state, &PlanOpts{Mode: plans.RefreshOnlyMode}) - if diags.HasErrors() { - t.Fatalf("refresh errors: %s", diags.Err()) - } - - jsonState := plan.PriorState.ResourceInstance(fooAddr.Absolute(addrs.RootModuleInstance)).Current.AttrsJSON - - // the set_block should still be an empty container, and not null - expected := `{"id":"foo","set_block":[]}` - if string(jsonState) != expected { - t.Fatalf("invalid state\nexpected: %s\ngot: %s\n", expected, jsonState) - } -} diff --git a/internal/terraform/context_test.go b/internal/terraform/context_test.go deleted file mode 100644 index 12c376622b1c..000000000000 --- a/internal/terraform/context_test.go +++ /dev/null @@ -1,1005 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "path/filepath" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/planfile" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/zclconf/go-cty/cty" -) - -var ( - equateEmpty = 
cmpopts.EquateEmpty() - typeComparer = cmp.Comparer(cty.Type.Equals) - valueComparer = cmp.Comparer(cty.Value.RawEquals) - valueTrans = cmp.Transformer("hcl2shim", hcl2shim.ConfigValueFromHCL2) -) - -func TestNewContextRequiredVersion(t *testing.T) { - cases := []struct { - Name string - Module string - Version string - Value string - Err bool - }{ - { - "no requirement", - "", - "0.1.0", - "", - false, - }, - - { - "doesn't match", - "", - "0.1.0", - "> 0.6.0", - true, - }, - - { - "matches", - "", - "0.7.0", - "> 0.6.0", - false, - }, - - { - "prerelease doesn't match with inequality", - "", - "0.8.0", - "> 0.7.0-beta", - true, - }, - - { - "prerelease doesn't match with equality", - "", - "0.7.0", - "0.7.0-beta", - true, - }, - - { - "module matches", - "context-required-version-module", - "0.5.0", - "", - false, - }, - - { - "module doesn't match", - "context-required-version-module", - "0.4.0", - "", - true, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - // Reset the version for the tests - old := tfversion.SemVer - tfversion.SemVer = version.Must(version.NewVersion(tc.Version)) - defer func() { tfversion.SemVer = old }() - - name := "context-required-version" - if tc.Module != "" { - name = tc.Module - } - mod := testModule(t, name) - if tc.Value != "" { - constraint, err := version.NewConstraint(tc.Value) - if err != nil { - t.Fatalf("can't parse %q as version constraint", tc.Value) - } - mod.Module.CoreVersionConstraints = append(mod.Module.CoreVersionConstraints, configs.VersionConstraint{ - Required: constraint, - }) - } - c, diags := NewContext(&ContextOpts{}) - if diags.HasErrors() { - t.Fatalf("unexpected NewContext errors: %s", diags.Err()) - } - - diags = c.Validate(mod) - if diags.HasErrors() != tc.Err { - t.Fatalf("err: %s", diags.Err()) - } - }) - } -} - -func TestContext_missingPlugins(t *testing.T) { - ctx, diags := NewContext(&ContextOpts{}) - assertNoDiagnostics(t, diags) - - configSrc 
:= ` -terraform { - required_providers { - explicit = { - source = "example.com/foo/beep" - } - builtin = { - source = "terraform.io/builtin/nonexist" - } - } -} - -resource "implicit_thing" "a" { - provisioner "nonexist" { - } -} - -resource "implicit_thing" "b" { - provider = implicit2 -} -` - - cfg := testModuleInline(t, map[string]string{ - "main.tf": configSrc, - }) - - // Validate and Plan are the two entry points where we explicitly verify - // the available plugins match what the configuration needs. For other - // operations we typically fail more deeply in Terraform Core, with - // potentially-less-helpful error messages, because getting there would - // require doing some pretty weird things that aren't common enough to - // be worth the complexity to check for them. - - validateDiags := ctx.Validate(cfg) - _, planDiags := ctx.Plan(cfg, nil, DefaultPlanOpts) - - tests := map[string]tfdiags.Diagnostics{ - "validate": validateDiags, - "plan": planDiags, - } - - for testName, gotDiags := range tests { - t.Run(testName, func(t *testing.T) { - var wantDiags tfdiags.Diagnostics - wantDiags = wantDiags.Append( - tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - "This configuration requires built-in provider terraform.io/builtin/nonexist, but that provider isn't available in this Terraform version.", - ), - tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - "This configuration requires provider example.com/foo/beep, but that provider isn't available. You may be able to install it automatically by running:\n terraform init", - ), - tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - "This configuration requires provider registry.terraform.io/hashicorp/implicit, but that provider isn't available. 
You may be able to install it automatically by running:\n terraform init", - ), - tfdiags.Sourceless( - tfdiags.Error, - "Missing required provider", - "This configuration requires provider registry.terraform.io/hashicorp/implicit2, but that provider isn't available. You may be able to install it automatically by running:\n terraform init", - ), - tfdiags.Sourceless( - tfdiags.Error, - "Missing required provisioner plugin", - `This configuration requires provisioner plugin "nonexist", which isn't available. If you're intending to use an external provisioner plugin, you must install it manually into one of the plugin search directories before running Terraform.`, - ), - ) - assertDiagnosticsMatch(t, gotDiags, wantDiags) - }) - } -} - -func testContext2(t *testing.T, opts *ContextOpts) *Context { - t.Helper() - - ctx, diags := NewContext(opts) - if diags.HasErrors() { - t.Fatalf("failed to create test context\n\n%s\n", diags.Err()) - } - - return ctx -} - -func testApplyFn(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - resp.NewState = req.PlannedState - if req.PlannedState.IsNull() { - resp.NewState = cty.NullVal(req.PriorState.Type()) - return - } - - planned := req.PlannedState.AsValueMap() - if planned == nil { - planned = map[string]cty.Value{} - } - - id, ok := planned["id"] - if !ok || id.IsNull() || !id.IsKnown() { - planned["id"] = cty.StringVal("foo") - } - - // our default schema has a computed "type" attr - if ty, ok := planned["type"]; ok && !ty.IsNull() { - planned["type"] = cty.StringVal(req.TypeName) - } - - if cmp, ok := planned["compute"]; ok && !cmp.IsNull() { - computed := cmp.AsString() - if val, ok := planned[computed]; ok && !val.IsKnown() { - planned[computed] = cty.StringVal("computed_value") - } - } - - for k, v := range planned { - if k == "unknown" { - // "unknown" should cause an error - continue - } - - if !v.IsKnown() { - switch k { - case "type": - planned[k] = cty.StringVal(req.TypeName) - 
default: - planned[k] = cty.NullVal(v.Type()) - } - } - } - - resp.NewState = cty.ObjectVal(planned) - return -} - -func testDiffFn(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - var planned map[string]cty.Value - - // this is a destroy plan - if req.ProposedNewState.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp - } - - if !req.ProposedNewState.IsNull() { - planned = req.ProposedNewState.AsValueMap() - } - if planned == nil { - planned = map[string]cty.Value{} - } - - // id is always computed for the tests - if id, ok := planned["id"]; ok && id.IsNull() { - planned["id"] = cty.UnknownVal(cty.String) - } - - // the old tests have require_new replace on every plan - if _, ok := planned["require_new"]; ok { - resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: "require_new"}}) - } - - for k := range planned { - requiresNewKey := "__" + k + "_requires_new" - _, ok := planned[requiresNewKey] - if ok { - resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: requiresNewKey}}) - } - } - - if v, ok := planned["compute"]; ok && !v.IsNull() { - k := v.AsString() - unknown := cty.UnknownVal(cty.String) - if strings.HasSuffix(k, ".#") { - k = k[:len(k)-2] - unknown = cty.UnknownVal(cty.List(cty.String)) - } - planned[k] = unknown - } - - if t, ok := planned["type"]; ok && t.IsNull() { - planned["type"] = cty.UnknownVal(cty.String) - } - - resp.PlannedState = cty.ObjectVal(planned) - return -} - -func testProvider(prefix string) *MockProvider { - p := new(MockProvider) - p.GetProviderSchemaResponse = testProviderSchema(prefix) - - return p -} - -func testProvisioner() *MockProvisioner { - p := new(MockProvisioner) - p.GetSchemaResponse = provisioners.GetSchemaResponse{ - Provisioner: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "command": { - Type: cty.String, - Optional: true, - }, - 
"order": { - Type: cty.String, - Optional: true, - }, - "when": { - Type: cty.String, - Optional: true, - }, - }, - }, - } - return p -} - -func checkStateString(t *testing.T, state *states.State, expected string) { - t.Helper() - actual := strings.TrimSpace(state.String()) - expected = strings.TrimSpace(expected) - - if actual != expected { - t.Fatalf("incorrect state\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -// Test helper that gives a function 3 seconds to finish, assumes deadlock and -// fails test if it does not. -func testCheckDeadlock(t *testing.T, f func()) { - t.Helper() - timeout := make(chan bool, 1) - done := make(chan bool, 1) - go func() { - time.Sleep(3 * time.Second) - timeout <- true - }() - go func(f func(), done chan bool) { - defer func() { done <- true }() - f() - }(f, done) - select { - case <-timeout: - t.Fatalf("timed out! probably deadlock") - case <-done: - // ok - } -} - -func testProviderSchema(name string) *providers.GetProviderSchemaResponse { - return getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": { - Type: cty.String, - Optional: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - "value": { - Type: cty.String, - Optional: true, - }, - "root": { - Type: cty.Number, - Optional: true, - }, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - name + "_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "ami": { - Type: cty.String, - Optional: true, - }, - "dep": { - Type: cty.String, - Optional: true, - }, - "num": { - Type: cty.Number, - Optional: true, - }, - "require_new": { - Type: cty.String, - Optional: true, - }, - "var": { - Type: cty.String, - Optional: true, - }, - "foo": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "bar": { - Type: cty.String, - Optional: true, - }, - "compute": { - Type: cty.String, 
- Optional: true, - Computed: false, - }, - "compute_value": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "output": { - Type: cty.String, - Optional: true, - }, - "write": { - Type: cty.String, - Optional: true, - }, - "instance": { - Type: cty.String, - Optional: true, - }, - "vpc_id": { - Type: cty.String, - Optional: true, - }, - "type": { - Type: cty.String, - Computed: true, - }, - - // Generated by testDiffFn if compute = "unknown" is set in the test config - "unknown": { - Type: cty.String, - Computed: true, - }, - }, - }, - name + "_eip": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "instance": { - Type: cty.String, - Optional: true, - }, - }, - }, - name + "_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Optional: true, - }, - "sensitive_value": { - Type: cty.String, - Sensitive: true, - Optional: true, - }, - "random": { - Type: cty.String, - Optional: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nesting_single": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - "sensitive_value": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - name + "_ami_list": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "ids": { - Type: cty.List(cty.String), - Optional: true, - Computed: true, - }, - }, - }, - name + "_remote_state": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - "output": { - Type: cty.Map(cty.String), - Computed: true, - }, - }, - }, - name + 
"_file": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - "template": { - Type: cty.String, - Optional: true, - }, - "rendered": { - Type: cty.String, - Computed: true, - }, - "__template_requires_new": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - DataSources: map[string]*configschema.Block{ - name + "_data_source": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "foo": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - name + "_remote_state": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - "foo": { - Type: cty.String, - Optional: true, - }, - "output": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - }, - name + "_file": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - "template": { - Type: cty.String, - Optional: true, - }, - "rendered": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }) -} - -// contextOptsForPlanViaFile is a helper that creates a temporary plan file, -// then reads it back in again and produces a ContextOpts object containing the -// planned changes, prior state and config from the plan file. -// -// This is intended for testing the separated plan/apply workflow in a more -// convenient way than spelling out all of these steps every time. Normally -// only the command and backend packages need to deal with such things, but -// our context tests try to exercise lots of stuff at once and so having them -// round-trip things through on-disk files is often an important part of -// fully representing an old bug in a regression test. 
-func contextOptsForPlanViaFile(t *testing.T, configSnap *configload.Snapshot, plan *plans.Plan) (*ContextOpts, *configs.Config, *plans.Plan, error) { - dir := t.TempDir() - - // We'll just create a dummy statefile.File here because we're not going - // to run through any of the codepaths that care about Lineage/Serial/etc - // here anyway. - stateFile := &statefile.File{ - State: plan.PriorState, - } - prevStateFile := &statefile.File{ - State: plan.PrevRunState, - } - - // To make life a little easier for test authors, we'll populate a simple - // backend configuration if they didn't set one, since the backend is - // usually dealt with in a calling package and so tests in this package - // don't really care about it. - if plan.Backend.Config == nil { - cfg, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) - if err != nil { - panic(fmt.Sprintf("NewDynamicValue failed: %s", err)) // shouldn't happen because we control the inputs - } - plan.Backend.Type = "local" - plan.Backend.Config = cfg - plan.Backend.Workspace = "default" - } - - filename := filepath.Join(dir, "tfplan") - err := planfile.Create(filename, planfile.CreateArgs{ - ConfigSnapshot: configSnap, - PreviousRunStateFile: prevStateFile, - StateFile: stateFile, - Plan: plan, - }) - if err != nil { - return nil, nil, nil, err - } - - pr, err := planfile.Open(filename) - if err != nil { - return nil, nil, nil, err - } - - config, diags := pr.ReadConfig() - if diags.HasErrors() { - return nil, nil, nil, diags.Err() - } - - plan, err = pr.ReadPlan() - if err != nil { - return nil, nil, nil, err - } - - // Note: This has grown rather silly over the course of ongoing refactoring, - // because ContextOpts is no longer actually responsible for carrying - // any information from a plan file and instead all of the information - // lives inside the config and plan objects. We continue to return a - // silly empty ContextOpts here just to keep all of the calling tests - // working. 
- return &ContextOpts{}, config, plan, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state togther, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality check and diff on the two -// data structures. -func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept. 
- continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. 
- crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "" - } - // NOTE: we don't support here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - // TODO: Mark " (forces new resource)" in updateMsg when appropriate. 
- - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -// assertNoDiagnostics fails the test in progress (using t.Fatal) if the given -// diagnostics is non-empty. -func assertNoDiagnostics(t *testing.T, diags tfdiags.Diagnostics) { - t.Helper() - if len(diags) == 0 { - return - } - logDiagnostics(t, diags) - t.FailNow() -} - -// assertNoDiagnostics fails the test in progress (using t.Fatal) if the given -// diagnostics has any errors. -func assertNoErrors(t *testing.T, diags tfdiags.Diagnostics) { - t.Helper() - if !diags.HasErrors() { - return - } - logDiagnostics(t, diags) - t.FailNow() -} - -// assertDiagnosticsMatch fails the test in progress (using t.Fatal) if the -// two sets of diagnostics don't match after being normalized using the -// "ForRPC" processing step, which eliminates the specific type information -// and HCL expression information of each diagnostic. -// -// assertDiagnosticsMatch sorts the two sets of diagnostics in the usual way -// before comparing them, though diagnostics only have a partial order so that -// will not totally normalize the ordering of all diagnostics sets. -func assertDiagnosticsMatch(t *testing.T, got, want tfdiags.Diagnostics) { - got = got.ForRPC() - want = want.ForRPC() - got.Sort() - want.Sort() - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong diagnostics\n%s", diff) - } -} - -// logDiagnostics is a test helper that logs the given diagnostics to to the -// given testing.T using t.Log, in a way that is hopefully useful in debugging -// a test. It does not generate any errors or fail the test. 
See -// assertNoDiagnostics and assertNoErrors for more specific helpers that can -// also fail the test. -func logDiagnostics(t *testing.T, diags tfdiags.Diagnostics) { - t.Helper() - for _, diag := range diags { - desc := diag.Description() - rng := diag.Source() - - var severity string - switch diag.Severity() { - case tfdiags.Error: - severity = "ERROR" - case tfdiags.Warning: - severity = "WARN" - default: - severity = "???" // should never happen - } - - if subj := rng.Subject; subj != nil { - if desc.Detail == "" { - t.Logf("[%s@%s] %s", severity, subj.StartString(), desc.Summary) - } else { - t.Logf("[%s@%s] %s: %s", severity, subj.StartString(), desc.Summary, desc.Detail) - } - } else { - if desc.Detail == "" { - t.Logf("[%s] %s", severity, desc.Summary) - } else { - t.Logf("[%s] %s: %s", severity, desc.Summary, desc.Detail) - } - } - } -} - -const testContextRefreshModuleStr = ` -aws_instance.web: (tainted) - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - -module.child: - aws_instance.web: - ID = new - provider = provider["registry.terraform.io/hashicorp/aws"] -` - -const testContextRefreshOutputStr = ` -aws_instance.web: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - -Outputs: - -foo = bar -` - -const testContextRefreshOutputPartialStr = ` - -` - -const testContextRefreshTaintedStr = ` -aws_instance.web: (tainted) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` diff --git a/internal/terraform/context_validate.go b/internal/terraform/context_validate.go deleted file mode 100644 index aad884442a39..000000000000 --- a/internal/terraform/context_validate.go +++ /dev/null @@ -1,80 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - 
-// Validate performs semantic validation of a configuration, and returns -// any warnings or errors. -// -// Syntax and structural checks are performed by the configuration loader, -// and so are not repeated here. -// -// Validate considers only the configuration and so it won't catch any -// errors caused by current values in the state, or other external information -// such as root module input variables. However, the Plan function includes -// all of the same checks as Validate, in addition to the other work it does -// to consider the previous run state and the planning options. -func (c *Context) Validate(config *configs.Config) tfdiags.Diagnostics { - defer c.acquireRun("validate")() - - var diags tfdiags.Diagnostics - - moreDiags := c.checkConfigDependencies(config) - diags = diags.Append(moreDiags) - // If required dependencies are not available then we'll bail early since - // otherwise we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return diags - } - - log.Printf("[DEBUG] Building and walking validate graph") - - // Validate is to check if the given module is valid regardless of - // input values, current state, etc. Therefore we populate all of the - // input values with unknown values of the expected type, allowing us - // to perform a type check without assuming any particular values. - varValues := make(InputValues) - for name, variable := range config.Module.Variables { - ty := variable.Type - if ty == cty.NilType { - // Can't predict the type at all, so we'll just mark it as - // cty.DynamicVal (unknown value of cty.DynamicPseudoType). 
- ty = cty.DynamicPseudoType - } - varValues[name] = &InputValue{ - Value: cty.UnknownVal(ty), - SourceType: ValueFromUnknown, - } - } - - graph, moreDiags := (&PlanGraphBuilder{ - Config: config, - Plugins: c.plugins, - State: states.NewState(), - RootVariableValues: varValues, - Operation: walkValidate, - }).Build(addrs.RootModuleInstance) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return diags - } - - walker, walkDiags := c.walk(graph, walkValidate, &graphWalkOpts{ - Config: config, - }) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return diags - } - - return diags -} diff --git a/internal/terraform/context_validate_test.go b/internal/terraform/context_validate_test.go deleted file mode 100644 index 47e71f404f07..000000000000 --- a/internal/terraform/context_validate_test.go +++ /dev/null @@ -1,2484 +0,0 @@ -package terraform - -import ( - "errors" - "fmt" - "strings" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestContext2Validate_badCount(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }) - - m := testModule(t, "validate-bad-count") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func 
TestContext2Validate_badResource_reference(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }) - - m := testModule(t, "validate-bad-resource-count") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_badVar(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - - m := testModule(t, "validate-bad-var") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_varNoDefaultExplicitType(t *testing.T) { - m := testModule(t, "validate-var-no-default-explicit-type") - c, diags := NewContext(&ContextOpts{}) - if diags.HasErrors() { - t.Fatalf("unexpected NewContext errors: %s", diags.Err()) - } - - // NOTE: This test has grown idiosyncratic because originally Terraform - // would (optionally) check variables during validation, and then in - // Terraform v0.12 we switched to checking variables during NewContext, - // and now most recently we've switched to checking variables only during - // planning because root variables are a plan option. 
Therefore this has - // grown into a plan test rather than a validate test, but it lives on - // here in order to make it easier to navigate through that history in - // version control. - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - // Error should be: The input variable "maybe_a_map" has not been assigned a value. - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_computedVar(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - pt := testProvider("test") - pt.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "value": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - m := testModule(t, "validate-computed-var") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - addrs.NewDefaultProvider("test"): testProviderFuncFixed(pt), - }, - }) - - p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - val := req.Config.GetAttr("value") - if val.IsKnown() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value isn't computed")) - } - - return - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if p.ConfigureProviderCalled { - t.Fatal("Configure should not be called for 
provider") - } -} - -func TestContext2Validate_computedInFunction(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.Number, Optional: true}, - }, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "aws_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "optional_attr": {Type: cty.String, Optional: true}, - "computed": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - - m := testModule(t, "validate-computed-in-function") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -// Test that validate allows through computed counts. We do this and allow -// them to fail during "plan" since we can't know if the computed values -// can be realized during a plan. 
-func TestContext2Validate_countComputed(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - DataSources: map[string]providers.Schema{ - "aws_data_source": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - - m := testModule(t, "validate-count-computed") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_countNegative(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - m := testModule(t, "validate-count-negative") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_countVariable(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - m := testModule(t, "apply-count-variable") - c := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_countVariableNoDefault(t *testing.T) { - p := testProvider("aws") - m := testModule(t, "validate-count-variable") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - c, diags := NewContext(&ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - assertNoDiagnostics(t, diags) - - _, diags = c.Plan(m, nil, &PlanOpts{}) - if !diags.HasErrors() { - // Error should be: The input variable "foo" has not been assigned a value. - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_moduleBadOutput(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - m := testModule(t, "validate-bad-module-output") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_moduleGood(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - m := testModule(t, "validate-good-module") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_moduleBadResource(t *testing.T) { - m := testModule(t, "validate-module-bad-rc") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), - } - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_moduleDepsShouldNotCycle(t *testing.T) { - m := testModule(t, "validate-module-deps-cycle") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_moduleProviderVar(t *testing.T) { - m := testModule(t, 
"validate-module-pc-vars") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - if req.Config.GetAttr("foo").IsNull() { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo is null")) - } - return - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_moduleProviderInheritUnused(t *testing.T) { - m := testModule(t, "validate-module-pc-inherit-unused") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - if req.Config.GetAttr("foo").IsNull() { - 
resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo is null")) - } - return - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_orphans(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - m := testModule(t, "validate-good") - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - var diags tfdiags.Diagnostics - if req.Config.GetAttr("foo").IsNull() { - diags = diags.Append(errors.New("foo is not set")) - } - return providers.ValidateResourceConfigResponse{ - Diagnostics: diags, - } - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_providerConfig_bad(t *testing.T) { - m := testModule(t, "validate-bad-pc") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateProviderConfigResponse = 
&providers.ValidateProviderConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), - } - - diags := c.Validate(m) - if len(diags) != 1 { - t.Fatalf("wrong number of diagnostics %d; want %d", len(diags), 1) - } - if !strings.Contains(diags.Err().Error(), "bad") { - t.Fatalf("bad: %s", diags.Err().Error()) - } -} - -func TestContext2Validate_providerConfig_skippedEmpty(t *testing.T) { - m := testModule(t, "validate-skipped-pc-empty") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateProviderConfigResponse = &providers.ValidateProviderConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("should not be called")), - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_providerConfig_good(t *testing.T) { - m := testModule(t, "validate-bad-pc") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -// In this test there is a mismatch between the provider's fqn (hashicorp/test) -// and it's local name set in required_providers (arbitrary). -func TestContext2Validate_requiredProviderConfig(t *testing.T) { - m := testModule(t, "validate-required-provider-config") - p := testProvider("aws") - - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "required_attribute": {Type: cty.String, Required: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{}, - }, - }, - }, - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_provisionerConfig_bad(t *testing.T) { - m := testModule(t, "validate-bad-prov-conf") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - pr := simpleMockProvisioner() - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - p.ValidateProviderConfigResponse = &providers.ValidateProviderConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), - } - 
- diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_badResourceConnection(t *testing.T) { - m := testModule(t, "validate-bad-resource-connection") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - pr := simpleMockProvisioner() - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - diags := c.Validate(m) - t.Log(diags.Err()) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_badProvisionerConnection(t *testing.T) { - m := testModule(t, "validate-bad-prov-connection") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - pr := simpleMockProvisioner() - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - diags := c.Validate(m) - t.Log(diags.Err()) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_provisionerConfig_good(t *testing.T) { - m := testModule(t, "validate-bad-prov-conf") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - 
Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - pr := simpleMockProvisioner() - pr.ValidateProvisionerConfigFn = func(req provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - var diags tfdiags.Diagnostics - if req.Config.GetAttr("test_string").IsNull() { - diags = diags.Append(errors.New("test_string is not set")) - } - return provisioners.ValidateProvisionerConfigResponse{ - Diagnostics: diags, - } - } - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_requiredVar(t *testing.T) { - m := testModule(t, "validate-required-var") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - c, diags := NewContext(&ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - assertNoDiagnostics(t, diags) - - // NOTE: This test has grown idiosyncratic because originally Terraform - // would (optionally) check variables during validation, and then in - // Terraform v0.12 we switched to checking variables during NewContext, - // and now 
most recently we've switched to checking variables only during - // planning because root variables are a plan option. Therefore this has - // grown into a plan test rather than a validate test, but it lives on - // here in order to make it easier to navigate through that history in - // version control. - _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) - if !diags.HasErrors() { - // Error should be: The input variable "foo" has not been assigned a value. - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_resourceConfig_bad(t *testing.T) { - m := testModule(t, "validate-bad-rc") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), - } - - diags := c.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } -} - -func TestContext2Validate_resourceConfig_good(t *testing.T) { - m := testModule(t, "validate-bad-rc") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", 
diags.Err()) - } -} - -func TestContext2Validate_tainted(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - m := testModule(t, "validate-good") - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - var diags tfdiags.Diagnostics - if req.Config.GetAttr("foo").IsNull() { - diags = diags.Append(errors.New("foo is not set")) - } - return providers.ValidateResourceConfigResponse{ - Diagnostics: diags, - } - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_targetedDestroy(t *testing.T) { - m := testModule(t, "validate-targeted") - p := testProvider("aws") - pr := simpleMockProvisioner() - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - testSetResourceInstanceCurrent(root, "aws_instance.foo", `{"id":"i-bcd345"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - testSetResourceInstanceCurrent(root, "aws_instance.bar", `{"id":"i-abc123"}`, `provider["registry.terraform.io/hashicorp/aws"]`) - - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": testProvisionerFuncFixed(pr), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_varRefUnknown(t *testing.T) { - m := testModule(t, "validate-variable-ref") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - var value cty.Value - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - value = req.Config.GetAttr("foo") - return providers.ValidateResourceConfigResponse{} - } - - c.Validate(m) - - // Input variables are always unknown during the validate walk, because - // we're checking for validity of all possible input values. Validity - // against specific input values is checked during the plan walk. - if !value.RawEquals(cty.UnknownVal(cty.String)) { - t.Fatalf("bad: %#v", value) - } -} - -// Module variables weren't being interpolated during Validate phase. 
-// related to https://github.com/hashicorp/terraform/issues/5322 -func TestContext2Validate_interpolateVar(t *testing.T) { - input := new(MockUIInput) - - m := testModule(t, "input-interpolate-var") - p := testProvider("null") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "template_file": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "template": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -// When module vars reference something that is actually computed, this -// shouldn't cause validation to fail. -func TestContext2Validate_interpolateComputedModuleVarDef(t *testing.T) { - input := new(MockUIInput) - - m := testModule(t, "validate-computed-module-var-ref") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -// Computed values are lost when a map is output from a module -func TestContext2Validate_interpolateMap(t *testing.T) { - input := new(MockUIInput) - - m := testModule(t, "issue-9549") - p := testProvider("template") - - ctx := testContext2(t, &ContextOpts{ - Providers: 
map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), - }, - UIInput: input, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} - -func TestContext2Validate_varSensitive(t *testing.T) { - // Smoke test through validate where a variable has sensitive applied - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "foo" { - default = "xyz" - sensitive = true -} - -variable "bar" { - sensitive = true -} - -data "aws_data_source" "bar" { - foo = var.bar -} - -resource "aws_instance" "foo" { - foo = var.foo -} -`, - }) - - p := testProvider("aws") - p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - // Providers receive unmarked values - if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { - t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) - } - return providers.ValidateResourceConfigResponse{} - } - p.ValidateDataResourceConfigFn = func(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { - t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) - } - return providers.ValidateDataResourceConfigResponse{} - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if !p.ValidateResourceConfigCalled { - t.Fatal("expected ValidateResourceConfigFn to be called") - } - - if !p.ValidateDataResourceConfigCalled { - t.Fatal("expected ValidateDataSourceConfigFn to be called") - } -} - -func TestContext2Validate_invalidOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ 
- "main.tf": ` -data "aws_data_source" "name" {} - -output "out" { - value = "${data.aws_data_source.name.missing}" -}`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_invalidModuleOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "child/main.tf": ` -data "aws_data_source" "name" {} - -output "out" { - value = "${data.aws_data_source.name.missing}" -}`, - "main.tf": ` -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - foo = "${module.child.out}" -}`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Unsupported attribute: This object does not have an attribute named "missing" - if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_sensitiveRootModuleOutput(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "child/main.tf": ` -variable "foo" { - default = "xyz" - sensitive = true -} - -output "out" { - value = var.foo -}`, - "main.tf": ` -module "child" { - source = "./child" -} - -output "root" { - value = module.child.out - sensitive = 
true -}`, - }) - - ctx := testContext2(t, &ContextOpts{}) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } -} - -func TestContext2Validate_legacyResourceCount(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "test" {} - -output "out" { - value = aws_instance.test.count -}`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Invalid resource count attribute: The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(aws_instance.test) to count resource instances. - if got, want := diags.Err().Error(), "Invalid resource count attribute:"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_invalidModuleRef(t *testing.T) { - // This test is verifying that we properly validate and report on references - // to modules that are not declared, since we were missing some validation - // here in early 0.12.0 alphas that led to a panic. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -output "out" { - # Intentionally referencing undeclared module to ensure error - value = module.foo -}`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Reference to undeclared module: No module call named "foo" is declared in the root module. 
- if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_invalidModuleOutputRef(t *testing.T) { - // This test is verifying that we properly validate and report on references - // to modules that are not declared, since we were missing some validation - // here in early 0.12.0 alphas that led to a panic. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -output "out" { - # Intentionally referencing undeclared module to ensure error - value = module.foo.bar -}`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Reference to undeclared module: No module call named "foo" is declared in the root module. - if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_invalidDependsOnResourceRef(t *testing.T) { - // This test is verifying that we raise an error if depends_on - // refers to something that doesn't exist in configuration. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "bar" { - depends_on = [test_resource.nonexistant] -} -`, - }) - - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Reference to undeclared module: No module call named "foo" is declared in the root module. 
- if got, want := diags.Err().Error(), "Reference to undeclared resource:"; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_invalidResourceIgnoreChanges(t *testing.T) { - // This test is verifying that we raise an error if ignore_changes - // refers to something that can be statically detected as not conforming - // to the resource type schema. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "bar" { - lifecycle { - ignore_changes = [does_not_exist_in_schema] - } -} -`, - }) - - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - // Should get this error: - // Reference to undeclared module: No module call named "foo" is declared in the root module. - if got, want := diags.Err().Error(), `no argument, nested block, or exported attribute named "does_not_exist_in_schema"`; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_variableCustomValidationsFail(t *testing.T) { - // This test is for custom validation rules associated with root module - // variables, and specifically that we handle the situation where the - // given value is invalid in a child module. 
- m := testModule(t, "validate-variable-custom-validations-child") - - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_variableCustomValidationsRoot(t *testing.T) { - // This test is for custom validation rules associated with root module - // variables, and specifically that we handle the situation where their - // values are unknown during validation, skipping the validation check - // altogether. (Root module variables are never known during validation.) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "test" { - type = string - - validation { - condition = var.test != "nope" - error_message = "Value must not be \"nope\"." 
- } -} -`, - }) - - p := testProvider("test") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error\ngot: %s", diags.Err().Error()) - } -} - -func TestContext2Validate_expandModules(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - for_each = toset(["a", "b"]) - source = "./mod" -} - -module "mod2" { - for_each = module.mod1 - source = "./mod" - input = module.mod1["a"].out -} - -module "mod3" { - count = length(module.mod2) - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { -} - -output "out" { - value = 1 -} - -variable "input" { - type = number - default = 0 -} - -module "nested" { - count = 2 - source = "./nested" - input = count.index -} -`, - "mod/nested/main.tf": ` -variable "input" { -} - -resource "aws_instance" "foo" { - count = var.input -} -`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_expandModulesInvalidCount(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - count = -1 - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { -} -`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), `Invalid count argument`; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: 
message containing %q", got, want) - } -} - -func TestContext2Validate_expandModulesInvalidForEach(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - for_each = ["a", "b"] - source = "./mod" -} -`, - "mod/main.tf": ` -resource "aws_instance" "foo" { -} -`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - if got, want := diags.Err().Error(), `Invalid for_each argument`; !strings.Contains(got, want) { - t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) - } -} - -func TestContext2Validate_expandMultipleNestedModules(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "modA" { - for_each = { - first = "m" - second = "n" - } - source = "./modA" -} -`, - "modA/main.tf": ` -locals { - m = { - first = "m" - second = "n" - } -} - -module "modB" { - for_each = local.m - source = "./modB" - y = each.value -} - -module "modC" { - for_each = local.m - source = "./modC" - x = module.modB[each.key].out - y = module.modB[each.key].out -} - -`, - "modA/modB/main.tf": ` -variable "y" { - type = string -} - -resource "aws_instance" "foo" { - foo = var.y -} - -output "out" { - value = aws_instance.foo.id -} -`, - "modA/modC/main.tf": ` -variable "x" { - type = string -} - -variable "y" { - type = string -} - -resource "aws_instance" "foo" { - foo = var.x -} - -output "out" { - value = var.y -} -`, - }) - - p := testProvider("aws") - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_invalidModuleDependsOn(t 
*testing.T) { - // validate module and output depends_on - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - source = "./mod" - depends_on = [resource_foo.bar.baz] -} - -module "mod2" { - source = "./mod" - depends_on = [resource_foo.bar.baz] -} -`, - "mod/main.tf": ` -output "out" { - value = "foo" -} -`, - }) - - diags := testContext2(t, &ContextOpts{}).Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - - if len(diags) != 2 { - t.Fatalf("wanted 2 diagnostic errors, got %q", diags) - } - - for _, d := range diags { - des := d.Description().Summary - if !strings.Contains(des, "Invalid depends_on reference") { - t.Fatalf(`expected "Invalid depends_on reference", got %q`, des) - } - } -} - -func TestContext2Validate_invalidOutputDependsOn(t *testing.T) { - // validate module and output depends_on - m := testModuleInline(t, map[string]string{ - "main.tf": ` -module "mod1" { - source = "./mod" -} - -output "out" { - value = "bar" - depends_on = [resource_foo.bar.baz] -} -`, - "mod/main.tf": ` -output "out" { - value = "bar" - depends_on = [resource_foo.bar.baz] -} -`, - }) - - diags := testContext2(t, &ContextOpts{}).Validate(m) - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") - } - - if len(diags) != 2 { - t.Fatalf("wanted 2 diagnostic errors, got %q", diags) - } - - for _, d := range diags { - des := d.Description().Summary - if !strings.Contains(des, "Invalid depends_on reference") { - t.Fatalf(`expected "Invalid depends_on reference", got %q`, des) - } - } -} - -func TestContext2Validate_rpcDiagnostics(t *testing.T) { - // validate module and output depends_on - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { -} -`, - }) - - p := testProvider("test") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "test_instance": { - Block: &configschema.Block{ - Attributes: 
map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - }, - }, - }, - }, - } - - p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("don't frobble")), - } - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if len(diags) == 0 { - t.Fatal("expected warnings") - } - - for _, d := range diags { - des := d.Description().Summary - if !strings.Contains(des, "frobble") { - t.Fatalf(`expected frobble, got %q`, des) - } - } -} - -func TestContext2Validate_sensitiveProvisionerConfig(t *testing.T) { - m := testModule(t, "validate-sensitive-provisioner-config") - p := testProvider("aws") - p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ - ResourceTypes: map[string]providers.Schema{ - "aws_instance": { - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - } - - pr := simpleMockProvisioner() - - c := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "test": testProvisionerFuncFixed(pr), - }, - }) - - pr.ValidateProvisionerConfigFn = func(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - if r.Config.ContainsMarked() { - t.Errorf("provisioner config contains marked values") - } - return pr.ValidateProvisionerConfigResponse - } - - diags := c.Validate(m) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if !pr.ValidateProvisionerConfigCalled { - t.Fatal("ValidateProvisionerConfig not called") - } -} - -func 
TestContext2Plan_validateMinMaxDynamicBlock(t *testing.T) { - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "things": { - Type: cty.List(cty.String), - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": {Type: cty.String, Optional: true}, - }, - }, - Nesting: configschema.NestingList, - MinItems: 2, - MaxItems: 3, - }, - }, - }, - }, - }) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "test_instance" "a" { - // MinItems 2 - foo { - bar = "a" - } - foo { - bar = "b" - } -} - -resource "test_instance" "b" { - // one dymamic block can satisfy MinItems of 2 - dynamic "foo" { - for_each = test_instance.a.things - content { - bar = foo.value - } - } -} - -resource "test_instance" "c" { - // we may have more than MaxItems dynamic blocks when they are unknown - foo { - bar = "b" - } - dynamic "foo" { - for_each = test_instance.a.things - content { - bar = foo.value - } - } - dynamic "foo" { - for_each = test_instance.a.things - content { - bar = "${foo.value}-2" - } - } - dynamic "foo" { - for_each = test_instance.b.things - content { - bar = foo.value - } - } -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_passInheritedProvider(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` -terraform { - required_providers { - test = { - source = "hashicorp/test" - } - } -} - -module "first" { - source = "./first" - 
providers = { - test = test - } -} -`, - - // This module does not define a config for the test provider, but we - // should be able to pass whatever the implied config is to a child - // module. - "first/main.tf": ` -terraform { - required_providers { - test = { - source = "hashicorp/test" - } - } -} - -module "second" { - source = "./second" - providers = { - test.alias = test - } -}`, - - "first/second/main.tf": ` -terraform { - required_providers { - test = { - source = "hashicorp/test" - configuration_aliases = [test.alias] - } - } -} - -resource "test_object" "t" { - provider = test.alias -} -`, - }) - - p := simpleMockProvider() - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Plan_lookupMismatchedObjectTypes(t *testing.T) { - p := new(MockProvider) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "things": { - Type: cty.List(cty.String), - Optional: true, - }, - }, - }, - }, - }) - - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "items" { - type = list(string) - default = [] -} - -resource "test_instance" "a" { - for_each = length(var.items) > 0 ? { default = {} } : {} -} - -output "out" { - // Strictly speaking, this expression is incorrect because the map element - // type is a different type from the default value, and the lookup - // implementation expects to be able to convert the default to match the - // element type. - // There are two reasons this works which we need to maintain for - // compatibility. 
First during validation the 'test_instance.a' expression - // only returns a dynamic value, preventing any type comparison. Later during - // plan and apply 'test_instance.a' is an object and not a map, and the - // lookup implementation skips the type comparison when the keys are known - // statically. - value = lookup(test_instance.a, "default", { id = null })["id"] -} -`}) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_nonNullableVariableDefaultValidation(t *testing.T) { - m := testModuleInline(t, map[string]string{ - "main.tf": ` - module "first" { - source = "./mod" - input = null - } - `, - - "mod/main.tf": ` - variable "input" { - type = string - default = "default" - nullable = false - - // Validation expressions should receive the default with nullable=false and - // a null input. - validation { - condition = var.input != null - error_message = "Input cannot be null!" - } - } - `, - }) - - ctx := testContext2(t, &ContextOpts{}) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_precondition_good(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "input" { - type = string - default = "foo" -} - -resource "aws_instance" "test" { - foo = var.input - - lifecycle { - precondition { - condition = length(var.input) > 0 - error_message = "Input cannot be empty." 
- } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_precondition_badCondition(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "input" { - type = string - default = "foo" -} - -resource "aws_instance" "test" { - foo = var.input - - lifecycle { - precondition { - condition = length(one(var.input)) == 1 - error_message = "You can't do that." - } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } - if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { - t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) - } -} - -func TestContext2Validate_precondition_badErrorMessage(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "input" { - type = string - default = "foo" -} - -resource "aws_instance" "test" { - foo = var.input - - lifecycle { - precondition { - condition = 
var.input != "foo" - error_message = "This is a bad use of a function: ${one(var.input)}." - } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } - if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { - t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) - } -} - -func TestContext2Validate_postcondition_good(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "test" { - foo = "foo" - - lifecycle { - postcondition { - condition = length(self.foo) > 0 - error_message = "Input cannot be empty." - } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_postcondition_badCondition(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - // This postcondition's condition expression does not refer to self, which - // is unrealistic. 
This is because at the time of writing the test, self is - // always an unknown value of dynamic type during validation. As a result, - // validation of conditions which refer to resource arguments is not - // possible until plan time. For now we exercise the code by referring to - // an input variable. - m := testModuleInline(t, map[string]string{ - "main.tf": ` -variable "input" { - type = string - default = "foo" -} - -resource "aws_instance" "test" { - foo = var.input - - lifecycle { - postcondition { - condition = length(one(var.input)) == 1 - error_message = "You can't do that." - } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } - if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { - t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) - } -} - -func TestContext2Validate_postcondition_badErrorMessage(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "test" { - foo = "foo" - - lifecycle { - postcondition { - condition = self.foo != "foo" - error_message = "This is a bad use of a function: ${one("foo")}." 
- } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") - } - if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { - t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) - } -} - -func TestContext2Validate_precondition_count(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - foos = ["bar", "baz"] -} - -resource "aws_instance" "test" { - count = 3 - foo = local.foos[count.index] - - lifecycle { - precondition { - condition = count.index < length(local.foos) - error_message = "Insufficient foos." 
- } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_postcondition_forEach(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -locals { - foos = toset(["bar", "baz", "boop"]) -} - -resource "aws_instance" "test" { - for_each = local.foos - foo = "foo" - - lifecycle { - postcondition { - condition = length(each.value) == 3 - error_message = "Short foo required, not \"${each.key}\"." - } - } -} - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - if diags.HasErrors() { - t.Fatal(diags.ErrWithWarnings()) - } -} - -func TestContext2Validate_deprecatedAttr(t *testing.T) { - p := testProvider("aws") - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true, Deprecated: true}, - }, - }, - }, - }) - m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "test" { -} -locals { - deprecated = aws_instance.test.foo -} - - `, - }) - - ctx := testContext2(t, &ContextOpts{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), - }, - }) - - diags := ctx.Validate(m) - warn := 
diags.ErrWithWarnings().Error() - if !strings.Contains(warn, `The attribute "foo" is deprecated`) { - t.Fatalf("expected deprecated warning, got: %q\n", warn) - } -} diff --git a/internal/terraform/context_walk.go b/internal/terraform/context_walk.go deleted file mode 100644 index 523fa738541f..000000000000 --- a/internal/terraform/context_walk.go +++ /dev/null @@ -1,144 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// graphWalkOpts captures some transient values we use (and possibly mutate) -// during a graph walk. -// -// The way these options get used unfortunately varies between the different -// walkOperation types. This is a historical design wart that dates back to -// us using the same graph structure for all operations; hopefully we'll -// make the necessary differences between the walk types more explicit someday. -type graphWalkOpts struct { - InputState *states.State - Changes *plans.Changes - Config *configs.Config - - // PlanTimeCheckResults should be populated during the apply phase with - // the snapshot of check results that was generated during the plan step. - // - // This then propagates the decisions about which checkable objects exist - // from the plan phase into the apply phase without having to re-compute - // the module and resource expansion. 
- PlanTimeCheckResults *states.CheckResults - - MoveResults refactoring.MoveResults -} - -func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) { - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - - walker := c.graphWalker(operation, opts) - - // Watch for a stop so we can call the provider Stop() API. - watchStop, watchWait := c.watchStop(walker) - - // Walk the real graph, this will block until it completes - diags := graph.Walk(walker) - - // Close the channel so the watcher stops, and wait for it to return. - close(watchStop) - <-watchWait - - return walker, diags -} - -func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *ContextGraphWalker { - var state *states.SyncState - var refreshState *states.SyncState - var prevRunState *states.SyncState - - // NOTE: None of the SyncState objects must directly wrap opts.InputState, - // because we use those to mutate the state object and opts.InputState - // belongs to our caller and thus we must treat it as immutable. - // - // To account for that, most of our SyncState values created below end up - // wrapping a _deep copy_ of opts.InputState instead. - inputState := opts.InputState - if inputState == nil { - // Lots of callers use nil to represent the "empty" case where we've - // not run Apply yet, so we tolerate that. - inputState = states.NewState() - } - - switch operation { - case walkValidate: - // validate should not use any state - state = states.NewState().SyncWrapper() - - // validate currently uses the plan graph, so we have to populate the - // refreshState and the prevRunState. 
- refreshState = states.NewState().SyncWrapper() - prevRunState = states.NewState().SyncWrapper() - - case walkPlan, walkPlanDestroy, walkImport: - state = inputState.DeepCopy().SyncWrapper() - refreshState = inputState.DeepCopy().SyncWrapper() - prevRunState = inputState.DeepCopy().SyncWrapper() - - // For both of our new states we'll discard the previous run's - // check results, since we can still refer to them from the - // prevRunState object if we need to. - state.DiscardCheckResults() - refreshState.DiscardCheckResults() - - default: - state = inputState.DeepCopy().SyncWrapper() - // Only plan-like walks use refreshState and prevRunState - - // Discard the input state's check results, because we should create - // a new set as a result of the graph walk. - state.DiscardCheckResults() - } - - changes := opts.Changes - if changes == nil { - // Several of our non-plan walks end up sharing codepaths with the - // plan walk and thus expect to generate planned changes even though - // we don't care about them. To avoid those crashing, we'll just - // insert a placeholder changes object which'll get discarded - // afterwards. - changes = plans.NewChanges() - } - - if opts.Config == nil { - panic("Context.graphWalker call without Config") - } - - checkState := checks.NewState(opts.Config) - if opts.PlanTimeCheckResults != nil { - // We'll re-report all of the same objects we determined during the - // plan phase so that we can repeat the checks during the apply - // phase to finalize them. 
- for _, configElem := range opts.PlanTimeCheckResults.ConfigResults.Elems { - if configElem.Value.ObjectAddrsKnown() { - configAddr := configElem.Key - checkState.ReportCheckableObjects(configAddr, configElem.Value.ObjectResults.Keys()) - } - } - } - - return &ContextGraphWalker{ - Context: c, - State: state, - Config: opts.Config, - RefreshState: refreshState, - PrevRunState: prevRunState, - Changes: changes.SyncWrapper(), - Checks: checkState, - InstanceExpander: instances.NewExpander(), - MoveResults: opts.MoveResults, - Operation: operation, - StopContext: c.runContext, - } -} diff --git a/internal/terraform/diagnostics.go b/internal/terraform/diagnostics.go deleted file mode 100644 index 26f22f06ce6c..000000000000 --- a/internal/terraform/diagnostics.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// This file contains some package-local helpers for working with diagnostics. -// For the main diagnostics API, see the separate "tfdiags" package. - -// diagnosticCausedByUnknown is an implementation of -// tfdiags.DiagnosticExtraBecauseUnknown which we can use in the "Extra" field -// of a diagnostic to indicate that the problem was caused by unknown values -// being involved in an expression evaluation. -// -// When using this, set the Extra to diagnosticCausedByUnknown(true) and also -// populate the EvalContext and Expression fields of the diagnostic so that -// the diagnostic renderer can use all of that information together to assist -// the user in understanding what was unknown. 
-type diagnosticCausedByUnknown bool - -var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) - -func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { - return bool(e) -} - -// diagnosticCausedBySensitive is an implementation of -// tfdiags.DiagnosticExtraBecauseSensitive which we can use in the "Extra" field -// of a diagnostic to indicate that the problem was caused by sensitive values -// being involved in an expression evaluation. -// -// When using this, set the Extra to diagnosticCausedBySensitive(true) and also -// populate the EvalContext and Expression fields of the diagnostic so that -// the diagnostic renderer can use all of that information together to assist -// the user in understanding what was sensitive. -type diagnosticCausedBySensitive bool - -var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) - -func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { - return bool(e) -} diff --git a/internal/terraform/eval_conditions.go b/internal/terraform/eval_conditions.go deleted file mode 100644 index 58877011ce5c..000000000000 --- a/internal/terraform/eval_conditions.go +++ /dev/null @@ -1,238 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// evalCheckRules ensures that all of the given check rules pass against -// the given HCL evaluation context. 
-// -// If any check rules produce an unknown result then they will be silently -// ignored on the assumption that the same checks will be run again later -// with fewer unknown values in the EvalContext. -// -// If any of the rules do not pass, the returned diagnostics will contain -// errors. Otherwise, it will either be empty or contain only warnings. -func evalCheckRules(typ addrs.CheckType, rules []*configs.CheckRule, ctx EvalContext, self addrs.Checkable, keyData instances.RepetitionData, diagSeverity tfdiags.Severity) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - checkState := ctx.Checks() - if !checkState.ConfigHasChecks(self.ConfigCheckable()) { - // We have nothing to do if this object doesn't have any checks, - // but the "rules" slice should agree that we don't. - if ct := len(rules); ct != 0 { - panic(fmt.Sprintf("check state says that %s should have no rules, but it has %d", self, ct)) - } - return diags - } - - if len(rules) == 0 { - // Nothing to do - return nil - } - - severity := diagSeverity.ToHCL() - - for i, rule := range rules { - result, ruleDiags := evalCheckRule(typ, rule, ctx, self, keyData, severity) - diags = diags.Append(ruleDiags) - - log.Printf("[TRACE] evalCheckRules: %s status is now %s", self, result.Status) - if result.Status == checks.StatusFail { - checkState.ReportCheckFailure(self, typ, i, result.FailureMessage) - } else { - checkState.ReportCheckResult(self, typ, i, result.Status) - } - } - - return diags -} - -type checkResult struct { - Status checks.Status - FailureMessage string -} - -func evalCheckRule(typ addrs.CheckType, rule *configs.CheckRule, ctx EvalContext, self addrs.Checkable, keyData instances.RepetitionData, severity hcl.DiagnosticSeverity) (checkResult, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - const errInvalidCondition = "Invalid condition result" - - refs, moreDiags := lang.ReferencesInExpr(rule.Condition) - diags = diags.Append(moreDiags) - moreRefs, moreDiags := 
lang.ReferencesInExpr(rule.ErrorMessage) - diags = diags.Append(moreDiags) - refs = append(refs, moreRefs...) - - var selfReference addrs.Referenceable - // Only resource postconditions can refer to self - if typ == addrs.ResourcePostcondition { - switch s := self.(type) { - case addrs.AbsResourceInstance: - selfReference = s.Resource - default: - panic(fmt.Sprintf("Invalid self reference type %t", self)) - } - } - scope := ctx.EvaluationScope(selfReference, keyData) - - hclCtx, moreDiags := scope.EvalContext(refs) - diags = diags.Append(moreDiags) - - resultVal, hclDiags := rule.Condition.Value(hclCtx) - diags = diags.Append(hclDiags) - - // NOTE: Intentionally not passing the caller's selected severity in here, - // because this reports errors in the configuration itself, not the failure - // of an otherwise-valid condition. - errorMessage, moreDiags := evalCheckErrorMessage(rule.ErrorMessage, hclCtx) - diags = diags.Append(moreDiags) - - if diags.HasErrors() { - log.Printf("[TRACE] evalCheckRule: %s: %s", typ, diags.Err().Error()) - return checkResult{Status: checks.StatusError}, diags - } - - if !resultVal.IsKnown() { - // We'll wait until we've learned more, then. - return checkResult{Status: checks.StatusUnknown}, diags - } - if resultVal.IsNull() { - // NOTE: Intentionally not passing the caller's selected severity in here, - // because this reports errors in the configuration itself, not the failure - // of an otherwise-valid condition. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: "Condition expression must return either true or false, not null.", - Subject: rule.Condition.Range().Ptr(), - Expression: rule.Condition, - EvalContext: hclCtx, - }) - return checkResult{Status: checks.StatusError}, diags - } - var err error - resultVal, err = convert.Convert(resultVal, cty.Bool) - if err != nil { - // NOTE: Intentionally not passing the caller's selected severity in here, - // because this reports errors in the configuration itself, not the failure - // of an otherwise-valid condition. - detail := fmt.Sprintf("Invalid condition result value: %s.", tfdiags.FormatError(err)) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: detail, - Subject: rule.Condition.Range().Ptr(), - Expression: rule.Condition, - EvalContext: hclCtx, - }) - return checkResult{Status: checks.StatusError}, diags - } - - // The condition result may be marked if the expression refers to a - // sensitive value. - resultVal, _ = resultVal.Unmark() - - status := checks.StatusForCtyValue(resultVal) - - if status != checks.StatusFail { - return checkResult{Status: status}, diags - } - - errorMessageForDiags := errorMessage - if errorMessageForDiags == "" { - errorMessageForDiags = "This check failed, but has an invalid error message as described in the other accompanying messages." - } - diags = diags.Append(&hcl.Diagnostic{ - // The caller gets to choose the severity of this one, because we - // treat condition failures as warnings in the presence of - // certain special planning options. 
- Severity: severity, - Summary: fmt.Sprintf("%s failed", typ.Description()), - Detail: errorMessageForDiags, - Subject: rule.Condition.Range().Ptr(), - Expression: rule.Condition, - EvalContext: hclCtx, - }) - - return checkResult{ - Status: status, - FailureMessage: errorMessage, - }, diags -} - -// evalCheckErrorMessage makes a best effort to evaluate the given expression, -// as an error message string. -// -// It will either return a non-empty message string or it'll return diagnostics -// with either errors or warnings that explain why the given expression isn't -// acceptable. -func evalCheckErrorMessage(expr hcl.Expression, hclCtx *hcl.EvalContext) (string, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - val, hclDiags := expr.Value(hclCtx) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return "", diags - } - - val, err := convert.Convert(val, cty.String) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid error message", - Detail: fmt.Sprintf("Unsuitable value for error message: %s.", tfdiags.FormatError(err)), - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return "", diags - } - if !val.IsKnown() { - return "", diags - } - if val.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid error message", - Detail: "Unsuitable value for error message: must not be null.", - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return "", diags - } - - val, valMarks := val.Unmark() - if _, sensitive := valMarks[marks.Sensitive]; sensitive { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Error message refers to sensitive values", - Detail: `The error expression used to explain this condition refers to sensitive values, so Terraform will not display the resulting message. 
- -You can correct this by removing references to sensitive values, or by carefully using the nonsensitive() function if the expression will not reveal the sensitive data.`, - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return "", diags - } - - // NOTE: We've discarded any other marks the string might have been carrying, - // aside from the sensitive mark. - - return strings.TrimSpace(val.AsString()), diags -} diff --git a/internal/terraform/eval_context.go b/internal/terraform/eval_context.go deleted file mode 100644 index fedf223051ab..000000000000 --- a/internal/terraform/eval_context.go +++ /dev/null @@ -1,204 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() addrs.ModuleInstance - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. 
- Input() UIInput - - // InitProvider initializes the provider with the given address, and returns - // the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. This - // method will panic if the module instance address of the given provider - // configuration does not match the Path() of the EvalContext. - InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) - - // Provider gets the provider instance with the given address (already - // initialized) or returns nil if the provider isn't initialized. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - // InitProvider must've been called on the EvalContext of the module - // that owns the given provider before calling this method. - Provider(addrs.AbsProviderConfig) providers.Interface - - // ProviderSchema retrieves the schema for a particular provider, which - // must have already been initialized with InitProvider. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - ProviderSchema(addrs.AbsProviderConfig) (*ProviderSchema, error) - - // CloseProvider closes provider connections that aren't needed anymore. - // - // This method will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. - CloseProvider(addrs.AbsProviderConfig) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - // - // This method will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. 
- ConfigureProvider(addrs.AbsProviderConfig, cty.Value) tfdiags.Diagnostics - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. - // - // These methods will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. - ProviderInput(addrs.AbsProviderConfig) map[string]cty.Value - SetProviderInput(addrs.AbsProviderConfig, map[string]cty.Value) - - // Provisioner gets the provisioner instance with the given name. - Provisioner(string) (provisioners.Interface, error) - - // ProvisionerSchema retrieves the main configuration schema for a - // particular provisioner, which must have already been initialized with - // InitProvisioner. - ProvisionerSchema(string) (*configschema.Block, error) - - // CloseProvisioner closes all provisioner plugins. - CloseProvisioners() error - - // EvaluateBlock takes the given raw configuration block and associated - // schema and evaluates it to produce a value of an object type that - // conforms to the implied type of the schema. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - // - // The "key" argument is also optional. If given, it is the instance key - // of the current object within the multi-instance container it belongs - // to. For example, on a resource block with "count" set this should be - // set to a different addrs.IntKey for each instance created from that - // block. Set this to addrs.NoKey if not appropriate. - // - // The returned body is an expanded version of the given body, with any - // "dynamic" blocks replaced with zero or more static blocks. This can be - // used to extract correct source location information about attributes of - // the returned object value. 
- EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) - - // EvaluateExpr takes the given HCL expression and evaluates it to produce - // a value. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) - - // EvaluateReplaceTriggeredBy takes the raw reference expression from the - // config, and returns the evaluated *addrs.Reference along with a boolean - // indicating if that reference forces replacement. - EvaluateReplaceTriggeredBy(expr hcl.Expression, repData instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) - - // EvaluationScope returns a scope that can be used to evaluate reference - // addresses in this context. - EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope - - // SetRootModuleArgument defines the value for one variable of the root - // module. The caller must ensure that given value is a suitable - // "final value" for the variable, which means that it's already converted - // and validated to match any configured constraints and validation rules. - // - // Calling this function multiple times with the same variable address - // will silently overwrite the value provided by a previous call. - SetRootModuleArgument(addrs.InputVariable, cty.Value) - - // SetModuleCallArgument defines the value for one input variable of a - // particular child module call. The caller must ensure that the given - // value is a suitable "final value" for the variable, which means that - // it's already converted and validated to match any configured - // constraints and validation rules. 
- // - // Calling this function multiple times with the same variable address - // will silently overwrite the value provided by a previous call. - SetModuleCallArgument(addrs.ModuleCallInstance, addrs.InputVariable, cty.Value) - - // GetVariableValue returns the value provided for the input variable with - // the given address, or cty.DynamicVal if the variable hasn't been assigned - // a value yet. - // - // Most callers should deal with variable values only indirectly via - // EvaluationScope and the other expression evaluation functions, but - // this is provided because variables tend to be evaluated outside of - // the context of the module they belong to and so we sometimes need to - // override the normal expression evaluation behavior. - GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value - - // Changes returns the writer object that can be used to write new proposed - // changes into the global changes set. - Changes() *plans.ChangesSync - - // State returns a wrapper object that provides safe concurrent access to - // the global state. - State() *states.SyncState - - // Checks returns the object that tracks the state of any custom checks - // declared in the configuration. - Checks() *checks.State - - // RefreshState returns a wrapper object that provides safe concurrent - // access to the state used to store the most recently refreshed resource - // values. - RefreshState() *states.SyncState - - // PrevRunState returns a wrapper object that provides safe concurrent - // access to the state which represents the result of the previous run, - // updated only so that object data conforms to current schemas for - // meaningful comparison with RefreshState. - PrevRunState() *states.SyncState - - // InstanceExpander returns a helper object for tracking the expansion of - // graph nodes during the plan phase in response to "count" and "for_each" - // arguments. 
- // - // The InstanceExpander is a global object that is shared across all of the - // EvalContext objects for a given configuration. - InstanceExpander() *instances.Expander - - // MoveResults returns a map describing the results of handling any - // resource instance move statements prior to the graph walk, so that - // the graph walk can then record that information appropriately in other - // artifacts produced by the graph walk. - // - // This data structure is created prior to the graph walk and read-only - // thereafter, so callers must not modify the returned map or any other - // objects accessible through it. - MoveResults() refactoring.MoveResults - - // WithPath returns a copy of the context with the internal path set to the - // path argument. - WithPath(path addrs.ModuleInstance) EvalContext -} diff --git a/internal/terraform/eval_context_builtin.go b/internal/terraform/eval_context_builtin.go deleted file mode 100644 index d66b9dd08060..000000000000 --- a/internal/terraform/eval_context_builtin.go +++ /dev/null @@ -1,504 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/version" - - "github.com/hashicorp/terraform/internal/states" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. 
-type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue addrs.ModuleInstance - - // pathSet indicates that this context was explicitly created for a - // specific path, and can be safely used for evaluation. This lets us - // differentiate between PathValue being unset, and the zero value which is - // equivalent to RootModuleInstance. Path and Evaluation methods will - // panic if this is not set. - pathSet bool - - // Evaluator is used for evaluating expressions within the scope of this - // eval context. - Evaluator *Evaluator - - // VariableValues contains the variable values across all modules. This - // structure is shared across the entire containing context, and so it - // may be accessed only when holding VariableValuesLock. - // The keys of the first level of VariableValues are the string - // representations of addrs.ModuleInstance values. The second-level keys - // are variable names within each module instance. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Plugins is a library of plugin components (providers and provisioners) - // available for use during a graph walk. 
- Plugins *contextPlugins - - Hooks []Hook - InputValue UIInput - ProviderCache map[string]providers.Interface - ProviderInputConfig map[string]map[string]cty.Value - ProviderLock *sync.Mutex - ProvisionerCache map[string]provisioners.Interface - ProvisionerLock *sync.Mutex - ChangesValue *plans.ChangesSync - StateValue *states.SyncState - ChecksValue *checks.State - RefreshStateValue *states.SyncState - PrevRunStateValue *states.SyncState - InstanceExpanderValue *instances.Expander - MoveResultsValue refactoring.MoveResults -} - -// BuiltinEvalContext implements EvalContext -var _ EvalContext = (*BuiltinEvalContext)(nil) - -func (ctx *BuiltinEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { - newCtx := *ctx - newCtx.pathSet = true - newCtx.PathValue = path - return &newCtx -} - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever. - if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return nil - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { - // If we already initialized, it is an error - if p := ctx.Provider(addr); p != nil { - return nil, fmt.Errorf("%s is already initialized", addr) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. 
- ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.String() - - p, err := ctx.Plugins.NewProviderInstance(addr.Provider) - if err != nil { - return nil, err - } - - log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", addr.String(), addr) - ctx.ProviderCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - return ctx.ProviderCache[addr.String()] -} - -func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) (*ProviderSchema, error) { - return ctx.Plugins.ProviderSchema(addr.Provider) -} - -func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.String() - provider := ctx.ProviderCache[key] - if provider != nil { - delete(ctx.ProviderCache, key) - return provider.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if !addr.Module.Equal(ctx.Path().Module()) { - // This indicates incorrect use of ConfigureProvider: it should be used - // only from the module that the provider configuration belongs to. 
- panic(fmt.Sprintf("%s configured by wrong module %s", addr, ctx.Path())) - } - - p := ctx.Provider(addr) - if p == nil { - diags = diags.Append(fmt.Errorf("%s not initialized", addr)) - return diags - } - - providerSchema, err := ctx.ProviderSchema(addr) - if err != nil { - diags = diags.Append(fmt.Errorf("failed to read schema for %s: %s", addr, err)) - return diags - } - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("schema for %s is not available", addr)) - return diags - } - - req := providers.ConfigureProviderRequest{ - TerraformVersion: version.String(), - Config: cfg, - } - - resp := p.ConfigureProvider(req) - return resp.Diagnostics -} - -func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.AbsProviderConfig) map[string]cty.Value { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - if !pc.Module.Equal(ctx.Path().Module()) { - // This indicates incorrect use of InitProvider: it should be used - // only from the module that the provider configuration belongs to. - panic(fmt.Sprintf("%s initialized by wrong module %s", pc, ctx.Path())) - } - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - return nil - } - - return ctx.ProviderInputConfig[pc.String()] -} - -func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.AbsProviderConfig, c map[string]cty.Value) { - absProvider := pc - if !pc.Module.IsRoot() { - // Only root module provider configurations can have input. 
- log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") - return - } - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[absProvider.String()] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) (provisioners.Interface, error) { - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - p, ok := ctx.ProvisionerCache[n] - if !ok { - var err error - p, err = ctx.Plugins.NewProvisionerInstance(n) - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[n] = p - } - - return p, nil -} - -func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) (*configschema.Block, error) { - return ctx.Plugins.ProvisionerSchema(n) -} - -func (ctx *BuiltinEvalContext) CloseProvisioners() error { - var diags tfdiags.Diagnostics - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - for name, prov := range ctx.ProvisionerCache { - err := prov.Close() - if err != nil { - diags = diags.Append(fmt.Errorf("provisioner.Close %s: %s", name, err)) - } - } - - return diags.Err() -} - -func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - scope := ctx.EvaluationScope(self, keyData) - body, evalDiags := scope.ExpandBlock(body, schema) - diags = diags.Append(evalDiags) - val, evalDiags := scope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - return val, body, diags -} - -func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) - return scope.EvalExpr(expr, wantType) -} - -func (ctx *BuiltinEvalContext) EvaluateReplaceTriggeredBy(expr hcl.Expression, repData instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) { - - // 
get the reference to lookup changes in the plan - ref, diags := evalReplaceTriggeredByExpr(expr, repData) - if diags.HasErrors() { - return nil, false, diags - } - - var changes []*plans.ResourceInstanceChangeSrc - // store the address once we get it for validation - var resourceAddr addrs.Resource - - // The reference is either a resource or resource instance - switch sub := ref.Subject.(type) { - case addrs.Resource: - resourceAddr = sub - rc := sub.Absolute(ctx.Path()) - changes = ctx.Changes().GetChangesForAbsResource(rc) - case addrs.ResourceInstance: - resourceAddr = sub.ContainingResource() - rc := sub.Absolute(ctx.Path()) - change := ctx.Changes().GetResourceInstanceChange(rc, states.CurrentGen) - if change != nil { - // we'll generate an error below if there was no change - changes = append(changes, change) - } - } - - // Do some validation to make sure we are expecting a change at all - cfg := ctx.Evaluator.Config.Descendent(ctx.Path().Module()) - resCfg := cfg.Module.ResourceByAddr(resourceAddr) - if resCfg == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %s has not been declared in %s`, ref.Subject, moduleDisplayAddr(ctx.Path())), - Subject: expr.Range().Ptr(), - }) - return nil, false, diags - } - - if len(changes) == 0 { - // If the resource is valid there should always be at least one change. - diags = diags.Append(fmt.Errorf("no change found for %s in %s", ref.Subject, moduleDisplayAddr(ctx.Path()))) - return nil, false, diags - } - - // If we don't have a traversal beyond the resource, then we can just look - // for any change. - if len(ref.Remaining) == 0 { - for _, c := range changes { - switch c.ChangeSrc.Action { - // Only immediate changes to the resource will trigger replacement. 
- case plans.Update, plans.DeleteThenCreate, plans.CreateThenDelete: - return ref, true, diags - } - } - - // no change triggered - return nil, false, diags - } - - // This must be an instances to have a remaining traversal, which means a - // single change. - change := changes[0] - - // Make sure the change is actionable. A create or delete action will have - // a change in value, but are not valid for our purposes here. - switch change.ChangeSrc.Action { - case plans.Update, plans.DeleteThenCreate, plans.CreateThenDelete: - // OK - default: - return nil, false, diags - } - - // Since we have a traversal after the resource reference, we will need to - // decode the changes, which means we need a schema. - providerAddr := change.ProviderAddr - schema, err := ctx.ProviderSchema(providerAddr) - if err != nil { - diags = diags.Append(err) - return nil, false, diags - } - - resAddr := change.Addr.ContainingResource().Resource - resSchema, _ := schema.SchemaForResourceType(resAddr.Mode, resAddr.Type) - ty := resSchema.ImpliedType() - - before, err := change.ChangeSrc.Before.Decode(ty) - if err != nil { - diags = diags.Append(err) - return nil, false, diags - } - - after, err := change.ChangeSrc.After.Decode(ty) - if err != nil { - diags = diags.Append(err) - return nil, false, diags - } - - path := traversalToPath(ref.Remaining) - attrBefore, _ := path.Apply(before) - attrAfter, _ := path.Apply(after) - - if attrBefore == cty.NilVal || attrAfter == cty.NilVal { - replace := attrBefore != attrAfter - return ref, replace, diags - } - - replace := !attrBefore.RawEquals(attrAfter) - - return ref, replace, diags -} - -func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData instances.RepetitionData) *lang.Scope { - if !ctx.pathSet { - panic("context path not set") - } - data := &evaluationStateData{ - Evaluator: ctx.Evaluator, - ModulePath: ctx.PathValue, - InstanceKeyData: keyData, - Operation: ctx.Evaluator.Operation, - } - scope := 
ctx.Evaluator.Scope(data, self) - - // ctx.PathValue is the path of the module that contains whatever - // expression the caller will be trying to evaluate, so this will - // activate only the experiments from that particular module, to - // be consistent with how experiment checking in the "configs" - // package itself works. The nil check here is for robustness in - // incompletely-mocked testing situations; mc should never be nil in - // real situations. - if mc := ctx.Evaluator.Config.DescendentForInstance(ctx.PathValue); mc != nil { - scope.SetActiveExperiments(mc.Module.ActiveExperiments) - } - return scope -} - -func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { - if !ctx.pathSet { - panic("context path not set") - } - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetRootModuleArgument(addr addrs.InputVariable, v cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - log.Printf("[TRACE] BuiltinEvalContext: Storing final value for variable %s", addr.Absolute(addrs.RootModuleInstance)) - key := addrs.RootModuleInstance.String() - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = args - } - args[addr.Name] = v -} - -func (ctx *BuiltinEvalContext) SetModuleCallArgument(callAddr addrs.ModuleCallInstance, varAddr addrs.InputVariable, v cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - if !ctx.pathSet { - panic("context path not set") - } - - childPath := callAddr.ModuleInstance(ctx.PathValue) - log.Printf("[TRACE] BuiltinEvalContext: Storing final value for variable %s", varAddr.Absolute(childPath)) - key := childPath.String() - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = args - } - args[varAddr.Name] = v -} - -func (ctx *BuiltinEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - ctx.VariableValuesLock.Lock() 
- defer ctx.VariableValuesLock.Unlock() - - modKey := addr.Module.String() - modVars := ctx.VariableValues[modKey] - val, ok := modVars[addr.Variable.Name] - if !ok { - return cty.DynamicVal - } - return val -} - -func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { - return ctx.ChangesValue -} - -func (ctx *BuiltinEvalContext) State() *states.SyncState { - return ctx.StateValue -} - -func (ctx *BuiltinEvalContext) Checks() *checks.State { - return ctx.ChecksValue -} - -func (ctx *BuiltinEvalContext) RefreshState() *states.SyncState { - return ctx.RefreshStateValue -} - -func (ctx *BuiltinEvalContext) PrevRunState() *states.SyncState { - return ctx.PrevRunStateValue -} - -func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander { - return ctx.InstanceExpanderValue -} - -func (ctx *BuiltinEvalContext) MoveResults() refactoring.MoveResults { - return ctx.MoveResultsValue -} diff --git a/internal/terraform/eval_context_builtin_test.go b/internal/terraform/eval_context_builtin_test.go deleted file mode 100644 index 0db0096a75ed..000000000000 --- a/internal/terraform/eval_context_builtin_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "reflect" - "sync" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty/cty" -) - -func TestBuiltinEvalContextProviderInput(t *testing.T) { - var lock sync.Mutex - cache := make(map[string]map[string]cty.Value) - - ctx1 := testBuiltinEvalContext(t) - ctx1 = ctx1.WithPath(addrs.RootModuleInstance).(*BuiltinEvalContext) - ctx1.ProviderInputConfig = cache - ctx1.ProviderLock = &lock - - ctx2 := testBuiltinEvalContext(t) - ctx2 = ctx2.WithPath(addrs.RootModuleInstance.Child("child", addrs.NoKey)).(*BuiltinEvalContext) - ctx2.ProviderInputConfig = cache - ctx2.ProviderLock = &lock - - providerAddr1 := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - 
providerAddr2 := addrs.AbsProviderConfig{ - Module: addrs.RootModule.Child("child"), - Provider: addrs.NewDefaultProvider("foo"), - } - - expected1 := map[string]cty.Value{"value": cty.StringVal("foo")} - ctx1.SetProviderInput(providerAddr1, expected1) - - try2 := map[string]cty.Value{"value": cty.StringVal("bar")} - ctx2.SetProviderInput(providerAddr2, try2) // ignored because not a root module - - actual1 := ctx1.ProviderInput(providerAddr1) - actual2 := ctx2.ProviderInput(providerAddr2) - - if !reflect.DeepEqual(actual1, expected1) { - t.Errorf("wrong result 1\ngot: %#v\nwant: %#v", actual1, expected1) - } - if actual2 != nil { - t.Errorf("wrong result 2\ngot: %#v\nwant: %#v", actual2, nil) - } -} - -func TestBuildingEvalContextInitProvider(t *testing.T) { - var lock sync.Mutex - - testP := &MockProvider{} - - ctx := testBuiltinEvalContext(t) - ctx = ctx.WithPath(addrs.RootModuleInstance).(*BuiltinEvalContext) - ctx.ProviderLock = &lock - ctx.ProviderCache = make(map[string]providers.Interface) - ctx.Plugins = newContextPlugins(map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): providers.FactoryFixed(testP), - }, nil) - - providerAddrDefault := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - } - providerAddrAlias := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - Alias: "foo", - } - - _, err := ctx.InitProvider(providerAddrDefault) - if err != nil { - t.Fatalf("error initializing provider test: %s", err) - } - _, err = ctx.InitProvider(providerAddrAlias) - if err != nil { - t.Fatalf("error initializing provider test.foo: %s", err) - } -} - -func testBuiltinEvalContext(t *testing.T) *BuiltinEvalContext { - return &BuiltinEvalContext{} -} diff --git a/internal/terraform/eval_context_mock.go b/internal/terraform/eval_context_mock.go deleted file mode 100644 index 24159ef95502..000000000000 --- a/internal/terraform/eval_context_mock.go +++ 
/dev/null @@ -1,401 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderType string - InitProviderAddr addrs.AbsProviderConfig - InitProviderProvider providers.Interface - InitProviderError error - - ProviderCalled bool - ProviderAddr addrs.AbsProviderConfig - ProviderProvider providers.Interface - - ProviderSchemaCalled bool - ProviderSchemaAddr addrs.AbsProviderConfig - ProviderSchemaSchema *ProviderSchema - ProviderSchemaError error - - CloseProviderCalled bool - CloseProviderAddr addrs.AbsProviderConfig - CloseProviderProvider providers.Interface - - ProviderInputCalled bool - ProviderInputAddr addrs.AbsProviderConfig - ProviderInputValues map[string]cty.Value - - SetProviderInputCalled bool - SetProviderInputAddr addrs.AbsProviderConfig - SetProviderInputValues map[string]cty.Value - - ConfigureProviderFn func( - addr addrs.AbsProviderConfig, - cfg cty.Value) tfdiags.Diagnostics // overrides the other values below, if set - 
ConfigureProviderCalled bool - ConfigureProviderAddr addrs.AbsProviderConfig - ConfigureProviderConfig cty.Value - ConfigureProviderDiags tfdiags.Diagnostics - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner provisioners.Interface - - ProvisionerSchemaCalled bool - ProvisionerSchemaName string - ProvisionerSchemaSchema *configschema.Block - ProvisionerSchemaError error - - CloseProvisionersCalled bool - - EvaluateBlockCalled bool - EvaluateBlockBody hcl.Body - EvaluateBlockSchema *configschema.Block - EvaluateBlockSelf addrs.Referenceable - EvaluateBlockKeyData InstanceKeyEvalData - EvaluateBlockResultFunc func( - body hcl.Body, - schema *configschema.Block, - self addrs.Referenceable, - keyData InstanceKeyEvalData, - ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateBlockResult cty.Value - EvaluateBlockExpandedBody hcl.Body - EvaluateBlockDiags tfdiags.Diagnostics - - EvaluateExprCalled bool - EvaluateExprExpr hcl.Expression - EvaluateExprWantType cty.Type - EvaluateExprSelf addrs.Referenceable - EvaluateExprResultFunc func( - expr hcl.Expression, - wantType cty.Type, - self addrs.Referenceable, - ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateExprResult cty.Value - EvaluateExprDiags tfdiags.Diagnostics - - EvaluationScopeCalled bool - EvaluationScopeSelf addrs.Referenceable - EvaluationScopeKeyData InstanceKeyEvalData - EvaluationScopeScope *lang.Scope - - PathCalled bool - PathPath addrs.ModuleInstance - - SetRootModuleArgumentCalled bool - SetRootModuleArgumentAddr addrs.InputVariable - SetRootModuleArgumentValue cty.Value - SetRootModuleArgumentFunc func(addr addrs.InputVariable, v cty.Value) - - SetModuleCallArgumentCalled bool - SetModuleCallArgumentModuleCall addrs.ModuleCallInstance - SetModuleCallArgumentVariable addrs.InputVariable - SetModuleCallArgumentValue cty.Value - SetModuleCallArgumentFunc func(callAddr addrs.ModuleCallInstance, 
varAddr addrs.InputVariable, v cty.Value) - - GetVariableValueCalled bool - GetVariableValueAddr addrs.AbsInputVariableInstance - GetVariableValueValue cty.Value - GetVariableValueFunc func(addr addrs.AbsInputVariableInstance) cty.Value // supersedes GetVariableValueValue - - ChangesCalled bool - ChangesChanges *plans.ChangesSync - - StateCalled bool - StateState *states.SyncState - - ChecksCalled bool - ChecksState *checks.State - - RefreshStateCalled bool - RefreshStateState *states.SyncState - - PrevRunStateCalled bool - PrevRunStateState *states.SyncState - - MoveResultsCalled bool - MoveResultsResults refactoring.MoveResults - - InstanceExpanderCalled bool - InstanceExpanderExpander *instances.Expander -} - -// MockEvalContext implements EvalContext -var _ EvalContext = (*MockEvalContext)(nil) - -func (c *MockEvalContext) Stopped() <-chan struct{} { - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { - c.InitProviderCalled = true - c.InitProviderType = addr.String() - c.InitProviderAddr = addr - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - c.ProviderCalled = true - c.ProviderAddr = addr - return c.ProviderProvider -} - -func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) (*ProviderSchema, error) { - c.ProviderSchemaCalled = true - c.ProviderSchemaAddr = addr - return c.ProviderSchemaSchema, c.ProviderSchemaError -} - -func (c *MockEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { - c.CloseProviderCalled = true - 
c.CloseProviderAddr = addr - return nil -} - -func (c *MockEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - - c.ConfigureProviderCalled = true - c.ConfigureProviderAddr = addr - c.ConfigureProviderConfig = cfg - if c.ConfigureProviderFn != nil { - return c.ConfigureProviderFn(addr, cfg) - } - return c.ConfigureProviderDiags -} - -func (c *MockEvalContext) ProviderInput(addr addrs.AbsProviderConfig) map[string]cty.Value { - c.ProviderInputCalled = true - c.ProviderInputAddr = addr - return c.ProviderInputValues -} - -func (c *MockEvalContext) SetProviderInput(addr addrs.AbsProviderConfig, vals map[string]cty.Value) { - c.SetProviderInputCalled = true - c.SetProviderInputAddr = addr - c.SetProviderInputValues = vals -} - -func (c *MockEvalContext) Provisioner(n string) (provisioners.Interface, error) { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner, nil -} - -func (c *MockEvalContext) ProvisionerSchema(n string) (*configschema.Block, error) { - c.ProvisionerSchemaCalled = true - c.ProvisionerSchemaName = n - return c.ProvisionerSchemaSchema, c.ProvisionerSchemaError -} - -func (c *MockEvalContext) CloseProvisioners() error { - c.CloseProvisionersCalled = true - return nil -} - -func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - c.EvaluateBlockCalled = true - c.EvaluateBlockBody = body - c.EvaluateBlockSchema = schema - c.EvaluateBlockSelf = self - c.EvaluateBlockKeyData = keyData - if c.EvaluateBlockResultFunc != nil { - return c.EvaluateBlockResultFunc(body, schema, self, keyData) - } - return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags -} - -func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - c.EvaluateExprCalled = true - 
c.EvaluateExprExpr = expr - c.EvaluateExprWantType = wantType - c.EvaluateExprSelf = self - if c.EvaluateExprResultFunc != nil { - return c.EvaluateExprResultFunc(expr, wantType, self) - } - return c.EvaluateExprResult, c.EvaluateExprDiags -} - -func (c *MockEvalContext) EvaluateReplaceTriggeredBy(hcl.Expression, instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) { - return nil, false, nil -} - -// installSimpleEval is a helper to install a simple mock implementation of -// both EvaluateBlock and EvaluateExpr into the receiver. -// -// These default implementations will either evaluate the given input against -// the scope in field EvaluationScopeScope or, if it is nil, with no eval -// context at all so that only constant values may be used. -// -// This function overwrites any existing functions installed in fields -// EvaluateBlockResultFunc and EvaluateExprResultFunc. -func (c *MockEvalContext) installSimpleEval() { - c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - var diags tfdiags.Diagnostics - body, diags = scope.ExpandBlock(body, schema) - if diags.HasErrors() { - return cty.DynamicVal, body, diags - } - val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return cty.DynamicVal, body, diags - } - return val, body, diags - } - - // Fallback codepath supporting constant values only. - val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) - return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) - } - c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. 
- return scope.EvalExpr(expr, wantType) - } - - // Fallback codepath supporting constant values only. - var diags tfdiags.Diagnostics - val, hclDiags := expr.Value(nil) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return cty.DynamicVal, diags - } - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - diags = diags.Append(err) - return cty.DynamicVal, diags - } - return val, diags - } -} - -func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - c.EvaluationScopeCalled = true - c.EvaluationScopeSelf = self - c.EvaluationScopeKeyData = keyData - return c.EvaluationScopeScope -} - -func (c *MockEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { - newC := *c - newC.PathPath = path - return &newC -} - -func (c *MockEvalContext) Path() addrs.ModuleInstance { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetRootModuleArgument(addr addrs.InputVariable, v cty.Value) { - c.SetRootModuleArgumentCalled = true - c.SetRootModuleArgumentAddr = addr - c.SetRootModuleArgumentValue = v - if c.SetRootModuleArgumentFunc != nil { - c.SetRootModuleArgumentFunc(addr, v) - } -} - -func (c *MockEvalContext) SetModuleCallArgument(callAddr addrs.ModuleCallInstance, varAddr addrs.InputVariable, v cty.Value) { - c.SetModuleCallArgumentCalled = true - c.SetModuleCallArgumentModuleCall = callAddr - c.SetModuleCallArgumentVariable = varAddr - c.SetModuleCallArgumentValue = v - if c.SetModuleCallArgumentFunc != nil { - c.SetModuleCallArgumentFunc(callAddr, varAddr, v) - } -} - -func (c *MockEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - c.GetVariableValueCalled = true - c.GetVariableValueAddr = addr - if c.GetVariableValueFunc != nil { - return c.GetVariableValueFunc(addr) - } - return c.GetVariableValueValue -} - -func (c *MockEvalContext) Changes() *plans.ChangesSync { - c.ChangesCalled = true - return c.ChangesChanges -} - 
-func (c *MockEvalContext) State() *states.SyncState { - c.StateCalled = true - return c.StateState -} - -func (c *MockEvalContext) Checks() *checks.State { - c.ChecksCalled = true - return c.ChecksState -} - -func (c *MockEvalContext) RefreshState() *states.SyncState { - c.RefreshStateCalled = true - return c.RefreshStateState -} - -func (c *MockEvalContext) PrevRunState() *states.SyncState { - c.PrevRunStateCalled = true - return c.PrevRunStateState -} - -func (c *MockEvalContext) MoveResults() refactoring.MoveResults { - c.MoveResultsCalled = true - return c.MoveResultsResults -} - -func (c *MockEvalContext) InstanceExpander() *instances.Expander { - c.InstanceExpanderCalled = true - return c.InstanceExpanderExpander -} diff --git a/internal/terraform/eval_count.go b/internal/terraform/eval_count.go deleted file mode 100644 index d4ab1a998f60..000000000000 --- a/internal/terraform/eval_count.go +++ /dev/null @@ -1,107 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// evaluateCountExpression is our standard mechanism for interpreting an -// expression given for a "count" argument on a resource or a module. This -// should be called during expansion in order to determine the final count -// value. -// -// evaluateCountExpression differs from evaluateCountExpressionValue by -// returning an error if the count value is not known, and converting the -// cty.Value to an integer. -func evaluateCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { - countVal, diags := evaluateCountExpressionValue(expr, ctx) - if !countVal.IsKnown() { - // Currently this is a rather bad outcome from a UX standpoint, since we have - // no real mechanism to deal with this situation and all we can do is produce - // an error message. 
- // FIXME: In future, implement a built-in mechanism for deferring changes that - // can't yet be predicted, and use it to guide the user through several - // plan/apply steps until the desired configuration is eventually reached. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`, - Subject: expr.Range().Ptr(), - - // TODO: Also populate Expression and EvalContext in here, but - // we can't easily do that right now because the hcl.EvalContext - // (which is not the same as the ctx we have in scope here) is - // hidden away inside evaluateCountExpressionValue. - Extra: diagnosticCausedByUnknown(true), - }) - } - - if countVal.IsNull() || !countVal.IsKnown() { - return -1, diags - } - - count, _ := countVal.AsBigFloat().Int64() - return int(count), diags -} - -// evaluateCountExpressionValue is like evaluateCountExpression -// except that it returns a cty.Value which must be a cty.Number and can be -// unknown. 
-func evaluateCountExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - nullCount := cty.NullVal(cty.Number) - if expr == nil { - return nullCount, nil - } - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return nullCount, diags - } - - // Unmark the count value, sensitive values are allowed in count but not for_each, - // as using it here will not disclose the sensitive value - countVal, _ = countVal.Unmark() - - switch { - case countVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - - case !countVal.IsKnown(): - return cty.UnknownVal(cty.Number), diags - } - - var count int - err := gocty.FromCtyValue(countVal, &count) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - } - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: must be greater than or equal to zero.`, - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - } - - return countVal, diags -} diff --git a/internal/terraform/eval_count_test.go b/internal/terraform/eval_count_test.go deleted file mode 100644 index 8d3a51b48813..000000000000 --- a/internal/terraform/eval_count_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package terraform - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - 
"github.com/hashicorp/terraform/internal/lang/marks" - "github.com/zclconf/go-cty/cty" -) - -func TestEvaluateCountExpression(t *testing.T) { - tests := map[string]struct { - Expr hcl.Expression - Count int - }{ - "zero": { - hcltest.MockExprLiteral(cty.NumberIntVal(0)), - 0, - }, - "expression with marked value": { - hcltest.MockExprLiteral(cty.NumberIntVal(8).Mark(marks.Sensitive)), - 8, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - countVal, diags := evaluateCountExpression(test.Expr, ctx) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - - if !reflect.DeepEqual(countVal, test.Count) { - t.Errorf( - "wrong map value\ngot: %swant: %s", - spew.Sdump(countVal), spew.Sdump(test.Count), - ) - } - }) - } -} diff --git a/internal/terraform/eval_for_each.go b/internal/terraform/eval_for_each.go deleted file mode 100644 index 3c80ebff01d3..000000000000 --- a/internal/terraform/eval_for_each.go +++ /dev/null @@ -1,193 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// evaluateForEachExpression is our standard mechanism for interpreting an -// expression given for a "for_each" argument on a resource or a module. This -// should be called during expansion in order to determine the final keys and -// values. -// -// evaluateForEachExpression differs from evaluateForEachExpressionValue by -// returning an error if the count value is not known, and converting the -// cty.Value to a map[string]cty.Value for compatibility with other calls. 
-func evaluateForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { - forEachVal, diags := evaluateForEachExpressionValue(expr, ctx, false) - // forEachVal might be unknown, but if it is then there should already - // be an error about it in diags, which we'll return below. - - if forEachVal.IsNull() || !forEachVal.IsKnown() || markSafeLengthInt(forEachVal) == 0 { - // we check length, because an empty set return a nil map - return map[string]cty.Value{}, diags - } - - return forEachVal.AsValueMap(), diags -} - -// evaluateForEachExpressionValue is like evaluateForEachExpression -// except that it returns a cty.Value map or set which can be unknown. -func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext, allowUnknown bool) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - nullMap := cty.NullVal(cty.Map(cty.DynamicPseudoType)) - - if expr == nil { - return nullMap, diags - } - - refs, moreDiags := lang.ReferencesInExpr(expr) - diags = diags.Append(moreDiags) - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - var hclCtx *hcl.EvalContext - if scope != nil { - hclCtx, moreDiags = scope.EvalContext(refs) - } else { - // This shouldn't happen in real code, but it can unfortunately arise - // in unit tests due to incompletely-implemented mocks. 
:( - hclCtx = &hcl.EvalContext{} - } - diags = diags.Append(moreDiags) - if diags.HasErrors() { // Can't continue if we don't even have a valid scope - return nullMap, diags - } - - forEachVal, forEachDiags := expr.Value(hclCtx) - diags = diags.Append(forEachDiags) - - // If a whole map is marked, or a set contains marked values (which means the set is then marked) - // give an error diagnostic as this value cannot be used in for_each - if forEachVal.HasMark(marks.Sensitive) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - Extra: diagnosticCausedBySensitive(true), - }) - } - - if diags.HasErrors() { - return nullMap, diags - } - ty := forEachVal.Type() - - const errInvalidUnknownDetailMap = "The \"for_each\" map includes keys derived from resource attributes that cannot be determined until apply, and so Terraform cannot determine the full set of keys that will identify the instances of this resource.\n\nWhen working with unknown values in for_each, it's better to define the map keys statically in your configuration and place apply-time results only in the map values.\n\nAlternatively, you could use the -target planning option to first apply only the resources that the for_each value depends on, and then apply a second time to fully converge." 
- const errInvalidUnknownDetailSet = "The \"for_each\" set includes values derived from resource attributes that cannot be determined until apply, and so Terraform cannot determine the full set of keys that will identify the instances of this resource.\n\nWhen working with unknown values in for_each, it's better to use a map value where the keys are defined statically in your configuration and where only the values contain apply-time results.\n\nAlternatively, you could use the -target planning option to first apply only the resources that the for_each value depends on, and then apply a second time to fully converge." - - switch { - case forEachVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. A map, or set of strings is allowed.`, - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return nullMap, diags - case !forEachVal.IsKnown(): - if !allowUnknown { - var detailMsg string - switch { - case ty.IsSetType(): - detailMsg = errInvalidUnknownDetailSet - default: - detailMsg = errInvalidUnknownDetailMap - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: detailMsg, - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - Extra: diagnosticCausedByUnknown(true), - }) - } - // ensure that we have a map, and not a DynamicValue - return cty.UnknownVal(cty.Map(cty.DynamicPseudoType)), diags - - case !(ty.IsMapType() || ty.IsSetType() || ty.IsObjectType()): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, ty.FriendlyName()), - Subject: 
expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return nullMap, diags - - case markSafeLengthInt(forEachVal) == 0: - // If the map is empty ({}), return an empty map, because cty will - // return nil when representing {} AsValueMap. This also covers an empty - // set (toset([])) - return forEachVal, diags - } - - if ty.IsSetType() { - // since we can't use a set values that are unknown, we treat the - // entire set as unknown - if !forEachVal.IsWhollyKnown() { - if !allowUnknown { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: errInvalidUnknownDetailSet, - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - Extra: diagnosticCausedByUnknown(true), - }) - } - return cty.UnknownVal(ty), diags - } - - if ty.ElementType() != cty.String { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return cty.NullVal(ty), diags - } - - // A set of strings may contain null, which makes it impossible to - // convert to a map, so we must return an error - it := forEachVal.ElementIterator() - for it.Next() { - item, _ := it.Element() - if item.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: `The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`, - Subject: expr.Range().Ptr(), - Expression: expr, - EvalContext: hclCtx, - }) - return cty.NullVal(ty), diags - } - } - } - - return forEachVal, nil -} - -// markSafeLengthInt allows calling LengthInt on marked values safely -func markSafeLengthInt(val 
cty.Value) int { - v, _ := val.UnmarkDeep() - return v.LengthInt() -} diff --git a/internal/terraform/eval_for_each_test.go b/internal/terraform/eval_for_each_test.go deleted file mode 100644 index 05dba9cabc46..000000000000 --- a/internal/terraform/eval_for_each_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package terraform - -import ( - "reflect" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestEvaluateForEachExpression_valid(t *testing.T) { - tests := map[string]struct { - Expr hcl.Expression - ForEachMap map[string]cty.Value - }{ - "empty set": { - hcltest.MockExprLiteral(cty.SetValEmpty(cty.String)), - map[string]cty.Value{}, - }, - "multi-value string set": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), - map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.StringVal("b"), - }, - }, - "empty map": { - hcltest.MockExprLiteral(cty.MapValEmpty(cty.Bool)), - map[string]cty.Value{}, - }, - "map": { - hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ - "a": cty.BoolVal(true), - "b": cty.BoolVal(false), - })), - map[string]cty.Value{ - "a": cty.BoolVal(true), - "b": cty.BoolVal(false), - }, - }, - "map containing unknown values": { - hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ - "a": cty.UnknownVal(cty.Bool), - "b": cty.UnknownVal(cty.Bool), - })), - map[string]cty.Value{ - "a": cty.UnknownVal(cty.Bool), - "b": cty.UnknownVal(cty.Bool), - }, - }, - "map containing sensitive values, but strings are literal": { - hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ - "a": cty.BoolVal(true).Mark(marks.Sensitive), - "b": cty.BoolVal(false), - })), - map[string]cty.Value{ - "a": cty.BoolVal(true).Mark(marks.Sensitive), - "b": cty.BoolVal(false), - }, - }, - } - 
- for name, test := range tests { - t.Run(name, func(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - forEachMap, diags := evaluateForEachExpression(test.Expr, ctx) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - - if !reflect.DeepEqual(forEachMap, test.ForEachMap) { - t.Errorf( - "wrong map value\ngot: %swant: %s", - spew.Sdump(forEachMap), spew.Sdump(test.ForEachMap), - ) - } - - }) - } -} - -func TestEvaluateForEachExpression_errors(t *testing.T) { - tests := map[string]struct { - Expr hcl.Expression - Summary, DetailSubstring string - CausedByUnknown, CausedBySensitive bool - }{ - "null set": { - hcltest.MockExprLiteral(cty.NullVal(cty.Set(cty.String))), - "Invalid for_each argument", - `the given "for_each" argument value is null`, - false, false, - }, - "string": { - hcltest.MockExprLiteral(cty.StringVal("i am definitely a set")), - "Invalid for_each argument", - "must be a map, or set of strings, and you have provided a value of type string", - false, false, - }, - "list": { - hcltest.MockExprLiteral(cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})), - "Invalid for_each argument", - "must be a map, or set of strings, and you have provided a value of type list", - false, false, - }, - "tuple": { - hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), - "Invalid for_each argument", - "must be a map, or set of strings, and you have provided a value of type tuple", - false, false, - }, - "unknown string set": { - hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.String))), - "Invalid for_each argument", - "set includes values derived from resource attributes that cannot be determined until apply", - true, false, - }, - "unknown map": { - hcltest.MockExprLiteral(cty.UnknownVal(cty.Map(cty.Bool))), - "Invalid for_each argument", - "map includes keys derived from resource attributes that cannot be determined until apply", - true, false, - }, - 
"marked map": { - hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ - "a": cty.BoolVal(true), - "b": cty.BoolVal(false), - }).Mark(marks.Sensitive)), - "Invalid for_each argument", - "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", - false, true, - }, - "set containing booleans": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.BoolVal(true)})), - "Invalid for_each set argument", - "supports maps and sets of strings, but you have provided a set containing type bool", - false, false, - }, - "set containing null": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.NullVal(cty.String)})), - "Invalid for_each set argument", - "must not contain null values", - false, false, - }, - "set containing unknown value": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.UnknownVal(cty.String)})), - "Invalid for_each argument", - "set includes values derived from resource attributes that cannot be determined until apply", - true, false, - }, - "set containing dynamic unknown value": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.UnknownVal(cty.DynamicPseudoType)})), - "Invalid for_each argument", - "set includes values derived from resource attributes that cannot be determined until apply", - true, false, - }, - "set containing marked values": { - hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.StringVal("beep").Mark(marks.Sensitive), cty.StringVal("boop")})), - "Invalid for_each argument", - "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. 
If used, the sensitive value could be exposed as a resource instance key.", - false, true, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - _, diags := evaluateForEachExpression(test.Expr, ctx) - - if len(diags) != 1 { - t.Fatalf("got %d diagnostics; want 1", diags) - } - if got, want := diags[0].Severity(), tfdiags.Error; got != want { - t.Errorf("wrong diagnostic severity %#v; want %#v", got, want) - } - if got, want := diags[0].Description().Summary, test.Summary; got != want { - t.Errorf("wrong diagnostic summary\ngot: %s\nwant: %s", got, want) - } - if got, want := diags[0].Description().Detail, test.DetailSubstring; !strings.Contains(got, want) { - t.Errorf("wrong diagnostic detail\ngot: %s\nwant substring: %s", got, want) - } - if fromExpr := diags[0].FromExpr(); fromExpr != nil { - if fromExpr.Expression == nil { - t.Errorf("diagnostic does not refer to an expression") - } - if fromExpr.EvalContext == nil { - t.Errorf("diagnostic does not refer to an EvalContext") - } - } else { - t.Errorf("diagnostic does not support FromExpr\ngot: %s", spew.Sdump(diags[0])) - } - - if got, want := tfdiags.DiagnosticCausedByUnknown(diags[0]), test.CausedByUnknown; got != want { - t.Errorf("wrong result from tfdiags.DiagnosticCausedByUnknown\ngot: %#v\nwant: %#v", got, want) - } - if got, want := tfdiags.DiagnosticCausedBySensitive(diags[0]), test.CausedBySensitive; got != want { - t.Errorf("wrong result from tfdiags.DiagnosticCausedBySensitive\ngot: %#v\nwant: %#v", got, want) - } - }) - } -} - -func TestEvaluateForEachExpressionKnown(t *testing.T) { - tests := map[string]hcl.Expression{ - "unknown string set": hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.String))), - "unknown map": hcltest.MockExprLiteral(cty.UnknownVal(cty.Map(cty.Bool))), - } - - for name, expr := range tests { - t.Run(name, func(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - forEachVal, 
diags := evaluateForEachExpressionValue(expr, ctx, true) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - - if forEachVal.IsKnown() { - t.Error("got known, want unknown") - } - }) - } -} diff --git a/internal/terraform/eval_provider.go b/internal/terraform/eval_provider.go deleted file mode 100644 index a97f347e404f..000000000000 --- a/internal/terraform/eval_provider.go +++ /dev/null @@ -1,59 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/providers" -) - -func buildProviderConfig(ctx EvalContext, addr addrs.AbsProviderConfig, config *configs.Provider) hcl.Body { - var configBody hcl.Body - if config != nil { - configBody = config.Config - } - - var inputBody hcl.Body - inputConfig := ctx.ProviderInput(addr) - if len(inputConfig) > 0 { - inputBody = configs.SynthBody("", inputConfig) - } - - switch { - case configBody != nil && inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) - return hcl.MergeBodies([]hcl.Body{inputBody, configBody}) - case configBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) - return configBody - case inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) - return inputBody - default: - log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) - return hcl.EmptyBody() - } -} - -// getProvider returns the providers.Interface and schema for a given provider. 
-func getProvider(ctx EvalContext, addr addrs.AbsProviderConfig) (providers.Interface, *ProviderSchema, error) { - if addr.Provider.Type == "" { - // Should never happen - panic("GetProvider used with uninitialized provider configuration address") - } - provider := ctx.Provider(addr) - if provider == nil { - return nil, &ProviderSchema{}, fmt.Errorf("provider %s not initialized", addr) - } - // Not all callers require a schema, so we will leave checking for a nil - // schema to the callers. - schema, err := ctx.ProviderSchema(addr) - if err != nil { - return nil, &ProviderSchema{}, fmt.Errorf("failed to read schema for provider %s: %w", addr, err) - } - return provider, schema, nil -} diff --git a/internal/terraform/eval_provider_test.go b/internal/terraform/eval_provider_test.go deleted file mode 100644 index 0a1aeca703e9..000000000000 --- a/internal/terraform/eval_provider_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" -) - -func TestBuildProviderConfig(t *testing.T) { - configBody := configs.SynthBody("", map[string]cty.Value{ - "set_in_config": cty.StringVal("config"), - }) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - - ctx := &MockEvalContext{ - // The input values map is expected to contain only keys that aren't - // already present in the config, since we skip prompting for - // attributes that are already set. 
- ProviderInputValues: map[string]cty.Value{ - "set_by_input": cty.StringVal("input"), - }, - } - gotBody := buildProviderConfig(ctx, providerAddr, &configs.Provider{ - Name: "foo", - Config: configBody, - }) - - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "set_in_config": {Type: cty.String, Optional: true}, - "set_by_input": {Type: cty.String, Optional: true}, - }, - } - got, diags := hcldec.Decode(gotBody, schema.DecoderSpec(), nil) - if diags.HasErrors() { - t.Fatalf("body decode failed: %s", diags.Error()) - } - - // We expect the provider config with the added input value - want := cty.ObjectVal(map[string]cty.Value{ - "set_in_config": cty.StringVal("config"), - "set_by_input": cty.StringVal("input"), - }) - if !got.RawEquals(want) { - t.Fatalf("incorrect merged config\ngot: %#v\nwant: %#v", got, want) - } -} diff --git a/internal/terraform/eval_variable.go b/internal/terraform/eval_variable.go deleted file mode 100644 index 8878886383e4..000000000000 --- a/internal/terraform/eval_variable.go +++ /dev/null @@ -1,394 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func prepareFinalInputVariableValue(addr addrs.AbsInputVariableInstance, raw *InputValue, cfg *configs.Variable) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - convertTy := cfg.ConstraintType - log.Printf("[TRACE] prepareFinalInputVariableValue: preparing %s", addr) - - var defaultVal cty.Value - if cfg.Default != cty.NilVal { - log.Printf("[TRACE] prepareFinalInputVariableValue: %s has a default value", addr) - var err error - defaultVal, err = 
convert.Convert(cfg.Default, convertTy) - if err != nil { - // Validation of the declaration should typically catch this, - // but we'll check it here too to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for module argument", - Detail: fmt.Sprintf( - "The default value for variable %q is incompatible with its type constraint: %s.", - cfg.Name, err, - ), - Subject: &cfg.DeclRange, - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - return cty.UnknownVal(cfg.Type), diags - } - } - - var sourceRange tfdiags.SourceRange - var nonFileSource string - if raw.HasSourceRange() { - sourceRange = raw.SourceRange - } else { - // If the value came from a place that isn't a file and thus doesn't - // have its own source range, we'll use the declaration range as - // our source range and generate some slightly different error - // messages. - sourceRange = tfdiags.SourceRangeFromHCL(cfg.DeclRange) - switch raw.SourceType { - case ValueFromCLIArg: - nonFileSource = fmt.Sprintf("set using -var=\"%s=...\"", addr.Variable.Name) - case ValueFromEnvVar: - nonFileSource = fmt.Sprintf("set using the TF_VAR_%s environment variable", addr.Variable.Name) - case ValueFromInput: - nonFileSource = "set using an interactive prompt" - default: - nonFileSource = "set from outside of the configuration" - } - } - - given := raw.Value - if given == cty.NilVal { // The variable wasn't set at all (even to null) - log.Printf("[TRACE] prepareFinalInputVariableValue: %s has no defined value", addr) - if cfg.Required() { - // NOTE: The CLI layer typically checks for itself whether all of - // the required _root_ module variables are set, which would - // mask this error with a more specific one that refers to the - // CLI features for setting such variables. We can get here for - // child module variables, though. 
- log.Printf("[ERROR] prepareFinalInputVariableValue: %s is required but is not set", addr) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Required variable not set`, - Detail: fmt.Sprintf(`The variable %q is required, but is not set.`, addr.Variable.Name), - Subject: cfg.DeclRange.Ptr(), - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - return cty.UnknownVal(cfg.Type), diags - } - - given = defaultVal // must be set, because we checked above that the variable isn't required - } - - // Apply defaults from the variable's type constraint to the converted value, - // unless the converted value is null. We do not apply defaults to top-level - // null values, as doing so could prevent assigning null to a nullable - // variable. - if cfg.TypeDefaults != nil && !given.IsNull() { - given = cfg.TypeDefaults.Apply(given) - } - - val, err := convert.Convert(given, convertTy) - if err != nil { - log.Printf("[ERROR] prepareFinalInputVariableValue: %s has unsuitable type\n got: %s\n want: %s", addr, given.Type(), convertTy) - var detail string - var subject *hcl.Range - if nonFileSource != "" { - detail = fmt.Sprintf( - "Unsuitable value for %s %s: %s.", - addr, nonFileSource, err, - ) - subject = cfg.DeclRange.Ptr() - } else { - detail = fmt.Sprintf( - "The given value is not suitable for %s declared at %s: %s.", - addr, cfg.DeclRange.String(), err, - ) - subject = sourceRange.ToHCL().Ptr() - - // In some workflows, the operator running terraform does not have access to the variables - // themselves. They are for example stored in encrypted files that will be used by the CI toolset - // and not by the operator directly. In such a case, the failing secret value should not be - // displayed to the operator - if cfg.Sensitive { - detail = fmt.Sprintf( - "The given value is not suitable for %s, which is sensitive: %s. 
Invalid value defined at %s.", - addr, err, sourceRange.ToHCL(), - ) - subject = cfg.DeclRange.Ptr() - } - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for input variable", - Detail: detail, - Subject: subject, - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - return cty.UnknownVal(cfg.Type), diags - } - - // By the time we get here, we know: - // - val matches the variable's type constraint - // - val is definitely not cty.NilVal, but might be a null value if the given was already null. - // - // That means we just need to handle the case where the value is null, - // which might mean we need to use the default value, or produce an error. - // - // For historical reasons we do this only for a "non-nullable" variable. - // Nullable variables just appear as null if they were set to null, - // regardless of any default value. - if val.IsNull() && !cfg.Nullable { - log.Printf("[TRACE] prepareFinalInputVariableValue: %s is defined as null", addr) - if defaultVal != cty.NilVal { - val = defaultVal - } else { - log.Printf("[ERROR] prepareFinalInputVariableValue: %s is non-nullable but set to null, and is required", addr) - if nonFileSource != "" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Required variable not set`, - Detail: fmt.Sprintf( - "Unsuitable value for %s %s: required variable may not be set to null.", - addr, nonFileSource, - ), - Subject: cfg.DeclRange.Ptr(), - }) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Required variable not set`, - Detail: fmt.Sprintf( - "The given value is not suitable for %s defined at %s: required variable may not be set to null.", - addr, cfg.DeclRange.String(), - ), - Subject: sourceRange.ToHCL().Ptr(), - }) - } - // Stub out our return value so that the semantic checker doesn't - // produce redundant downstream errors. 
- val = cty.UnknownVal(cfg.Type) - } - } - - return val, diags -} - -// evalVariableValidations ensures that all of the configured custom validations -// for a variable are passing. -// -// This must be used only after any side-effects that make the value of the -// variable available for use in expression evaluation, such as -// EvalModuleCallArgument for variables in descendent modules. -func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *configs.Variable, expr hcl.Expression, ctx EvalContext) (diags tfdiags.Diagnostics) { - if config == nil || len(config.Validations) == 0 { - log.Printf("[TRACE] evalVariableValidations: no validation rules declared for %s, so skipping", addr) - return nil - } - log.Printf("[TRACE] evalVariableValidations: validating %s", addr) - - // Variable nodes evaluate in the parent module to where they were declared - // because the value expression (n.Expr, if set) comes from the calling - // "module" block in the parent module. - // - // Validation expressions are statically validated (during configuration - // loading) to refer only to the variable being validated, so we can - // bypass our usual evaluation machinery here and just produce a minimal - // evaluation context containing just the required value, and thus avoid - // the problem that ctx's evaluation functions refer to the wrong module. - val := ctx.GetVariableValue(addr) - if val == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "No final value for variable", - Detail: fmt.Sprintf("Terraform doesn't have a final value for %s during validation. 
This is a bug in Terraform; please report it!", addr), - }) - return diags - } - hclCtx := &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - config.Name: val, - }), - }, - Functions: ctx.EvaluationScope(nil, EvalDataForNoInstanceKey).Functions(), - } - - for _, validation := range config.Validations { - const errInvalidCondition = "Invalid variable validation result" - const errInvalidValue = "Invalid value for variable" - var ruleDiags tfdiags.Diagnostics - - result, moreDiags := validation.Condition.Value(hclCtx) - ruleDiags = ruleDiags.Append(moreDiags) - errorValue, errorDiags := validation.ErrorMessage.Value(hclCtx) - - // The following error handling is a workaround to preserve backwards - // compatibility. Due to an implementation quirk, all prior versions of - // Terraform would treat error messages specified using JSON - // configuration syntax (.tf.json) as string literals, even if they - // contained the "${" template expression operator. This behaviour did - // not match that of HCL configuration syntax, where a template - // expression would result in a validation error. - // - // As a result, users writing or generating JSON configuration syntax - // may have specified error messages which are invalid template - // expressions. As we add support for error message expressions, we are - // unable to perfectly distinguish between these two cases. - // - // To ensure that we don't break backwards compatibility, we have the - // below fallback logic if the error message fails to evaluate. This - // should only have any effect for JSON configurations. The gohcl - // DecodeExpression function behaves differently when the source of the - // expression is a JSON configuration file and a nil context is passed. - if errorDiags.HasErrors() { - // Attempt to decode the expression as a string literal. Passing - // nil as the context forces a JSON syntax string value to be - // interpreted as a string literal. 
- var errorString string - moreErrorDiags := gohcl.DecodeExpression(validation.ErrorMessage, nil, &errorString) - if !moreErrorDiags.HasErrors() { - // Decoding succeeded, meaning that this is a JSON syntax - // string value. We rewrap that as a cty value to allow later - // decoding to succeed. - errorValue = cty.StringVal(errorString) - - // This warning diagnostic explains this odd behaviour, while - // giving us an escape hatch to change this to a hard failure - // in some future Terraform 1.x version. - errorDiags = hcl.Diagnostics{ - &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Validation error message expression is invalid", - Detail: fmt.Sprintf("The error message provided could not be evaluated as an expression, so Terraform is interpreting it as a string literal.\n\nIn future versions of Terraform, this will be considered an error. Please file a GitHub issue if this would break your workflow.\n\n%s", errorDiags.Error()), - Subject: validation.ErrorMessage.Range().Ptr(), - Context: validation.DeclRange.Ptr(), - Expression: validation.ErrorMessage, - EvalContext: hclCtx, - }, - } - } - - // We want to either report the original diagnostics if the - // fallback failed, or the warning generated above if it succeeded. - ruleDiags = ruleDiags.Append(errorDiags) - } - - diags = diags.Append(ruleDiags) - - if ruleDiags.HasErrors() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s check rule evaluation failed: %s", addr, validation.DeclRange, ruleDiags.Err().Error()) - } - if !result.IsKnown() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s condition value is unknown, so skipping validation for now", addr, validation.DeclRange) - continue // We'll wait until we've learned more, then. 
- } - if result.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: "Validation condition expression must return either true or false, not null.", - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - var err error - result, err = convert.Convert(result, cty.Bool) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: fmt.Sprintf("Invalid validation condition result value: %s.", tfdiags.FormatError(err)), - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - - // Validation condition may be marked if the input variable is bound to - // a sensitive value. This is irrelevant to the validation process, so - // we discard the marks now. - result, _ = result.Unmark() - - if result.True() { - continue - } - - var errorMessage string - if !errorDiags.HasErrors() && errorValue.IsKnown() && !errorValue.IsNull() { - var err error - errorValue, err = convert.Convert(errorValue, cty.String) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid error message", - Detail: fmt.Sprintf("Unsuitable value for error message: %s.", tfdiags.FormatError(err)), - Subject: validation.ErrorMessage.Range().Ptr(), - Expression: validation.ErrorMessage, - EvalContext: hclCtx, - }) - } else { - if marks.Has(errorValue, marks.Sensitive) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - - Summary: "Error message refers to sensitive values", - Detail: `The error expression used to explain this condition refers to sensitive values. Terraform will not display the resulting message. 
- -You can correct this by removing references to sensitive values, or by carefully using the nonsensitive() function if the expression will not reveal the sensitive data.`, - - Subject: validation.ErrorMessage.Range().Ptr(), - Expression: validation.ErrorMessage, - EvalContext: hclCtx, - }) - errorMessage = "The error message included a sensitive value, so it will not be displayed." - } else { - errorMessage = strings.TrimSpace(errorValue.AsString()) - } - } - } - if errorMessage == "" { - errorMessage = "Failed to evaluate condition error message." - } - - if expr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", errorMessage, validation.DeclRange.String()), - Subject: expr.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - } else { - // Since we don't have a source expression for a root module - // variable, we'll just report the error from the perspective - // of the variable declaration itself. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", errorMessage, validation.DeclRange.String()), - Subject: config.DeclRange.Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - } - } - - return diags -} diff --git a/internal/terraform/eval_variable_test.go b/internal/terraform/eval_variable_test.go deleted file mode 100644 index 3821d8f8ca00..000000000000 --- a/internal/terraform/eval_variable_test.go +++ /dev/null @@ -1,1345 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestPrepareFinalInputVariableValue(t *testing.T) { - // This is just a concise way to define a bunch of *configs.Variable - // objects to use in our tests below. We're only going to decode this - // config, not fully evaluate it. 
- cfgSrc := ` - variable "nullable_required" { - } - variable "nullable_optional_default_string" { - default = "hello" - } - variable "nullable_optional_default_null" { - default = null - } - variable "constrained_string_nullable_required" { - type = string - } - variable "constrained_string_nullable_optional_default_string" { - type = string - default = "hello" - } - variable "constrained_string_nullable_optional_default_bool" { - type = string - default = true - } - variable "constrained_string_nullable_optional_default_null" { - type = string - default = null - } - variable "required" { - nullable = false - } - variable "optional_default_string" { - nullable = false - default = "hello" - } - variable "constrained_string_required" { - nullable = false - type = string - } - variable "constrained_string_optional_default_string" { - nullable = false - type = string - default = "hello" - } - variable "constrained_string_optional_default_bool" { - nullable = false - type = string - default = true - } - variable "constrained_string_sensitive_required" { - sensitive = true - nullable = false - type = string - } - variable "complex_type_with_nested_default_optional" { - type = set(object({ - name = string - schedules = set(object({ - name = string - cold_storage_after = optional(number, 10) - })) - })) - } - variable "complex_type_with_nested_complex_types" { - type = object({ - name = string - nested_object = object({ - name = string - value = optional(string, "foo") - }) - nested_object_with_default = optional(object({ - name = string - value = optional(string, "bar") - }), { - name = "nested_object_with_default" - }) - }) - } - // https://github.com/hashicorp/terraform/issues/32152 - // This variable was originally added to test that optional attribute - // metadata is stripped from empty default collections. Essentially, you - // should be able to mix and match custom and default values for the - // optional_list attribute. 
- variable "complex_type_with_empty_default_and_nested_optional" { - type = list(object({ - name = string - optional_list = optional(list(object({ - string = string - optional_string = optional(string) - })), []) - })) - } - // https://github.com/hashicorp/terraform/issues/32160#issuecomment-1302783910 - // These variables were added to test the specific use case from this - // GitHub comment. - variable "empty_object_with_optional_nested_object_with_optional_bool" { - type = object({ - thing = optional(object({ - flag = optional(bool, false) - })) - }) - default = {} - } - variable "populated_object_with_optional_nested_object_with_optional_bool" { - type = object({ - thing = optional(object({ - flag = optional(bool, false) - })) - }) - default = { - thing = {} - } - } - variable "empty_object_with_default_nested_object_with_optional_bool" { - type = object({ - thing = optional(object({ - flag = optional(bool, false) - }), {}) - }) - default = {} - } - // https://github.com/hashicorp/terraform/issues/32160 - // This variable was originally added to test that optional objects do - // get created containing only their defaults. Instead they should be - // left empty. We do not expect nested_object to be created just because - // optional_string has a default value. - variable "object_with_nested_object_with_required_and_optional_attributes" { - type = object({ - nested_object = optional(object({ - string = string - optional_string = optional(string, "optional") - })) - }) - } - // https://github.com/hashicorp/terraform/issues/32157 - // Similar to above, we want to see that merging combinations of the - // nested_object into a single collection doesn't crash because of - // inconsistent elements. 
- variable "list_with_nested_object_with_required_and_optional_attributes" { - type = list(object({ - nested_object = optional(object({ - string = string - optional_string = optional(string, "optional") - })) - })) - } - // https://github.com/hashicorp/terraform/issues/32109 - // This variable was originally introduced to test the behaviour of - // the dynamic type constraint. You should be able to use the 'any' - // constraint and introduce empty, null, and populated values into the - // list. - variable "list_with_nested_list_of_any" { - type = list(object({ - a = string - b = optional(list(any)) - })) - default = [ - { - a = "a" - }, - { - a = "b" - b = [1] - } - ] - } - // https://github.com/hashicorp/terraform/issues/32396 - // This variable was originally introduced to test the behaviour of the - // dynamic type constraint. You should be able to set primitive types in - // the list consistently. - variable "list_with_nested_collections_dynamic_with_default" { - type = list( - object({ - name = optional(string, "default") - taints = optional(list(map(any)), []) - }) - ) - } - // https://github.com/hashicorp/terraform/issues/32752 - // This variable was introduced to make sure the evaluation doesn't - // crash even when the types are wrong. - variable "invalid_nested_type" { - type = map( - object({ - rules = map( - object({ - destination_addresses = optional(list(string), []) - }) - ) - }) - ) - default = {} - } - ` - cfg := testModuleInline(t, map[string]string{ - "main.tf": cfgSrc, - }) - variableConfigs := cfg.Module.Variables - - // Because we loaded our pseudo-module from a temporary file, the - // declaration source ranges will have unpredictable filenames. We'll - // fix that here just to make things easier below. 
- for _, vc := range variableConfigs { - vc.DeclRange.Filename = "main.tf" - } - - tests := []struct { - varName string - given cty.Value - want cty.Value - wantErr string - }{ - // nullable_required - { - "nullable_required", - cty.NilVal, - cty.UnknownVal(cty.DynamicPseudoType), - `Required variable not set: The variable "nullable_required" is required, but is not set.`, - }, - { - "nullable_required", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.DynamicPseudoType), - ``, // "required" for a nullable variable means only that it must be set, even if it's set to null - }, - { - "nullable_required", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "nullable_required", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // nullable_optional_default_string - { - "nullable_optional_default_string", - cty.NilVal, - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "nullable_optional_default_string", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.DynamicPseudoType), // nullable variables can be really set to null, masking the default - ``, - }, - { - "nullable_optional_default_string", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "nullable_optional_default_string", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // nullable_optional_default_null - { - "nullable_optional_default_null", - cty.NilVal, - cty.NullVal(cty.DynamicPseudoType), // the declared default value - ``, - }, - { - "nullable_optional_default_null", - cty.NullVal(cty.String), - cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default - ``, - }, - { - "nullable_optional_default_null", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "nullable_optional_default_null", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_nullable_required - { - "constrained_string_nullable_required", 
- cty.NilVal, - cty.UnknownVal(cty.String), - `Required variable not set: The variable "constrained_string_nullable_required" is required, but is not set.`, - }, - { - "constrained_string_nullable_required", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.String), // the null value still gets converted to match the type constraint - ``, // "required" for a nullable variable means only that it must be set, even if it's set to null - }, - { - "constrained_string_nullable_required", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_nullable_required", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_nullable_optional_default_string - { - "constrained_string_nullable_optional_default_string", - cty.NilVal, - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "constrained_string_nullable_optional_default_string", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default - ``, - }, - { - "constrained_string_nullable_optional_default_string", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_nullable_optional_default_string", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_nullable_optional_default_bool - { - "constrained_string_nullable_optional_default_bool", - cty.NilVal, - cty.StringVal("true"), // the declared default value, automatically converted to match type constraint - ``, - }, - { - "constrained_string_nullable_optional_default_bool", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default - ``, - }, - { - "constrained_string_nullable_optional_default_bool", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_nullable_optional_default_bool", - cty.UnknownVal(cty.String), - 
cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_nullable_optional_default_null - { - "constrained_string_nullable_optional_default_null", - cty.NilVal, - cty.NullVal(cty.String), - ``, - }, - { - "constrained_string_nullable_optional_default_null", - cty.NullVal(cty.DynamicPseudoType), - cty.NullVal(cty.String), - ``, - }, - { - "constrained_string_nullable_optional_default_null", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_nullable_optional_default_null", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // required - { - "required", - cty.NilVal, - cty.UnknownVal(cty.DynamicPseudoType), - `Required variable not set: The variable "required" is required, but is not set.`, - }, - { - "required", - cty.NullVal(cty.DynamicPseudoType), - cty.UnknownVal(cty.DynamicPseudoType), - `Required variable not set: Unsuitable value for var.required set from outside of the configuration: required variable may not be set to null.`, - }, - { - "required", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "required", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // optional_default_string - { - "optional_default_string", - cty.NilVal, - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "optional_default_string", - cty.NullVal(cty.DynamicPseudoType), - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "optional_default_string", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "optional_default_string", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_required - { - "constrained_string_required", - cty.NilVal, - cty.UnknownVal(cty.String), - `Required variable not set: The variable "constrained_string_required" is required, but is not set.`, - }, - { - "constrained_string_required", - cty.NullVal(cty.DynamicPseudoType), - cty.UnknownVal(cty.String), 
- `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, - }, - { - "constrained_string_required", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_required", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_optional_default_string - { - "constrained_string_optional_default_string", - cty.NilVal, - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "constrained_string_optional_default_string", - cty.NullVal(cty.DynamicPseudoType), - cty.StringVal("hello"), // the declared default value - ``, - }, - { - "constrained_string_optional_default_string", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_optional_default_string", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - - // constrained_string_optional_default_bool - { - "constrained_string_optional_default_bool", - cty.NilVal, - cty.StringVal("true"), // the declared default value, automatically converted to match type constraint - ``, - }, - { - "constrained_string_optional_default_bool", - cty.NullVal(cty.DynamicPseudoType), - cty.StringVal("true"), // the declared default value, automatically converted to match type constraint - ``, - }, - { - "constrained_string_optional_default_bool", - cty.StringVal("ahoy"), - cty.StringVal("ahoy"), - ``, - }, - { - "constrained_string_optional_default_bool", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - { - "list_with_nested_collections_dynamic_with_default", - cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("default"), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("complex"), - "taints": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("my_key"), - "value": 
cty.StringVal("my_value"), - }), - }), - }), - }), - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("default"), - "taints": cty.ListValEmpty(cty.Map(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("complex"), - "taints": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("my_key"), - "value": cty.StringVal("my_value"), - }), - }), - }), - }), - ``, - }, - - // complex types - - { - "complex_type_with_nested_default_optional", - cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("test1"), - "schedules": cty.SetVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("daily"), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("test2"), - "schedules": cty.SetVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("daily"), - }), - cty.MapVal(map[string]cty.Value{ - "name": cty.StringVal("weekly"), - "cold_storage_after": cty.StringVal("0"), - }), - }), - }), - }), - cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("test1"), - "schedules": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("daily"), - "cold_storage_after": cty.NumberIntVal(10), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("test2"), - "schedules": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("daily"), - "cold_storage_after": cty.NumberIntVal(10), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("weekly"), - "cold_storage_after": cty.NumberIntVal(0), - }), - }), - }), - }), - ``, - }, - { - "complex_type_with_nested_complex_types", - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("object"), - "nested_object": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("nested_object"), - }), - }), - 
cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("object"), - "nested_object": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("nested_object"), - "value": cty.StringVal("foo"), - }), - "nested_object_with_default": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("nested_object_with_default"), - "value": cty.StringVal("bar"), - }), - }), - ``, - }, - { - "complex_type_with_empty_default_and_nested_optional", - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("abc"), - "optional_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("child"), - "optional_string": cty.NullVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("def"), - "optional_list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ - "string": cty.String, - "optional_string": cty.String, - }))), - }), - }), - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("abc"), - "optional_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("child"), - "optional_string": cty.NullVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("def"), - "optional_list": cty.ListValEmpty(cty.Object(map[string]cty.Type{ - "string": cty.String, - "optional_string": cty.String, - })), - }), - }), - ``, - }, - { - "object_with_nested_object_with_required_and_optional_attributes", - cty.EmptyObjectVal, - cty.ObjectVal(map[string]cty.Value{ - "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ - "string": cty.String, - "optional_string": cty.String, - })), - }), - ``, - }, - { - "empty_object_with_optional_nested_object_with_optional_bool", - cty.NilVal, - cty.ObjectVal(map[string]cty.Value{ - "thing": cty.NullVal(cty.Object(map[string]cty.Type{ - "flag": cty.Bool, - })), - }), - ``, - }, - { - 
"populated_object_with_optional_nested_object_with_optional_bool", - cty.NilVal, - cty.ObjectVal(map[string]cty.Value{ - "thing": cty.ObjectVal(map[string]cty.Value{ - "flag": cty.False, - }), - }), - ``, - }, - { - "empty_object_with_default_nested_object_with_optional_bool", - cty.NilVal, - cty.ObjectVal(map[string]cty.Value{ - "thing": cty.ObjectVal(map[string]cty.Value{ - "flag": cty.False, - }), - }), - ``, - }, - { - "list_with_nested_object_with_required_and_optional_attributes", - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "nested_object": cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("string"), - "optional_string": cty.NullVal(cty.String), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ - "string": cty.String, - "optional_string": cty.String, - })), - }), - }), - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "nested_object": cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal("string"), - "optional_string": cty.StringVal("optional"), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ - "string": cty.String, - "optional_string": cty.String, - })), - }), - }), - ``, - }, - { - "list_with_nested_list_of_any", - cty.NilVal, - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.NullVal(cty.List(cty.Number)), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("b"), - "b": cty.ListVal([]cty.Value{ - cty.NumberIntVal(1), - }), - }), - }), - ``, - }, - { - "list_with_nested_collections_dynamic_with_default", - cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("default"), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("complex"), - "taints": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("my_key"), - "value": 
cty.StringVal("my_value"), - }), - }), - }), - }), - cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("default"), - "taints": cty.ListValEmpty(cty.Map(cty.String)), - }), - cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("complex"), - "taints": cty.ListVal([]cty.Value{ - cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("my_key"), - "value": cty.StringVal("my_value"), - }), - }), - }), - }), - ``, - }, - { - "invalid_nested_type", - cty.MapVal(map[string]cty.Value{ - "mysql": cty.ObjectVal(map[string]cty.Value{ - "rules": cty.ObjectVal(map[string]cty.Value{ - "destination_addresses": cty.ListVal([]cty.Value{cty.StringVal("192.168.0.1")}), - }), - }), - }), - cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ - "rules": cty.Map(cty.Object(map[string]cty.Type{ - "destination_addresses": cty.List(cty.String), - })), - }))), - `Invalid value for input variable: Unsuitable value for var.invalid_nested_type set from outside of the configuration: incorrect map element type: attribute "rules": element "destination_addresses": object required.`, - }, - - // sensitive - { - "constrained_string_sensitive_required", - cty.UnknownVal(cty.String), - cty.UnknownVal(cty.String), - ``, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { - varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) - varCfg := variableConfigs[test.varName] - if varCfg == nil { - t.Fatalf("invalid variable name %q", test.varName) - } - - t.Logf( - "test case\nvariable: %s\nconstraint: %#v\ndefault: %#v\nnullable: %#v\ngiven value: %#v", - varAddr, - varCfg.Type, - varCfg.Default, - varCfg.Nullable, - test.given, - ) - - rawVal := &InputValue{ - Value: test.given, - SourceType: ValueFromCaller, - } - - got, diags := prepareFinalInputVariableValue( - varAddr, rawVal, varCfg, - ) - - if test.wantErr != "" { - if !diags.HasErrors() { - 
t.Errorf("unexpected success\nwant error: %s", test.wantErr) - } else if got, want := diags.Err().Error(), test.wantErr; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - } else { - if diags.HasErrors() { - t.Errorf("unexpected error\ngot: %s", diags.Err().Error()) - } - } - - // NOTE: should still have returned some reasonable value even if there was an error - if !test.want.RawEquals(got) { - t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, test.want) - } - }) - } - - t.Run("SourceType error message variants", func(t *testing.T) { - tests := []struct { - SourceType ValueSourceType - SourceRange tfdiags.SourceRange - WantTypeErr string - WantNullErr string - }{ - { - ValueFromUnknown, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, - }, - { - ValueFromConfig, - tfdiags.SourceRange{ - Filename: "example.tf", - Start: tfdiags.SourcePos(hcl.InitialPos), - End: tfdiags.SourcePos(hcl.InitialPos), - }, - `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, - `Required variable not set: The given value is not suitable for var.constrained_string_required defined at main.tf:32,3-41: required variable may not be set to null.`, - }, - { - ValueFromAutoFile, - tfdiags.SourceRange{ - Filename: "example.auto.tfvars", - Start: tfdiags.SourcePos(hcl.InitialPos), - End: tfdiags.SourcePos(hcl.InitialPos), - }, - `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, - `Required variable not set: The given value is not suitable for var.constrained_string_required defined at 
main.tf:32,3-41: required variable may not be set to null.`, - }, - { - ValueFromNamedFile, - tfdiags.SourceRange{ - Filename: "example.tfvars", - Start: tfdiags.SourcePos(hcl.InitialPos), - End: tfdiags.SourcePos(hcl.InitialPos), - }, - `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, - `Required variable not set: The given value is not suitable for var.constrained_string_required defined at main.tf:32,3-41: required variable may not be set to null.`, - }, - { - ValueFromCLIArg, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using -var="constrained_string_required=...": string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set using -var="constrained_string_required=...": required variable may not be set to null.`, - }, - { - ValueFromEnvVar, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using the TF_VAR_constrained_string_required environment variable: string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set using the TF_VAR_constrained_string_required environment variable: required variable may not be set to null.`, - }, - { - ValueFromInput, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using an interactive prompt: string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set using an interactive prompt: required variable may not be set to null.`, - }, - { - // NOTE: This isn't actually a realistic case for this particular - // function, because if we have a value coming from a plan then - // we must be in the apply step, and we shouldn't be able to - // get past the plan step if we have invalid variable 
values, - // and during planning we'll always have other source types. - ValueFromPlan, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, - }, - { - ValueFromCaller, - tfdiags.SourceRange{}, - `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, - `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s %s", test.SourceType, test.SourceRange.StartString()), func(t *testing.T) { - varAddr := addrs.InputVariable{Name: "constrained_string_required"}.Absolute(addrs.RootModuleInstance) - varCfg := variableConfigs[varAddr.Variable.Name] - t.Run("type error", func(t *testing.T) { - rawVal := &InputValue{ - Value: cty.EmptyObjectVal, - SourceType: test.SourceType, - SourceRange: test.SourceRange, - } - - _, diags := prepareFinalInputVariableValue( - varAddr, rawVal, varCfg, - ) - if !diags.HasErrors() { - t.Fatalf("unexpected success; want error") - } - - if got, want := diags.Err().Error(), test.WantTypeErr; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - }) - t.Run("null error", func(t *testing.T) { - rawVal := &InputValue{ - Value: cty.NullVal(cty.DynamicPseudoType), - SourceType: test.SourceType, - SourceRange: test.SourceRange, - } - - _, diags := prepareFinalInputVariableValue( - varAddr, rawVal, varCfg, - ) - if !diags.HasErrors() { - t.Fatalf("unexpected success; want error") - } - - if got, want := diags.Err().Error(), test.WantNullErr; got != want { - t.Errorf("wrong error\ngot: %s\nwant: 
%s", got, want) - } - }) - }) - } - }) - - t.Run("SensitiveVariable error message variants, with source variants", func(t *testing.T) { - tests := []struct { - SourceType ValueSourceType - SourceRange tfdiags.SourceRange - WantTypeErr string - HideSubject bool - }{ - { - ValueFromUnknown, - tfdiags.SourceRange{}, - "Invalid value for input variable: Unsuitable value for var.constrained_string_sensitive_required set from outside of the configuration: string required.", - false, - }, - { - ValueFromConfig, - tfdiags.SourceRange{ - Filename: "example.tfvars", - Start: tfdiags.SourcePos(hcl.InitialPos), - End: tfdiags.SourcePos(hcl.InitialPos), - }, - `Invalid value for input variable: The given value is not suitable for var.constrained_string_sensitive_required, which is sensitive: string required. Invalid value defined at example.tfvars:1,1-1.`, - true, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s %s", test.SourceType, test.SourceRange.StartString()), func(t *testing.T) { - varAddr := addrs.InputVariable{Name: "constrained_string_sensitive_required"}.Absolute(addrs.RootModuleInstance) - varCfg := variableConfigs[varAddr.Variable.Name] - t.Run("type error", func(t *testing.T) { - rawVal := &InputValue{ - Value: cty.EmptyObjectVal, - SourceType: test.SourceType, - SourceRange: test.SourceRange, - } - - _, diags := prepareFinalInputVariableValue( - varAddr, rawVal, varCfg, - ) - if !diags.HasErrors() { - t.Fatalf("unexpected success; want error") - } - - if got, want := diags.Err().Error(), test.WantTypeErr; got != want { - t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) - } - - if test.HideSubject { - if got, want := diags[0].Source().Subject.StartString(), test.SourceRange.StartString(); got == want { - t.Errorf("Subject start should have been hidden, but was %s", got) - } - } - }) - }) - } - }) -} - -// These tests cover the JSON syntax configuration edge case handling, -// the background of which is described in detail in comments in the 
-// evalVariableValidations function. Future versions of Terraform may -// be able to remove this behaviour altogether. -func TestEvalVariableValidations_jsonErrorMessageEdgeCase(t *testing.T) { - cfgSrc := `{ - "variable": { - "valid": { - "type": "string", - "validation": { - "condition": "${var.valid != \"bar\"}", - "error_message": "Valid template string ${var.valid}" - } - }, - "invalid": { - "type": "string", - "validation": { - "condition": "${var.invalid != \"bar\"}", - "error_message": "Invalid template string ${" - } - } - } -} -` - cfg := testModuleInline(t, map[string]string{ - "main.tf.json": cfgSrc, - }) - variableConfigs := cfg.Module.Variables - - // Because we loaded our pseudo-module from a temporary file, the - // declaration source ranges will have unpredictable filenames. We'll - // fix that here just to make things easier below. - for _, vc := range variableConfigs { - vc.DeclRange.Filename = "main.tf.json" - for _, v := range vc.Validations { - v.DeclRange.Filename = "main.tf.json" - } - } - - tests := []struct { - varName string - given cty.Value - wantErr []string - wantWarn []string - }{ - // Valid variable validation declaration, assigned value which passes - // the condition generates no diagnostics. - { - varName: "valid", - given: cty.StringVal("foo"), - }, - // Assigning a value which fails the condition generates an error - // message with the expression successfully evaluated. - { - varName: "valid", - given: cty.StringVal("bar"), - wantErr: []string{ - "Invalid value for variable", - "Valid template string bar", - }, - }, - // Invalid variable validation declaration due to an unparseable - // template string. Assigning a value which passes the condition - // results in a warning about the error message. 
- { - varName: "invalid", - given: cty.StringVal("foo"), - wantWarn: []string{ - "Validation error message expression is invalid", - "Missing expression; Expected the start of an expression, but found the end of the file.", - }, - }, - // Assigning a value which fails the condition generates an error - // message including the configured string interpreted as a literal - // value, and the same warning diagnostic as above. - { - varName: "invalid", - given: cty.StringVal("bar"), - wantErr: []string{ - "Invalid value for variable", - "Invalid template string ${", - }, - wantWarn: []string{ - "Validation error message expression is invalid", - "Missing expression; Expected the start of an expression, but found the end of the file.", - }, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { - varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) - varCfg := variableConfigs[test.varName] - if varCfg == nil { - t.Fatalf("invalid variable name %q", test.varName) - } - - // Build a mock context to allow the function under test to - // retrieve the variable value and evaluate the expressions - ctx := &MockEvalContext{} - - // We need a minimal scope to allow basic functions to be passed to - // the HCL scope - ctx.EvaluationScopeScope = &lang.Scope{} - ctx.GetVariableValueFunc = func(addr addrs.AbsInputVariableInstance) cty.Value { - if got, want := addr.String(), varAddr.String(); got != want { - t.Errorf("incorrect argument to GetVariableValue: got %s, want %s", got, want) - } - return test.given - } - - gotDiags := evalVariableValidations( - varAddr, varCfg, nil, ctx, - ) - - if len(test.wantErr) == 0 && len(test.wantWarn) == 0 { - if len(gotDiags) > 0 { - t.Errorf("no diags expected, got %s", gotDiags.Err().Error()) - } - } else { - wantErrs: - for _, want := range test.wantErr { - for _, diag := range gotDiags { - if diag.Severity() != tfdiags.Error { - continue - } - 
desc := diag.Description() - if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { - continue wantErrs - } - } - t.Errorf("no error diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) - } - - wantWarns: - for _, want := range test.wantWarn { - for _, diag := range gotDiags { - if diag.Severity() != tfdiags.Warning { - continue - } - desc := diag.Description() - if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { - continue wantWarns - } - } - t.Errorf("no warning diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) - } - } - }) - } -} - -func TestEvalVariableValidations_sensitiveValues(t *testing.T) { - cfgSrc := ` -variable "foo" { - type = string - sensitive = true - default = "boop" - - validation { - condition = length(var.foo) == 4 - error_message = "Foo must be 4 characters, not ${length(var.foo)}" - } -} - -variable "bar" { - type = string - sensitive = true - default = "boop" - - validation { - condition = length(var.bar) == 4 - error_message = "Bar must be 4 characters, not ${nonsensitive(length(var.bar))}." - } -} -` - cfg := testModuleInline(t, map[string]string{ - "main.tf": cfgSrc, - }) - variableConfigs := cfg.Module.Variables - - // Because we loaded our pseudo-module from a temporary file, the - // declaration source ranges will have unpredictable filenames. We'll - // fix that here just to make things easier below. 
- for _, vc := range variableConfigs { - vc.DeclRange.Filename = "main.tf" - for _, v := range vc.Validations { - v.DeclRange.Filename = "main.tf" - } - } - - tests := []struct { - varName string - given cty.Value - wantErr []string - }{ - // Validations pass on a sensitive variable with an error message which - // would generate a sensitive value - { - varName: "foo", - given: cty.StringVal("boop"), - }, - // Assigning a value which fails the condition generates a sensitive - // error message, which is elided and generates another error - { - varName: "foo", - given: cty.StringVal("bap"), - wantErr: []string{ - "Invalid value for variable", - "The error message included a sensitive value, so it will not be displayed.", - "Error message refers to sensitive values", - }, - }, - // Validations pass on a sensitive variable with a correctly defined - // error message - { - varName: "bar", - given: cty.StringVal("boop"), - }, - // Assigning a value which fails the condition generates a nonsensitive - // error message, which is displayed - { - varName: "bar", - given: cty.StringVal("bap"), - wantErr: []string{ - "Invalid value for variable", - "Bar must be 4 characters, not 3.", - }, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { - varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) - varCfg := variableConfigs[test.varName] - if varCfg == nil { - t.Fatalf("invalid variable name %q", test.varName) - } - - // Build a mock context to allow the function under test to - // retrieve the variable value and evaluate the expressions - ctx := &MockEvalContext{} - - // We need a minimal scope to allow basic functions to be passed to - // the HCL scope - ctx.EvaluationScopeScope = &lang.Scope{} - ctx.GetVariableValueFunc = func(addr addrs.AbsInputVariableInstance) cty.Value { - if got, want := addr.String(), varAddr.String(); got != want { - t.Errorf("incorrect argument to 
GetVariableValue: got %s, want %s", got, want) - } - if varCfg.Sensitive { - return test.given.Mark(marks.Sensitive) - } else { - return test.given - } - } - - gotDiags := evalVariableValidations( - varAddr, varCfg, nil, ctx, - ) - - if len(test.wantErr) == 0 { - if len(gotDiags) > 0 { - t.Errorf("no diags expected, got %s", gotDiags.Err().Error()) - } - } else { - wantErrs: - for _, want := range test.wantErr { - for _, diag := range gotDiags { - if diag.Severity() != tfdiags.Error { - continue - } - desc := diag.Description() - if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { - continue wantErrs - } - } - t.Errorf("no error diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) - } - } - }) - } -} diff --git a/internal/terraform/evaluate.go b/internal/terraform/evaluate.go deleted file mode 100644 index d680136d8dcd..000000000000 --- a/internal/terraform/evaluate.go +++ /dev/null @@ -1,966 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "os" - "path/filepath" - "sync" - - "github.com/agext/levenshtein" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// Evaluator provides the necessary contextual data for evaluating expressions -// for a particular walk operation. -type Evaluator struct { - // Operation defines what type of operation this evaluator is being used - // for. - Operation walkOperation - - // Meta is contextual metadata about the current operation. 
- Meta *ContextMeta - - // Config is the root node in the configuration tree. - Config *configs.Config - - // VariableValues is a map from variable names to their associated values, - // within the module indicated by ModulePath. VariableValues is modified - // concurrently, and so it must be accessed only while holding - // VariableValuesLock. - // - // The first map level is string representations of addr.ModuleInstance - // values, while the second level is variable names. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Plugins is the library of available plugin components (providers and - // provisioners) that we have available to help us evaluate expressions - // that interact with plugin-provided objects. - // - // From this we only access the schemas of the plugins, and don't otherwise - // interact with plugin instances. - Plugins *contextPlugins - - // State is the current state, embedded in a wrapper that ensures that - // it can be safely accessed and modified concurrently. - State *states.SyncState - - // Changes is the set of proposed changes, embedded in a wrapper that - // ensures they can be safely accessed and modified concurrently. - Changes *plans.ChangesSync -} - -// Scope creates an evaluation scope for the given module path and optional -// resource. -// -// If the "self" argument is nil then the "self" object is not available -// in evaluated expressions. Otherwise, it behaves as an alias for the given -// address. -func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { - return &lang.Scope{ - Data: data, - SelfAddr: self, - PureOnly: e.Operation != walkApply && e.Operation != walkDestroy && e.Operation != walkEval, - BaseDir: ".", // Always current working directory for now. - } -} - -// evaluationStateData is an implementation of lang.Data that resolves -// references primarily (but not exclusively) using information from a State. 
-type evaluationStateData struct { - Evaluator *Evaluator - - // ModulePath is the path through the dynamic module tree to the module - // that references will be resolved relative to. - ModulePath addrs.ModuleInstance - - // InstanceKeyData describes the values, if any, that are accessible due - // to repetition of a containing object using "count" or "for_each" - // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, - // since the user specifies in that case which variable name to locally - // shadow.) - InstanceKeyData InstanceKeyEvalData - - // Operation records the type of walk the evaluationStateData is being used - // for. - Operation walkOperation -} - -// InstanceKeyEvalData is the old name for instances.RepetitionData, aliased -// here for compatibility. In new code, use instances.RepetitionData instead. -type InstanceKeyEvalData = instances.RepetitionData - -// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for -// evaluating in a context that has the given instance key. -// -// The forEachMap argument can be nil when preparing for evaluation -// in a context where each.value is prohibited, such as a destroy-time -// provisioner. In that case, the returned EachValue will always be -// cty.NilVal. -func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { - var evalData InstanceKeyEvalData - if key == nil { - return evalData - } - - keyValue := key.Value() - switch keyValue.Type() { - case cty.String: - evalData.EachKey = keyValue - evalData.EachValue = forEachMap[keyValue.AsString()] - case cty.Number: - evalData.CountIndex = keyValue - } - return evalData -} - -// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance -// key values at all, suitable for use in contexts where no keyed instance -// is relevant. 
-var EvalDataForNoInstanceKey = InstanceKeyEvalData{} - -// evaluationStateData must implement lang.Data -var _ lang.Data = (*evaluationStateData)(nil) - -func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "index": - idxVal := d.InstanceKeyData.CountIndex - if idxVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: `The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.Number), diags - } - return idxVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "count" attribute`, - Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var returnVal cty.Value - switch addr.Name { - - case "key": - returnVal = d.InstanceKeyData.EachKey - case "value": - returnVal = d.InstanceKeyData.EachValue - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `each.value cannot be used in this context`, - Detail: `A reference to "each.value" has been used in a context in which it is unavailable, such as when the configuration no longer contains the value in its "for_each" expression. 
Remove this reference to each.value in your configuration to work around this error.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "each" attribute`, - Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "each" in context without for_each`, - Detail: `The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - return returnVal, diags -} - -func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Variables[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Variables { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } else { - suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared input variable`, - Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - d.Evaluator.VariableValuesLock.Lock() - defer d.Evaluator.VariableValuesLock.Unlock() - - // During the validate walk, input variables are always unknown so - // that we are validating the configuration for all possible input values - // rather than for a specific set. Checking against a specific set of - // input values then happens during the plan walk. - // - // This is important because otherwise the validation walk will tend to be - // overly strict, requiring expressions throughout the configuration to - // be complicated to accommodate all possible inputs, whereas returning - // unknown here allows for simpler patterns like using input values as - // guards to broadly enable/disable resources, avoid processing things - // that are disabled, etc. Terraform's static validation leans towards - // being liberal in what it accepts because the subsequent plan walk has - // more information available and so can be more conservative. 
- if d.Operation == walkValidate { - // Ensure variable sensitivity is captured in the validate walk - if config.Sensitive { - return cty.UnknownVal(config.Type).Mark(marks.Sensitive), diags - } - return cty.UnknownVal(config.Type), diags - } - - moduleAddrStr := d.ModulePath.String() - vals := d.Evaluator.VariableValues[moduleAddrStr] - if vals == nil { - return cty.UnknownVal(config.Type), diags - } - - // d.Evaluator.VariableValues should always contain valid "final values" - // for variables, which is to say that they have already had type - // conversions, validations, and default value handling applied to them. - // Those are the responsibility of the graph notes representing the - // variable declarations. Therefore here we just trust that we already - // have a correct value. - - val, isSet := vals[addr.Name] - if !isSet { - // We should not be able to get here without having a valid value - // for every variable, so this always indicates a bug in either - // the graph builder (not including all the needed nodes) or in - // the graph nodes representing variables. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to unresolved input variable`, - Detail: fmt.Sprintf( - `The final value for %s is missing in Terraform's evaluation context. This is a bug in Terraform; please report it!`, - addr.Absolute(d.ModulePath), - ), - Subject: rng.ToHCL().Ptr(), - }) - val = cty.UnknownVal(config.Type) - } - - // Mark if sensitive - if config.Sensitive { - val = val.Mark(marks.Sensitive) - } - - return val, diags -} - -func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. 
- moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Locals[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Locals { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared local value`, - Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) - if val == cty.NilVal { - // Not evaluated yet? - val = cty.DynamicVal - } - - return val, diags -} - -func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. 
- moduleAddr := d.ModulePath.Module().Child(addr.Name) - - parentCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - callConfig, ok := parentCfg.Module.ModuleCalls[addr.Name] - if !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // We'll consult the configuration to see what output names we are - // expecting, so we can ensure the resulting object is of the expected - // type even if our data is incomplete for some reason. - moduleConfig := d.Evaluator.Config.Descendent(moduleAddr) - if moduleConfig == nil { - // should never happen, since we have a valid module call above, this - // should be caught during static validation. - panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) - } - outputConfigs := moduleConfig.Module.Outputs - - // Collect all the relevant outputs that current exist in the state. - // We know the instance path up to this point, and the child module name, - // so we only need to store these by instance key. - stateMap := map[addrs.InstanceKey]map[string]cty.Value{} - for _, output := range d.Evaluator.State.ModuleOutputs(d.ModulePath, addr) { - val := output.Value - if output.Sensitive { - val = val.Mark(marks.Sensitive) - } - - _, callInstance := output.Addr.Module.CallInstance() - instance, ok := stateMap[callInstance.Key] - if !ok { - instance = map[string]cty.Value{} - stateMap[callInstance.Key] = instance - } - - instance[output.Addr.OutputValue.Name] = val - } - - // Get all changes that reside for this module call within our path. - // The change contains the full addr, so we can key these with strings. 
- changesMap := map[addrs.InstanceKey]map[string]*plans.OutputChangeSrc{} - for _, change := range d.Evaluator.Changes.GetOutputChanges(d.ModulePath, addr) { - _, callInstance := change.Addr.Module.CallInstance() - instance, ok := changesMap[callInstance.Key] - if !ok { - instance = map[string]*plans.OutputChangeSrc{} - changesMap[callInstance.Key] = instance - } - - instance[change.Addr.OutputValue.Name] = change - } - - // Build up all the module objects, creating a map of values for each - // module instance. - moduleInstances := map[addrs.InstanceKey]map[string]cty.Value{} - - // create a dummy object type for validation below - unknownMap := map[string]cty.Type{} - - // the structure is based on the configuration, so iterate through all the - // defined outputs, and add any instance state or changes we find. - for _, cfg := range outputConfigs { - // record the output names for validation - unknownMap[cfg.Name] = cty.DynamicPseudoType - - // get all instance output for this path from the state - for key, states := range stateMap { - outputState, ok := states[cfg.Name] - if !ok { - continue - } - - instance, ok := moduleInstances[key] - if !ok { - instance = map[string]cty.Value{} - moduleInstances[key] = instance - } - - instance[cfg.Name] = outputState - } - - // any pending changes override the state state values - for key, changes := range changesMap { - changeSrc, ok := changes[cfg.Name] - if !ok { - continue - } - - instance, ok := moduleInstances[key] - if !ok { - instance = map[string]cty.Value{} - moduleInstances[key] = instance - } - - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. 
- diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) - instance[cfg.Name] = cty.DynamicVal - continue - } - - instance[cfg.Name] = change.After - - if change.Sensitive { - instance[cfg.Name] = change.After.Mark(marks.Sensitive) - } - } - } - - var ret cty.Value - - // compile the outputs into the correct value type for the each mode - switch { - case callConfig.Count != nil: - // figure out what the last index we have is - length := -1 - for key := range moduleInstances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state which is being dropped - continue - } - if int(intKey) >= length { - length = int(intKey) + 1 - } - } - - if length > 0 { - vals := make([]cty.Value, length) - for key, instance := range moduleInstances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state which is being dropped - continue - } - - vals[int(intKey)] = cty.ObjectVal(instance) - } - - // Insert unknown values where there are any missing instances - for i, v := range vals { - if v.IsNull() { - vals[i] = cty.DynamicVal - continue - } - } - ret = cty.TupleVal(vals) - } else { - ret = cty.EmptyTupleVal - } - - case callConfig.ForEach != nil: - vals := make(map[string]cty.Value) - for key, instance := range moduleInstances { - strKey, ok := key.(addrs.StringKey) - if !ok { - continue - } - - vals[string(strKey)] = cty.ObjectVal(instance) - } - - if len(vals) > 0 { - ret = cty.ObjectVal(vals) - } else { - ret = cty.EmptyObjectVal - } - - default: - val, ok := moduleInstances[addrs.NoKey] - if !ok { - // create the object if there wasn't one known - val = map[string]cty.Value{} - for k := range outputConfigs { - val[k] = cty.DynamicVal - } - } - - ret = cty.ObjectVal(val) - } - - // The module won't be expanded during validation, so we need to return an - // unknown value. This will ensure the types looks correct, since we built - // the objects based on the configuration. 
- if d.Operation == walkValidate { - // While we know the type here and it would be nice to validate whether - // indexes are valid or not, because tuples and objects have fixed - // numbers of elements we can't simply return an unknown value of the - // same type since we have not expanded any instances during - // validation. - // - // In order to validate the expression a little precisely, we'll create - // an unknown map or list here to get more type information. - ty := cty.Object(unknownMap) - switch { - case callConfig.Count != nil: - ret = cty.UnknownVal(cty.List(ty)) - case callConfig.ForEach != nil: - ret = cty.UnknownVal(cty.Map(ty)) - default: - ret = cty.UnknownVal(ty) - } - } - - return ret, diags -} - -func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "cwd": - var err error - var wd string - if d.Evaluator.Meta != nil { - // Meta is always non-nil in the normal case, but some test cases - // are not so realistic. - wd = d.Evaluator.Meta.OriginalWorkingDir - } - if wd == "" { - wd, err = os.Getwd() - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - } - // The current working directory should always be absolute, whether we - // just looked it up or whether we were relying on ContextMeta's - // (possibly non-normalized) path. 
- wd, err = filepath.Abs(wd) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - return cty.StringVal(filepath.ToSlash(wd)), diags - - case "module": - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) - } - sourceDir := moduleConfig.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - case "root": - sourceDir := d.Evaluator.Config.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - default: - suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "path" attribute`, - Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // First we'll consult the configuration to see if an resource of this - // name is declared at all. - moduleAddr := d.ModulePath - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) - } - - config := moduleConfig.Module.ResourceByAddr(addr) - if config == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // Build the provider address from configuration, since we may not have - // state available in all cases. - // We need to build an abs provider address, but we can use a default - // instance since we're only interested in the schema. - schema := d.getResourceSchema(addr, config.Provider) - if schema == nil { - // This shouldn't happen, since validation before we get here should've - // taken care of it, but we'll show a reasonable error message anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource type schema`, - Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, config.Provider), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - ty := schema.ImpliedType() - - rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) - - if rs == nil { - switch d.Operation { - case walkPlan, walkApply: - // During plan and apply as we evaluate each removed instance they - // are removed from the working state. Since we know there are no - // instances, return an empty container of the expected type. - switch { - case config.Count != nil: - return cty.EmptyTupleVal, diags - case config.ForEach != nil: - return cty.EmptyObjectVal, diags - default: - // While we can reference an expanded resource with 0 - // instances, we cannot reference instances that do not exist. 
- // Due to the fact that we may have direct references to - // instances that may end up in a root output during destroy - // (since a planned destroy cannot yet remove root outputs), we - // need to return a dynamic value here to allow evaluation to - // continue. - log.Printf("[ERROR] unknown instance %q referenced during %s", addr.Absolute(d.ModulePath), d.Operation) - return cty.DynamicVal, diags - } - - case walkImport: - // Import does not yet plan resource changes, so new resources from - // config are not going to be found here. Once walkImport fully - // plans resources, this case should not longer be needed. - // In the single instance case, we can return a typed unknown value - // for the instance to better satisfy other expressions using the - // value. This of course will not help if statically known - // attributes are expected to be known elsewhere, but reduces the - // number of problematic configs for now. - // Unlike in plan and apply above we can't be sure the count or - // for_each instances are empty, so we return a DynamicVal. We - // don't really have a good value to return otherwise -- empty - // values will fail for direct index expressions, and unknown - // Lists and Maps could fail in some type unifications. - switch { - case config.Count != nil: - return cty.DynamicVal, diags - case config.ForEach != nil: - return cty.DynamicVal, diags - default: - return cty.UnknownVal(ty), diags - } - - default: - // We should only end up here during the validate walk, - // since later walks should have at least partial states populated - // for all resources in the configuration. - return cty.DynamicVal, diags - } - } - - // Decode all instances in the current state - instances := map[addrs.InstanceKey]cty.Value{} - pendingDestroy := d.Operation == walkDestroy - for key, is := range rs.Instances { - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. 
- instances[key] = cty.UnknownVal(ty) - continue - } - - instAddr := addr.Instance(key).Absolute(d.ModulePath) - - change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen) - if change != nil { - // Don't take any resources that are yet to be deleted into account. - // If the referenced resource is CreateBeforeDestroy, then orphaned - // instances will be in the state, as they are not destroyed until - // after their dependants are updated. - if change.Action == plans.Delete { - if !pendingDestroy { - continue - } - } - } - - // Planned resources are temporarily stored in state with empty values, - // and need to be replaced by the planned value here. - if is.Current.Status == states.ObjectPlanned { - if change == nil { - // If the object is in planned status then we should not get - // here, since we should have found a pending value in the plan - // above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - - // If our provider schema contains sensitive values, mark those as sensitive - afterMarks := change.AfterValMarks - if schema.ContainsSensitive() { - afterMarks = append(afterMarks, schema.ValueMarks(val, nil)...) 
- } - - instances[key] = val.MarkWithPaths(afterMarks) - continue - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here we - // should have upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - - val := ios.Value - - // If our schema contains sensitive values, mark those as sensitive. - // Since decoding the instance object can also apply sensitivity marks, - // we must remove and combine those before remarking to avoid a double- - // mark error. - if schema.ContainsSensitive() { - var marks []cty.PathValueMarks - val, marks = val.UnmarkDeepWithPaths() - marks = append(marks, schema.ValueMarks(val, nil)...) - val = val.MarkWithPaths(marks) - } - instances[key] = val - } - - // ret should be populated with a valid value in all cases below - var ret cty.Value - - switch { - case config.Count != nil: - // figure out what the last index we have is - length := -1 - for key := range instances { - intKey, ok := key.(addrs.IntKey) - if !ok { - continue - } - if int(intKey) >= length { - length = int(intKey) + 1 - } - } - - if length > 0 { - vals := make([]cty.Value, length) - for key, instance := range instances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state, which isn't valid for evaluation - continue - } - - vals[int(intKey)] = instance - } - - // Insert unknown values where there are any missing instances - for i, v := range vals { - if v == cty.NilVal { - vals[i] = cty.UnknownVal(ty) - } - } - ret = cty.TupleVal(vals) - } else { - ret = cty.EmptyTupleVal - } - - case config.ForEach != nil: - vals := make(map[string]cty.Value) - for key, instance := range instances { - strKey, ok := key.(addrs.StringKey) - if !ok { - // old key that is 
being dropped and not used for evaluation - continue - } - vals[string(strKey)] = instance - } - - if len(vals) > 0 { - // We use an object rather than a map here because resource schemas - // may include dynamically-typed attributes, which will then cause - // each instance to potentially have a different runtime type even - // though they all conform to the static schema. - ret = cty.ObjectVal(vals) - } else { - ret = cty.EmptyObjectVal - } - - default: - val, ok := instances[addrs.NoKey] - if !ok { - // if the instance is missing, insert an unknown value - val = cty.UnknownVal(ty) - } - - ret = val - } - - return ret, diags -} - -func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.Provider) *configschema.Block { - schema, _, err := d.Evaluator.Plugins.ResourceTypeSchema(providerAddr, addr.Mode, addr.Type) - if err != nil { - // We have plently other codepaths that will detect and report - // schema lookup errors before we'd reach this point, so we'll just - // treat a failure here the same as having no schema. - return nil - } - return schema -} - -func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "workspace": - workspaceName := d.Evaluator.Meta.Env - return cty.StringVal(workspaceName), diags - - case "env": - // Prior to Terraform 0.12 there was an attribute "env", which was - // an alias name for "workspace". This was deprecated and is now - // removed. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. 
The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} - -// moduleDisplayAddr returns a string describing the given module instance -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. 
-func moduleDisplayAddr(addr addrs.ModuleInstance) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/internal/terraform/evaluate_test.go b/internal/terraform/evaluate_test.go deleted file mode 100644 index 765efded6859..000000000000 --- a/internal/terraform/evaluate_test.go +++ /dev/null @@ -1,566 +0,0 @@ -package terraform - -import ( - "sync" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func TestEvaluatorGetTerraformAttr(t *testing.T) { - evaluator := &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - } - data := &evaluationStateData{ - Evaluator: evaluator, - } - scope := evaluator.Scope(data, nil) - - t.Run("workspace", func(t *testing.T) { - want := cty.StringVal("foo") - got, diags := scope.Data.GetTerraformAttr(addrs.TerraformAttr{ - Name: "workspace", - }, tfdiags.SourceRange{}) - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %q; want %q", got, want) - } - }) -} - -func TestEvaluatorGetPathAttr(t *testing.T) { - evaluator := &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - Config: &configs.Config{ - Module: &configs.Module{ - SourceDir: "bar/baz", - }, - }, - } - data := &evaluationStateData{ - Evaluator: evaluator, - } - scope := evaluator.Scope(data, nil) - - t.Run("module", func(t *testing.T) { - want := cty.StringVal("bar/baz") - got, diags := scope.Data.GetPathAttr(addrs.PathAttr{ - Name: "module", - }, tfdiags.SourceRange{}) - if len(diags) != 0 { - 
t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } - }) - - t.Run("root", func(t *testing.T) { - want := cty.StringVal("bar/baz") - got, diags := scope.Data.GetPathAttr(addrs.PathAttr{ - Name: "root", - }, tfdiags.SourceRange{}) - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } - }) -} - -// This particularly tests that a sensitive attribute in config -// results in a value that has a "sensitive" cty Mark -func TestEvaluatorGetInputVariable(t *testing.T) { - evaluator := &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - Config: &configs.Config{ - Module: &configs.Module{ - Variables: map[string]*configs.Variable{ - "some_var": { - Name: "some_var", - Sensitive: true, - Default: cty.StringVal("foo"), - Type: cty.String, - ConstraintType: cty.String, - }, - // Avoid double marking a value - "some_other_var": { - Name: "some_other_var", - Sensitive: true, - Default: cty.StringVal("bar"), - Type: cty.String, - ConstraintType: cty.String, - }, - }, - }, - }, - VariableValues: map[string]map[string]cty.Value{ - "": { - "some_var": cty.StringVal("bar"), - "some_other_var": cty.StringVal("boop").Mark(marks.Sensitive), - }, - }, - VariableValuesLock: &sync.Mutex{}, - } - - data := &evaluationStateData{ - Evaluator: evaluator, - } - scope := evaluator.Scope(data, nil) - - want := cty.StringVal("bar").Mark(marks.Sensitive) - got, diags := scope.Data.GetInputVariable(addrs.InputVariable{ - Name: "some_var", - }, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } - - want = cty.StringVal("boop").Mark(marks.Sensitive) - got, diags = scope.Data.GetInputVariable(addrs.InputVariable{ - Name: "some_other_var", - }, 
tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } -} - -func TestEvaluatorGetResource(t *testing.T) { - stateSync := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo", "nesting_list": [{"sensitive_value":"abc"}], "nesting_map": {"foo":{"foo":"x"}}, "nesting_set": [{"baz":"abc"}], "nesting_single": {"boop":"abc"}, "nesting_nesting": {"nesting_list":[{"sensitive_value":"abc"}]}, "value":"hello"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }).SyncWrapper() - - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "id": cty.StringVal("foo"), - }), - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "hashicorp", - Type: "test", - }, - } - - evaluator := &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - Changes: plans.NewChanges().SyncWrapper(), - Config: &configs.Config{ - Module: &configs.Module{ - ManagedResources: map[string]*configs.Resource{ - "test_resource.foo": rc, - }, - }, - }, - State: stateSync, - Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): { - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "value": { - Type: cty.String, - Computed: true, - Sensitive: true, - }, - }, - BlockTypes: 
map[string]*configschema.NestedBlock{ - "nesting_list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - "sensitive_value": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingList, - }, - "nesting_map": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingMap, - }, - "nesting_set": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "baz": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingSet, - }, - "nesting_single": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "boop": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingSingle, - }, - "nesting_nesting": { - Block: configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "nesting_list": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, - "sensitive_value": {Type: cty.String, Optional: true, Sensitive: true}, - }, - }, - Nesting: configschema.NestingList, - }, - }, - }, - Nesting: configschema.NestingSingle, - }, - }, - }, - }, - }, - }), - } - - data := &evaluationStateData{ - Evaluator: evaluator, - } - scope := evaluator.Scope(data, nil) - - want := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "nesting_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive), - "value": cty.NullVal(cty.String), - }), - }), - "nesting_map": cty.MapVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("x").Mark(marks.Sensitive)}), - }), - "nesting_nesting": cty.ObjectVal(map[string]cty.Value{ - 
"nesting_list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive), - "value": cty.NullVal(cty.String), - }), - }), - }), - "nesting_set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "baz": cty.StringVal("abc").Mark(marks.Sensitive), - }), - }), - "nesting_single": cty.ObjectVal(map[string]cty.Value{ - "boop": cty.StringVal("abc").Mark(marks.Sensitive), - }), - "value": cty.StringVal("hello").Mark(marks.Sensitive), - }) - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - } - got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - - if !got.RawEquals(want) { - t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want) - } -} - -// GetResource will return a planned object's After value -// if there is a change for that resource instance. -func TestEvaluatorGetResource_changes(t *testing.T) { - // Set up existing state - stateSync := states.BuildState(func(ss *states.SyncState) { - ss.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectPlanned, - AttrsJSON: []byte(`{"id":"foo", "to_mark_val":"tacos", "sensitive_value":"abc"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }).SyncWrapper() - - // Create a change for the existing state resource, - // to exercise retrieving the After value of the change - changesSync := plans.NewChanges().SyncWrapper() - change := &plans.ResourceInstanceChange{ - Addr: mustResourceInstanceAddr("test_resource.foo"), - ProviderAddr: addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("test"), - }, - 
Change: plans.Change{ - Action: plans.Update, - // Provide an After value that contains a marked value - After: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "to_mark_val": cty.StringVal("pizza").Mark(marks.Sensitive), - "sensitive_value": cty.StringVal("abc"), - "sensitive_collection": cty.MapVal(map[string]cty.Value{ - "boop": cty.StringVal("beep"), - }), - }), - }, - } - - // Set up our schemas - schemas := &Schemas{ - Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): { - Provider: &configschema.Block{}, - ResourceTypes: map[string]*configschema.Block{ - "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "to_mark_val": { - Type: cty.String, - Computed: true, - }, - "sensitive_value": { - Type: cty.String, - Computed: true, - Sensitive: true, - }, - "sensitive_collection": { - Type: cty.Map(cty.String), - Computed: true, - Sensitive: true, - }, - }, - }, - }, - }, - }, - } - - // The resource we'll inspect - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - } - schema, _ := schemas.ResourceTypeConfig(addrs.NewDefaultProvider("test"), addr.Mode, addr.Type) - // This encoding separates out the After's marks into its AfterValMarks - csrc, _ := change.Encode(schema.ImpliedType()) - changesSync.AppendResourceInstanceChange(csrc) - - evaluator := &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - Changes: changesSync, - Config: &configs.Config{ - Module: &configs.Module{ - ManagedResources: map[string]*configs.Resource{ - "test_resource.foo": { - Mode: addrs.ManagedResourceMode, - Type: "test_resource", - Name: "foo", - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "hashicorp", - Type: "test", - }, - }, - }, - }, - }, - State: stateSync, - Plugins: schemaOnlyProvidersForTesting(schemas.Providers), - } - - data := &evaluationStateData{ - Evaluator: 
evaluator, - } - scope := evaluator.Scope(data, nil) - - want := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("foo"), - "to_mark_val": cty.StringVal("pizza").Mark(marks.Sensitive), - "sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive), - "sensitive_collection": cty.MapVal(map[string]cty.Value{ - "boop": cty.StringVal("beep"), - }).Mark(marks.Sensitive), - }) - - got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - - if !got.RawEquals(want) { - t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestEvaluatorGetModule(t *testing.T) { - // Create a new evaluator with an existing state - stateSync := states.BuildState(func(ss *states.SyncState) { - ss.SetOutputValue( - addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}), - cty.StringVal("bar"), - true, - ) - }).SyncWrapper() - evaluator := evaluatorForModule(stateSync, plans.NewChanges().SyncWrapper()) - data := &evaluationStateData{ - Evaluator: evaluator, - } - scope := evaluator.Scope(data, nil) - want := cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("bar").Mark(marks.Sensitive)}) - got, diags := scope.Data.GetModule(addrs.ModuleCall{ - Name: "mod", - }, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } - - // Changes should override the state value - changesSync := plans.NewChanges().SyncWrapper() - change := &plans.OutputChange{ - Addr: addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}), - Sensitive: true, - Change: plans.Change{ - After: cty.StringVal("baz"), - }, - } - cs, _ := change.Encode() - changesSync.AppendOutputChange(cs) - evaluator = evaluatorForModule(stateSync, changesSync) - data = &evaluationStateData{ - 
Evaluator: evaluator, - } - scope = evaluator.Scope(data, nil) - want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)}) - got, diags = scope.Data.GetModule(addrs.ModuleCall{ - Name: "mod", - }, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } - - // Test changes with empty state - evaluator = evaluatorForModule(states.NewState().SyncWrapper(), changesSync) - data = &evaluationStateData{ - Evaluator: evaluator, - } - scope = evaluator.Scope(data, nil) - want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)}) - got, diags = scope.Data.GetModule(addrs.ModuleCall{ - Name: "mod", - }, tfdiags.SourceRange{}) - - if len(diags) != 0 { - t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) - } - if !got.RawEquals(want) { - t.Errorf("wrong result %#v; want %#v", got, want) - } -} - -func evaluatorForModule(stateSync *states.SyncState, changesSync *plans.ChangesSync) *Evaluator { - return &Evaluator{ - Meta: &ContextMeta{ - Env: "foo", - }, - Config: &configs.Config{ - Module: &configs.Module{ - ModuleCalls: map[string]*configs.ModuleCall{ - "mod": { - Name: "mod", - }, - }, - }, - Children: map[string]*configs.Config{ - "mod": { - Path: addrs.Module{"module.mod"}, - Module: &configs.Module{ - Outputs: map[string]*configs.Output{ - "out": { - Name: "out", - Sensitive: true, - }, - }, - }, - }, - }, - }, - State: stateSync, - Changes: changesSync, - } -} diff --git a/internal/terraform/evaluate_triggers.go b/internal/terraform/evaluate_triggers.go deleted file mode 100644 index 31fd80e16b2d..000000000000 --- a/internal/terraform/evaluate_triggers.go +++ /dev/null @@ -1,143 +0,0 @@ -package terraform - -import ( - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - 
"github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func evalReplaceTriggeredByExpr(expr hcl.Expression, keyData instances.RepetitionData) (*addrs.Reference, tfdiags.Diagnostics) { - var ref *addrs.Reference - var diags tfdiags.Diagnostics - - traversal, diags := triggersExprToTraversal(expr, keyData) - if diags.HasErrors() { - return nil, diags - } - - // We now have a static traversal, so we can just turn it into an addrs.Reference. - ref, ds := addrs.ParseRef(traversal) - diags = diags.Append(ds) - - return ref, diags -} - -// trggersExprToTraversal takes an hcl expression limited to the syntax allowed -// in replace_triggered_by, and converts it to a static traversal. The -// RepetitionData contains the data necessary to evaluate the only allowed -// variables in the expression, count.index and each.key. -func triggersExprToTraversal(expr hcl.Expression, keyData instances.RepetitionData) (hcl.Traversal, tfdiags.Diagnostics) { - var trav hcl.Traversal - var diags tfdiags.Diagnostics - - switch e := expr.(type) { - case *hclsyntax.RelativeTraversalExpr: - t, d := triggersExprToTraversal(e.Source, keyData) - diags = diags.Append(d) - trav = append(trav, t...) - trav = append(trav, e.Traversal...) - - case *hclsyntax.ScopeTraversalExpr: - // a static reference, we can just append the traversal - trav = append(trav, e.Traversal...) - - case *hclsyntax.IndexExpr: - // Get the collection from the index expression - t, d := triggersExprToTraversal(e.Collection, keyData) - diags = diags.Append(d) - if diags.HasErrors() { - return nil, diags - } - trav = append(trav, t...) - - // The index key is the only place where we could have variables that - // reference count and each, so we need to parse those independently. 
- idx, hclDiags := parseIndexKeyExpr(e.Key, keyData) - diags = diags.Append(hclDiags) - - trav = append(trav, idx) - - default: - // Something unexpected got through config validation. We're not sure - // what it is, but we'll point it out in the diagnostics for the user - // to fix. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid replace_triggered_by expression", - Detail: "Unexpected expression found in replace_triggered_by.", - Subject: e.Range().Ptr(), - }) - } - - return trav, diags -} - -// parseIndexKeyExpr takes an hcl.Expression and parses it as an index key, while -// evaluating any references to count.index or each.key. -func parseIndexKeyExpr(expr hcl.Expression, keyData instances.RepetitionData) (hcl.TraverseIndex, hcl.Diagnostics) { - idx := hcl.TraverseIndex{ - SrcRange: expr.Range(), - } - - trav, diags := hcl.RelTraversalForExpr(expr) - if diags.HasErrors() { - return idx, diags - } - - keyParts := []string{} - - for _, t := range trav { - attr, ok := t.(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index expression", - Detail: "Only constant values, count.index or each.key are allowed in index expressions.", - Subject: expr.Range().Ptr(), - }) - return idx, diags - } - keyParts = append(keyParts, attr.Name) - } - - switch strings.Join(keyParts, ".") { - case "count.index": - if keyData.CountIndex == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: `The "count" object can only be used in "resource" blocks when the "count" argument is set.`, - Subject: expr.Range().Ptr(), - }) - } - idx.Key = keyData.CountIndex - - case "each.key": - if keyData.EachKey == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "each" in context without for_each`, - Detail: `The "each" object can be used only in 
"resource" blocks when the "for_each" argument is set.`, - Subject: expr.Range().Ptr(), - }) - } - idx.Key = keyData.EachKey - default: - // Something may have slipped through validation, probably from a json - // configuration. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index expression", - Detail: "Only constant values, count.index or each.key are allowed in index expressions.", - Subject: expr.Range().Ptr(), - }) - } - - return idx, diags - -} diff --git a/internal/terraform/evaluate_triggers_test.go b/internal/terraform/evaluate_triggers_test.go deleted file mode 100644 index d51b1c2be6b6..000000000000 --- a/internal/terraform/evaluate_triggers_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/zclconf/go-cty/cty" -) - -func TestEvalReplaceTriggeredBy(t *testing.T) { - tests := map[string]struct { - // Raw config expression from within replace_triggered_by list. - // If this does not contains any count or each references, it should - // directly parse into the same *addrs.Reference. 
- expr string - - // If the expression contains count or each, then we need to add - // repetition data, and the static string to parse into the desired - // *addrs.Reference - repData instances.RepetitionData - reference string - }{ - "single resource": { - expr: "test_resource.a", - }, - - "resource instance attr": { - expr: "test_resource.a.attr", - }, - - "resource instance index attr": { - expr: "test_resource.a[0].attr", - }, - - "resource instance count": { - expr: "test_resource.a[count.index]", - repData: instances.RepetitionData{ - CountIndex: cty.NumberIntVal(0), - }, - reference: "test_resource.a[0]", - }, - "resource instance for_each": { - expr: "test_resource.a[each.key].attr", - repData: instances.RepetitionData{ - EachKey: cty.StringVal("k"), - }, - reference: `test_resource.a["k"].attr`, - }, - "resource instance for_each map attr": { - expr: "test_resource.a[each.key].attr[each.key]", - repData: instances.RepetitionData{ - EachKey: cty.StringVal("k"), - }, - reference: `test_resource.a["k"].attr["k"]`, - }, - } - - for name, tc := range tests { - pos := hcl.Pos{Line: 1, Column: 1} - t.Run(name, func(t *testing.T) { - expr, hclDiags := hclsyntax.ParseExpression([]byte(tc.expr), "", pos) - if hclDiags.HasErrors() { - t.Fatal(hclDiags) - } - - got, diags := evalReplaceTriggeredByExpr(expr, tc.repData) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - want := tc.reference - if want == "" { - want = tc.expr - } - - // create the desired reference - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(want), "", pos) - if travDiags.HasErrors() { - t.Fatal(travDiags) - } - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if got.DisplayString() != ref.DisplayString() { - t.Fatalf("expected %q: got %q", ref.DisplayString(), got.DisplayString()) - } - }) - } -} diff --git a/internal/terraform/evaluate_valid.go b/internal/terraform/evaluate_valid.go deleted file mode 100644 index 
1d43cc4fce64..000000000000 --- a/internal/terraform/evaluate_valid.go +++ /dev/null @@ -1,318 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/didyoumean" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// StaticValidateReferences checks the given references against schemas and -// other statically-checkable rules, producing error diagnostics if any -// problems are found. -// -// If this method returns errors for a particular reference then evaluating -// that reference is likely to generate a very similar error, so callers should -// not run this method and then also evaluate the source expression(s) and -// merge the two sets of diagnostics together, since this will result in -// confusing redundant errors. -// -// This method can find more errors than can be found by evaluating an -// expression with a partially-populated scope, since it checks the referenced -// names directly against the schema rather than relying on evaluation errors. -// -// The result may include warning diagnostics if, for example, deprecated -// features are referenced. -func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, ref := range refs { - moreDiags := d.staticValidateReference(ref, self) - diags = diags.Append(moreDiags) - } - return diags -} - -func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if modCfg == nil { - // This is a bug in the caller rather than a problem with the - // reference, but rather than crashing out here in an unhelpful way - // we'll just ignore it and trust a different layer to catch it. 
- return nil - } - - if ref.Subject == addrs.Self { - // The "self" address is a special alias for the address given as - // our self parameter here, if present. - if self == nil { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner, connection, and postcondition blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - return diags - } - - synthRef := *ref // shallow copy - synthRef.Subject = self - ref = &synthRef - } - - switch addr := ref.Subject.(type) { - - // For static validation we validate both resource and resource instance references the same way. - // We mostly disregard the index, though we do some simple validation of - // its _presence_ in staticValidateSingleResourceReference and - // staticValidateMultiResourceReference respectively. - case addrs.Resource: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - return diags - case addrs.ResourceInstance: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) - return diags - - // We also handle all module call references the same way, disregarding index. 
- case addrs.ModuleCall: - return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallInstance: - return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallInstanceOutput: - // This one is a funny one because we will take the output name referenced - // and use it to fake up a "remaining" that would make sense for the - // module call itself, rather than for the specific output, and then - // we can just re-use our static module call validation logic. - remain := make(hcl.Traversal, len(ref.Remaining)+1) - copy(remain[1:], ref.Remaining) - remain[0] = hcl.TraverseAttr{ - Name: addr.Name, - - // Using the whole reference as the source range here doesn't exactly - // match how HCL would normally generate an attribute traversal, - // but is close enough for our purposes. - SrcRange: ref.SourceRange.ToHCL(), - } - return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) - - default: - // Anything else we'll just permit through without any static validation - // and let it be caught during dynamic evaluation, in evaluate.go . - return nil - } -} - -func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - // If we have at least one step in "remain" and this resource has - // "count" set then we know for sure this in invalid because we have - // something like: - // aws_instance.foo.bar - // ...when we really need - // aws_instance.foo[count.index].bar - - // It is _not_ safe to do this check when remain is empty, because that - // would also match aws_instance.foo[count.index].bar due to `count.index` - // not being statically-resolvable as part of a reference, and match - // direct references to the whole aws_instance.foo tuple. 
- if len(remain) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if cfg.Count != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - if cfg.ForEach != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - - return diags -} - -func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. 
- return diags - } - - if addr.Key == addrs.NoKey { - // This is a different path into staticValidateSingleResourceReference - return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) - } else { - if cfg.Count == nil && cfg.ForEach == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unexpected resource instance key`, - Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), - Subject: rng.ToHCL().Ptr(), - }) - } - } - - return diags -} - -func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - var modeAdjective string - switch addr.Mode { - case addrs.ManagedResourceMode: - modeAdjective = "managed" - case addrs.DataResourceMode: - modeAdjective = "data" - default: - // should never happen - modeAdjective = "" - } - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - var suggestion string - // A common mistake is omitting the data. prefix when trying to refer - // to a data resource, so we'll add a special hint for that. 
- if addr.Mode == addrs.ManagedResourceMode { - candidateAddr := addr // not a pointer, so this is a copy - candidateAddr.Mode = addrs.DataResourceMode - if candidateCfg := modCfg.Module.ResourceByAddr(candidateAddr); candidateCfg != nil { - suggestion = fmt.Sprintf("\n\nDid you mean the data resource %s?", candidateAddr) - } - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.%s`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - providerFqn := modCfg.Module.ProviderForLocalConfig(cfg.ProviderConfigAddr()) - schema, _, err := d.Evaluator.Plugins.ResourceTypeSchema(providerFqn, addr.Mode, addr.Type) - if err != nil { - // Prior validation should've taken care of a schema lookup error, - // so we should never get here but we'll handle it here anyway for - // robustness. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed provider schema lookup`, - Detail: fmt.Sprintf(`Couldn't load schema for %s resource type %q in %s: %s.`, modeAdjective, addr.Type, providerFqn.String(), err), - Subject: rng.ToHCL().Ptr(), - }) - } - - if schema == nil { - // Prior validation should've taken care of a resource block with an - // unsupported type, so we should never get here but we'll handle it - // here anyway for robustness. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource type`, - Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerFqn.String()), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // As a special case we'll detect attempts to access an attribute called - // "count" and produce a special error for it, since versions of Terraform - // prior to v0.12 offered this as a weird special case that we can no - // longer support. - if len(remain) > 0 { - if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource count attribute`, - Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - } - - // If we got this far then we'll try to validate the remaining traversal - // steps against our schema. - moreDiags := schema.StaticValidateTraversal(remain) - diags = diags.Append(moreDiags) - - return diags -} - -func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // For now, our focus here is just in testing that the referenced module - // call exists. All other validation is deferred until evaluation time. 
- _, exists := modCfg.Module.ModuleCalls[addr.Name] - if !exists { - var suggestions []string - for name := range modCfg.Module.ModuleCalls { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - return diags -} - -// moduleConfigDisplayAddr returns a string describing the given module -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. 
-func moduleConfigDisplayAddr(addr addrs.Module) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/internal/terraform/evaluate_valid_test.go b/internal/terraform/evaluate_valid_test.go deleted file mode 100644 index cfdfdea1f5e1..000000000000 --- a/internal/terraform/evaluate_valid_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang" -) - -func TestStaticValidateReferences(t *testing.T) { - tests := []struct { - Ref string - WantErr string - }{ - { - "aws_instance.no_count", - ``, - }, - { - "aws_instance.count", - ``, - }, - { - "aws_instance.count[0]", - ``, - }, - { - "aws_instance.nonexist", - `Reference to undeclared resource: A managed resource "aws_instance" "nonexist" has not been declared in the root module.`, - }, - { - "beep.boop", - `Reference to undeclared resource: A managed resource "beep" "boop" has not been declared in the root module. - -Did you mean the data resource data.beep.boop?`, - }, - { - "aws_instance.no_count[0]", - `Unexpected resource instance key: Because aws_instance.no_count does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, - }, - { - "aws_instance.count.foo", - // In this case we return two errors that are somewhat redundant with - // one another, but we'll accept that because they both report the - // problem from different perspectives and so give the user more - // opportunity to understand what's going on here. - `2 problems: - -- Missing resource instance key: Because aws_instance.count has "count" set, its attributes must be accessed on specific instances. 
- -For example, to correlate with indices of a referring resource, use: - aws_instance.count[count.index] -- Unsupported attribute: This object has no argument, nested block, or exported attribute named "foo".`, - }, - { - "boop_instance.yep", - ``, - }, - { - "boop_whatever.nope", - `Invalid resource type: A managed resource type "boop_whatever" is not supported by provider "registry.terraform.io/foobar/beep".`, - }, - } - - cfg := testModule(t, "static-validate-refs") - evaluator := &Evaluator{ - Config: cfg, - Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("aws"): { - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": {}, - }, - }, - addrs.MustParseProviderSourceString("foobar/beep"): { - ResourceTypes: map[string]*configschema.Block{ - // intentional mismatch between resource type prefix and provider type - "boop_instance": {}, - }, - }, - }), - } - - for _, test := range tests { - t.Run(test.Ref, func(t *testing.T) { - traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Ref), "", hcl.Pos{Line: 1, Column: 1}) - if hclDiags.HasErrors() { - t.Fatal(hclDiags.Error()) - } - - refs, diags := lang.References([]hcl.Traversal{traversal}) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - data := &evaluationStateData{ - Evaluator: evaluator, - } - - diags = data.StaticValidateReferences(refs, nil) - if diags.HasErrors() { - if test.WantErr == "" { - t.Fatalf("Unexpected diagnostics: %s", diags.Err()) - } - - gotErr := diags.Err().Error() - if gotErr != test.WantErr { - t.Fatalf("Wrong diagnostics\ngot: %s\nwant: %s", gotErr, test.WantErr) - } - return - } - - if test.WantErr != "" { - t.Fatalf("Expected diagnostics, but got none\nwant: %s", test.WantErr) - } - }) - } -} diff --git a/internal/terraform/execute.go b/internal/terraform/execute.go deleted file mode 100644 index 8c3a6fe15e0c..000000000000 --- a/internal/terraform/execute.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - 
-import "github.com/hashicorp/terraform/internal/tfdiags" - -// GraphNodeExecutable is the interface that graph nodes must implement to -// enable execution. -type GraphNodeExecutable interface { - Execute(EvalContext, walkOperation) tfdiags.Diagnostics -} diff --git a/internal/terraform/features.go b/internal/terraform/features.go deleted file mode 100644 index 97c77bdbd001..000000000000 --- a/internal/terraform/features.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -import "os" - -// This file holds feature flags for the next release - -var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/internal/terraform/graph.go b/internal/terraform/graph.go deleted file mode 100644 index 38d0fad6ed5c..000000000000 --- a/internal/terraform/graph.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/hashicorp/terraform/internal/addrs" - - "github.com/hashicorp/terraform/internal/dag" -) - -// Graph represents the graph that Terraform uses to represent resources -// and their dependencies. -type Graph struct { - // Graph is the actual DAG. This is embedded so you can call the DAG - // methods directly. - dag.AcyclicGraph - - // Path is the path in the module tree that this Graph represents. - Path addrs.ModuleInstance -} - -func (g *Graph) DirectedGraph() dag.Grapher { - return &g.AcyclicGraph -} - -// Walk walks the graph with the given walker for callbacks. The graph -// will be walked with full parallelism, so the walker should expect -// to be called in concurrently. -func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { - return g.walk(walker) -} - -func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { - // The callbacks for enter/exiting a graph - ctx := walker.EvalContext() - - // Walk the graph. 
- walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) { - // the walkFn is called asynchronously, and needs to be recovered - // separately in the case of a panic. - defer logging.PanicHandler() - - log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) - - defer func() { - if diags.HasErrors() { - for _, diag := range diags { - if diag.Severity() == tfdiags.Error { - desc := diag.Description() - log.Printf("[ERROR] vertex %q error: %s", dag.VertexName(v), desc.Summary) - } - } - log.Printf("[TRACE] vertex %q: visit complete, with errors", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) - } - }() - - // vertexCtx is the context that we use when evaluating. This - // is normally the context of our graph but can be overridden - // with a GraphNodeModuleInstance impl. - vertexCtx := ctx - if pn, ok := v.(GraphNodeModuleInstance); ok { - vertexCtx = walker.EnterPath(pn.Path()) - defer walker.ExitPath(pn.Path()) - } - - // If the node is exec-able, then execute it. - if ev, ok := v.(GraphNodeExecutable); ok { - diags = diags.Append(walker.Execute(vertexCtx, ev)) - if diags.HasErrors() { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) - - g, err := ev.DynamicExpand(vertexCtx) - diags = diags.Append(err) - if diags.HasErrors() { - log.Printf("[TRACE] vertex %q: failed expanding dynamic subgraph: %s", dag.VertexName(v), err) - return - } - if g != nil { - // The subgraph should always be valid, per our normal acyclic - // graph validation rules. - if err := g.Validate(); err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Graph node has invalid dynamic subgraph", - fmt.Sprintf("The internal logic for %q generated an invalid dynamic subgraph: %s.\n\nThis is a bug in Terraform. 
Please report it!", dag.VertexName(v), err), - )) - return - } - // If we passed validation then there is exactly one root node. - // That root node should always be "rootNode", the singleton - // root node value. - if n, err := g.Root(); err != nil || n != dag.Vertex(rootNode) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Graph node has invalid dynamic subgraph", - fmt.Sprintf("The internal logic for %q generated an invalid dynamic subgraph: the root node is %T, which is not a suitable root node type.\n\nThis is a bug in Terraform. Please report it!", dag.VertexName(v), n), - )) - return - } - - // Walk the subgraph - log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) - subDiags := g.walk(walker) - diags = diags.Append(subDiags) - if subDiags.HasErrors() { - var errs []string - for _, d := range subDiags { - errs = append(errs, d.Description().Summary) - } - log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors: %s", dag.VertexName(v), strings.Join(errs, ",")) - return - } - log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) - } - } - return - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/internal/terraform/graph_builder.go b/internal/terraform/graph_builder.go deleted file mode 100644 index 1c69ee41f82d..000000000000 --- a/internal/terraform/graph_builder.go +++ /dev/null @@ -1,65 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. 
It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. -type BasicGraphBuilder struct { - Steps []GraphTransformer - // Optional name to add to the graph debug log - Name string -} - -func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - g := &Graph{Path: path} - - var lastStepStr string - for _, step := range b.Steps { - if step == nil { - continue - } - log.Printf("[TRACE] Executing graph transform %T", step) - - err := step.Transform(g) - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s ------", step, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] Completed graph transform %T (no changes)", step) - } - - if err != nil { - if nf, isNF := err.(tfdiags.NonFatalError); isNF { - diags = diags.Append(nf.Diagnostics) - } else { - diags = diags.Append(err) - return g, diags - } - } - } - - if err := g.Validate(); err != nil { - log.Printf("[ERROR] Graph validation failed. 
Graph:\n\n%s", g.String()) - diags = diags.Append(err) - return nil, diags - } - - return g, diags -} diff --git a/internal/terraform/graph_builder_apply.go b/internal/terraform/graph_builder_apply.go deleted file mode 100644 index 29e60ff6a0d8..000000000000 --- a/internal/terraform/graph_builder_apply.go +++ /dev/null @@ -1,175 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ApplyGraphBuilder implements GraphBuilder and is responsible for building -// a graph for applying a Terraform diff. -// -// Because the graph is built from the diff (vs. the config or state), -// this helps ensure that the apply-time graph doesn't modify any resources -// that aren't explicitly in the diff. There are other scenarios where the -// diff can be deviated, so this is just one layer of protection. -type ApplyGraphBuilder struct { - // Config is the configuration tree that the diff was built from. - Config *configs.Config - - // Changes describes the changes that we need apply. - Changes *plans.Changes - - // State is the current state - State *states.State - - // RootVariableValues are the root module input variables captured as - // part of the plan object, which we must reproduce in the apply step - // to get a consistent result. - RootVariableValues InputValues - - // Plugins is a library of the plug-in components (providers and - // provisioners) available for use. - Plugins *contextPlugins - - // Targets are resources to target. This is only required to make sure - // unnecessary outputs aren't included in the apply graph. The plan - // builder successfully handles targeting resources. In the future, - // outputs should go into the diff so that this is unnecessary. 
- Targets []addrs.Targetable - - // ForceReplace are the resource instance addresses that the user - // requested to force replacement for when creating the plan, if any. - // The apply step refers to these as part of verifying that the planned - // actions remain consistent between plan and apply. - ForceReplace []addrs.AbsResourceInstance - - // Plan Operation this graph will be used for. - Operation walkOperation -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Name: "ApplyGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandApplyableResource{ - NodeAbstractResource: a, - } - } - - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeApplyableResourceInstance{ - NodeAbstractResourceInstance: a, - forceReplace: b.ForceReplace, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config. During apply, - // we use this just to ensure that the whole-resource metadata is - // updated to reflect things such as whether the count argument is - // set in config, or which provider configuration manages each resource. 
- &ConfigTransformer{ - Concrete: concreteResource, - Config: b.Config, - }, - - // Add dynamic values - &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, - &ModuleVariableTransformer{Config: b.Config}, - &LocalTransformer{Config: b.Config}, - &OutputTransformer{ - Config: b.Config, - ApplyDestroy: b.Operation == walkDestroy, - }, - - // Creates all the resource instances represented in the diff, along - // with dependency edges against the whole-resource nodes added by - // ConfigTransformer above. - &DiffTransformer{ - Concrete: concreteResourceInstance, - State: b.State, - Changes: b.Changes, - Config: b.Config, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Create orphan output nodes - &OrphanOutputTransformer{Config: b.Config, State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // add providers - transformProviders(concreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{Config: b.Config}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. 
- &ForcedCBDTransformer{}, - - // Destruction ordering - &DestroyEdgeTransformer{ - Changes: b.Changes, - Operation: b.Operation, - }, - &CBDEdgeTransformer{ - Config: b.Config, - State: b.State, - }, - - // We need to remove configuration nodes that are not used at all, as - // they may not be able to evaluate, especially during destroy. - // These include variables, locals, and instance expanders. - &pruneUnusedNodesTransformer{}, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // close the root module - &CloseRootModuleTransformer{}, - - // Perform the transitive reduction to make our graph a bit - // more understandable if possible (it usually is possible). - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/internal/terraform/graph_builder_apply_test.go b/internal/terraform/graph_builder_apply_test.go deleted file mode 100644 index 88ebcfefd1f3..000000000000 --- a/internal/terraform/graph_builder_apply_test.go +++ /dev/null @@ -1,751 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" -) - -func TestApplyGraphBuilder_impl(t *testing.T) { - var _ GraphBuilder = new(ApplyGraphBuilder) -} - -func TestApplyGraphBuilder(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.create"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.other"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - { - Addr: mustResourceInstanceAddr("module.child.test_object.create"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - { - Addr: 
mustResourceInstanceAddr("module.child.test_object.other"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - }, - } - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-basic"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong path %q", g.Path.String()) - } - - got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(testApplyGraphBuilderStr) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", diff) - } -} - -// This tests the ordering of two resources where a non-CBD depends -// on a CBD. GH-11349. -func TestApplyGraphBuilder_depCbd(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-dep-cbd"), - Changes: changes, - 
Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong path %q", g.Path.String()) - } - - // We're going to go hunting for our deposed instance node here, so we - // can find out its key to use in the assertions below. - var dk states.DeposedKey - for _, v := range g.Vertices() { - tv, ok := v.(*NodeDestroyDeposedResourceInstanceObject) - if !ok { - continue - } - if dk != states.NotDeposed { - t.Fatalf("more than one deposed instance node in the graph; want only one") - } - dk = tv.DeposedKey - } - if dk == states.NotDeposed { - t.Fatalf("no deposed instance node in the graph; want one") - } - - destroyName := fmt.Sprintf("test_object.A (destroy deposed %s)", dk) - - // Create A, Modify B, Destroy A - testGraphHappensBefore( - t, g, - "test_object.A", - destroyName, - ) - testGraphHappensBefore( - t, g, - "test_object.A", - "test_object.B", - ) - testGraphHappensBefore( - t, g, - "test_object.B", - destroyName, - ) -} - -// This tests the ordering of two resources that are both CBD that -// require destroy/create. 
-func TestApplyGraphBuilder_doubleCBD(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - }, - } - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-double-cbd"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong path %q", g.Path.String()) - } - - // We're going to go hunting for our deposed instance node here, so we - // can find out its key to use in the assertions below. - var destroyA, destroyB string - for _, v := range g.Vertices() { - tv, ok := v.(*NodeDestroyDeposedResourceInstanceObject) - if !ok { - continue - } - - switch tv.Addr.Resource.Resource.Name { - case "A": - destroyA = fmt.Sprintf("test_object.A (destroy deposed %s)", tv.DeposedKey) - case "B": - destroyB = fmt.Sprintf("test_object.B (destroy deposed %s)", tv.DeposedKey) - default: - t.Fatalf("unknown instance: %s", tv.Addr) - } - } - - // Create A, Modify B, Destroy A - testGraphHappensBefore( - t, g, - "test_object.A", - destroyA, - ) - testGraphHappensBefore( - t, g, - "test_object.A", - "test_object.B", - ) - testGraphHappensBefore( - t, g, - "test_object.B", - destroyB, - ) -} - -// This tests the ordering of two resources being destroyed that depend -// on each other from only state. 
GH-11749 -func TestApplyGraphBuilder_destroyStateOnly(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("module.child.test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - { - Addr: mustResourceInstanceAddr("module.child.test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"bar"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "empty"), - Changes: changes, - State: state, - Plugins: simpleMockPluginLibrary(), - } - - g, diags := b.Build(addrs.RootModuleInstance) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong path %q", g.Path.String()) - } - - testGraphHappensBefore( - t, g, - "module.child.test_object.B (destroy)", - "module.child.test_object.A (destroy)") -} - -// This tests the ordering of destroying a single count of a resource. 
-func TestApplyGraphBuilder_destroyCount(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A[1]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.RootModule() - addrA := mustResourceInstanceAddr("test_object.A[1]") - root.SetResourceInstanceCurrent( - addrA.Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B"}`), - Dependencies: []addrs.ConfigResource{addrA.ContainingResource().Config()}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-count"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(testApplyGraphBuilderDestroyCountStr) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", diff) - } -} - -func TestApplyGraphBuilder_moduleDestroy(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("module.A.test_object.foo"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - { - Addr: mustResourceInstanceAddr("module.B.test_object.foo"), - 
ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - }, - } - - state := states.NewState() - modA := state.EnsureModule(addrs.RootModuleInstance.Child("A", addrs.NoKey)) - modA.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - modB := state.EnsureModule(addrs.RootModuleInstance.Child("B", addrs.NoKey)) - modB.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.foo").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo","value":"foo"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.A.test_object.foo")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-module-destroy"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - testGraphHappensBefore( - t, g, - "module.B.test_object.foo (destroy)", - "module.A.test_object.foo (destroy)", - ) -} - -func TestApplyGraphBuilder_targetModule(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.foo"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - { - Addr: mustResourceInstanceAddr("module.child2.test_object.foo"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-target-module"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child2", addrs.NoKey), - }, - } - - g, err := 
b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - testGraphNotContains(t, g, "module.child1.output.instance_id") -} - -// Ensure that an update resulting from the removal of a resource happens after -// that resource is destroyed. -func TestApplyGraphBuilder_updateFromOrphan(t *testing.T) { - schemas := simpleTestSchemas() - instanceSchema := schemas.Providers[addrs.NewDefaultProvider("test")].ResourceTypes["test_object"] - - bBefore, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("b_id"), - "test_string": cty.StringVal("a_id"), - }), instanceSchema.ImpliedType()) - bAfter, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("b_id"), - "test_string": cty.StringVal("changed"), - }), instanceSchema.ImpliedType()) - - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.a"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.b"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - Before: bBefore, - After: bAfter, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "a", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a_id"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "b", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b_id","test_string":"a_id"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: 
addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "a", - }, - Module: root.Addr.Module(), - }, - }, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-orphan-update"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := strings.TrimSpace(` -test_object.a (destroy) -test_object.b - test_object.a (destroy) -`) - - instanceGraph := filterInstances(g) - got := strings.TrimSpace(instanceGraph.String()) - - if got != expected { - t.Fatalf("expected:\n%s\ngot:\n%s", expected, got) - } -} - -// Ensure that an update resulting from the removal of a resource happens before -// a CBD resource is destroyed. -func TestApplyGraphBuilder_updateFromCBDOrphan(t *testing.T) { - schemas := simpleTestSchemas() - instanceSchema := schemas.Providers[addrs.NewDefaultProvider("test")].ResourceTypes["test_object"] - - bBefore, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("b_id"), - "test_string": cty.StringVal("a_id"), - }), instanceSchema.ImpliedType()) - bAfter, _ := plans.NewDynamicValue( - cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("b_id"), - "test_string": cty.StringVal("changed"), - }), instanceSchema.ImpliedType()) - - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.a"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.b"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - Before: bBefore, - After: bAfter, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: 
addrs.ManagedResourceMode, - Type: "test_object", - Name: "a", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a_id"}`), - CreateBeforeDestroy: true, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "b", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b_id","test_string":"a_id"}`), - Dependencies: []addrs.ConfigResource{ - { - Resource: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "a", - }, - Module: root.Addr.Module(), - }, - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-orphan-update"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := strings.TrimSpace(` -test_object.a (destroy) - test_object.b -test_object.b -`) - - instanceGraph := filterInstances(g) - got := strings.TrimSpace(instanceGraph.String()) - - if got != expected { - t.Fatalf("expected:\n%s\ngot:\n%s", expected, got) - } -} - -// The orphan clean up node should not be connected to a provider -func TestApplyGraphBuilder_orphanedWithProvider(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: 
[]byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"].foo`), - ) - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-orphan-alias"), - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatal(err) - } - - // The cleanup node has no state or config of its own, so would create a - // default provider which we don't want. - testGraphNotContains(t, g, "provider.test") -} - -const testApplyGraphBuilderStr = ` -module.child (close) - module.child.test_object.other -module.child (expand) -module.child.test_object.create - module.child.test_object.create (expand) -module.child.test_object.create (expand) - module.child (expand) - provider["registry.terraform.io/hashicorp/test"] -module.child.test_object.other - module.child.test_object.create - module.child.test_object.other (expand) -module.child.test_object.other (expand) - module.child (expand) - provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"] (close) - module.child.test_object.other - test_object.other -root - module.child (close) - provider["registry.terraform.io/hashicorp/test"] (close) -test_object.create - test_object.create (expand) -test_object.create (expand) - provider["registry.terraform.io/hashicorp/test"] -test_object.other - test_object.create - test_object.other (expand) -test_object.other (expand) - provider["registry.terraform.io/hashicorp/test"] -` - -const testApplyGraphBuilderDestroyCountStr = ` -provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"] (close) - test_object.B -root - provider["registry.terraform.io/hashicorp/test"] (close) -test_object.A (expand) - provider["registry.terraform.io/hashicorp/test"] -test_object.A[1] (destroy) - provider["registry.terraform.io/hashicorp/test"] -test_object.B - 
test_object.A (expand) - test_object.A[1] (destroy) - test_object.B (expand) -test_object.B (expand) - provider["registry.terraform.io/hashicorp/test"] -` diff --git a/internal/terraform/graph_builder_eval.go b/internal/terraform/graph_builder_eval.go deleted file mode 100644 index cb3606d62c9f..000000000000 --- a/internal/terraform/graph_builder_eval.go +++ /dev/null @@ -1,108 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable -// for evaluating in-memory values (input variables, local values, output -// values) in the state without any other side-effects. -// -// This graph is used only in weird cases, such as the "terraform console" -// CLI command, where we need to evaluate expressions against the state -// without taking any other actions. -// -// The generated graph will include nodes for providers, resources, etc -// just to allow indirect dependencies to be resolved, but these nodes will -// not take any actions themselves since we assume that their parts of the -// state, if any, are already complete. -// -// Although the providers are never configured, they must still be available -// in order to obtain schema information used for type checking, etc. -type EvalGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the current state - State *states.State - - // RootVariableValues are the raw input values for root input variables - // given by the caller, which we'll resolve into final values as part - // of the plan walk. - RootVariableValues InputValues - - // Plugins is a library of plug-in components (providers and - // provisioners) available for use. 
- Plugins *contextPlugins -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Name: "EvalGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Steps() []GraphTransformer { - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeEvalableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Config: b.Config, - }, - - // Add dynamic values - &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, - &ModuleVariableTransformer{Config: b.Config}, - &LocalTransformer{Config: b.Config}, - &OutputTransformer{ - Config: b.Config, - Planning: true, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - transformProviders(concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{Config: b.Config}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Although we don't configure providers, we do still start them up - // to get their schemas, and so we must shut them down again here. - &CloseProviderTransformer{}, - - // Close root module - &CloseRootModuleTransformer{}, - - // Remove redundant edges to simplify the graph. 
- &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/internal/terraform/graph_builder_plan.go b/internal/terraform/graph_builder_plan.go deleted file mode 100644 index f085e9aa2dad..000000000000 --- a/internal/terraform/graph_builder_plan.go +++ /dev/null @@ -1,305 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// PlanGraphBuilder is a GraphBuilder implementation that builds a graph for -// planning and for other "plan-like" operations which don't require an -// already-calculated plan as input. -// -// Unlike the apply graph builder, this graph builder: -// -// - Makes its decisions primarily based on the given configuration, which -// represents the desired state. -// -// - Ignores certain lifecycle concerns like create_before_destroy, because -// those are only important once we already know what action we're planning -// to take against a particular resource instance. -type PlanGraphBuilder struct { - // Config is the configuration tree to build a plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // RootVariableValues are the raw input values for root input variables - // given by the caller, which we'll resolve into final values as part - // of the plan walk. - RootVariableValues InputValues - - // Plugins is a library of plug-in components (providers and - // provisioners) available for use. - Plugins *contextPlugins - - // Targets are resources to target - Targets []addrs.Targetable - - // ForceReplace are resource instances where if we would normally have - // generated a NoOp or Update action then we'll force generating a replace - // action instead. Create and Delete actions are not affected. 
- ForceReplace []addrs.AbsResourceInstance - - // skipRefresh indicates that we should skip refreshing managed resources - skipRefresh bool - - // preDestroyRefresh indicates that we are executing the refresh which - // happens immediately before a destroy plan, which happens to use the - // normal planing mode so skipPlanChanges cannot be set. - preDestroyRefresh bool - - // skipPlanChanges indicates that we should skip the step of comparing - // prior state with configuration and generating planned changes to - // resource instances. (This is for the "refresh only" planning mode, - // where we _only_ do the refresh step.) - skipPlanChanges bool - - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceInstance ConcreteResourceInstanceNodeFunc - ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc - ConcreteResourceInstanceDeposed ConcreteResourceInstanceDeposedNodeFunc - ConcreteModule ConcreteModuleNodeFunc - - // Plan Operation this graph will be used for. - Operation walkOperation - - // ImportTargets are the list of resources to import. - ImportTargets []*ImportTarget -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - log.Printf("[TRACE] building graph for %s", b.Operation) - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - switch b.Operation { - case walkPlan: - b.initPlan() - case walkPlanDestroy: - b.initDestroy() - case walkValidate: - b.initValidate() - case walkImport: - b.initImport() - default: - panic("invalid plan operation: " + b.Operation.String()) - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Config: b.Config, - - // Resources are not added from the config on destroy. 
- skip: b.Operation == walkPlanDestroy, - - importTargets: b.ImportTargets, - }, - - // Add dynamic values - &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, - &ModuleVariableTransformer{Config: b.Config}, - &LocalTransformer{Config: b.Config}, - &OutputTransformer{ - Config: b.Config, - RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh, - PlanDestroy: b.Operation == walkPlanDestroy, - - // NOTE: We currently treat anything built with the plan graph - // builder as "planning" for our purposes here, because we share - // the same graph node implementation between all of the walk - // types and so the pre-planning walks still think they are - // producing a plan even though we immediately discard it. - Planning: true, - }, - - // Add orphan resources - &OrphanResourceInstanceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Config: b.Config, - skip: b.Operation == walkPlanDestroy, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can plan to destroy them. (During plan this will - // intentionally skip creating nodes for _current_ objects, since - // ConfigTransformer created nodes that will do that during - // DynamicExpand.) - &StateTransformer{ - ConcreteCurrent: b.ConcreteResourceInstance, - ConcreteDeposed: b.ConcreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Create orphan output nodes - &OrphanOutputTransformer{ - Config: b.Config, - State: b.State, - Planning: true, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // add providers - transformProviders(b.ConcreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. 
- &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{Concrete: b.ConcreteModule, Config: b.Config}, - - &ReferenceTransformer{}, - - &AttachDependenciesTransformer{}, - - // Make sure data sources are aware of any depends_on from the - // configuration - &attachDataResourceDependsOnTransformer{}, - - // DestroyEdgeTransformer is only required during a plan so that the - // TargetsTransformer can determine which nodes to keep in the graph. - &DestroyEdgeTransformer{}, - - &pruneUnusedNodesTransformer{ - skip: b.Operation != walkPlanDestroy, - }, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. - &ForcedCBDTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Close the root module - &CloseRootModuleTransformer{}, - - // Perform the transitive reduction to make our graph a bit - // more understandable if possible (it usually is possible). 
- &TransitiveReductionTransformer{}, - } - - return steps -} - -func (b *PlanGraphBuilder) initPlan() { - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandPlannableResource{ - NodeAbstractResource: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, - preDestroyRefresh: b.preDestroyRefresh, - forceReplace: b.ForceReplace, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, - } - } - - b.ConcreteResourceInstanceDeposed = func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - - skipRefresh: b.skipRefresh, - skipPlanChanges: b.skipPlanChanges, - } - } -} - -func (b *PlanGraphBuilder) initDestroy() { - b.initPlan() - - b.ConcreteResourceInstance = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlanDestroyableResourceInstance{ - NodeAbstractResourceInstance: a, - skipRefresh: b.skipRefresh, - } - } -} - -func (b *PlanGraphBuilder) initValidate() { - // Set the provider to the normal provider. This will ask for input. 
- b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodeValidatableResource{ - NodeAbstractResource: a, - } - } - - b.ConcreteModule = func(n *nodeExpandModule) dag.Vertex { - return &nodeValidateModule{ - nodeExpandModule: *n, - } - } -} - -func (b *PlanGraphBuilder) initImport() { - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandPlannableResource{ - NodeAbstractResource: a, - - // For now we always skip planning changes for import, since we are - // not going to combine importing with other changes. This is - // temporary to try and maintain existing import behaviors, but - // planning will need to be allowed for more complex configurations. - skipPlanChanges: true, - - // We also skip refresh for now, since the plan output is written - // as the new state, and users are not expecting the import process - // to update any other instances in state. 
- skipRefresh: true, - } - } -} diff --git a/internal/terraform/graph_builder_plan_test.go b/internal/terraform/graph_builder_plan_test.go deleted file mode 100644 index 8775e1f17518..000000000000 --- a/internal/terraform/graph_builder_plan_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" -) - -func TestPlanGraphBuilder_impl(t *testing.T) { - var _ GraphBuilder = new(PlanGraphBuilder) -} - -func TestPlanGraphBuilder(t *testing.T) { - awsProvider := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "aws_security_group": {Block: simpleTestSchema()}, - "aws_instance": {Block: simpleTestSchema()}, - "aws_load_balancer": {Block: simpleTestSchema()}, - }, - }, - } - openstackProvider := mockProviderWithResourceTypeSchema("openstack_floating_ip", simpleTestSchema()) - plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), - addrs.NewDefaultProvider("openstack"): providers.FactoryFixed(openstackProvider), - }, nil) - - b := &PlanGraphBuilder{ - Config: testModule(t, "graph-builder-plan-basic"), - Plugins: plugins, - Operation: walkPlan, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(testPlanGraphBuilderStr) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", diff) - } -} - -func TestPlanGraphBuilder_dynamicBlock(t 
*testing.T) { - provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "list": {Type: cty.List(cty.String), Computed: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, - }, - }) - plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), - }, nil) - - b := &PlanGraphBuilder{ - Config: testModule(t, "graph-builder-plan-dynblock"), - Plugins: plugins, - Operation: walkPlan, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - // This test is here to make sure we properly detect references inside - // the special "dynamic" block construct. The most important thing here - // is that at the end test_thing.c depends on both test_thing.a and - // test_thing.b. Other details might shift over time as other logic in - // the graph builders changes. 
- got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(` -provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"] (close) - test_thing.c (expand) -root - provider["registry.terraform.io/hashicorp/test"] (close) -test_thing.a (expand) - provider["registry.terraform.io/hashicorp/test"] -test_thing.b (expand) - provider["registry.terraform.io/hashicorp/test"] -test_thing.c (expand) - test_thing.a (expand) - test_thing.b (expand) -`) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", diff) - } -} - -func TestPlanGraphBuilder_attrAsBlocks(t *testing.T) { - provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "nested": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "foo": cty.String, - })), - Optional: true, - }, - }, - }) - plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), - }, nil) - - b := &PlanGraphBuilder{ - Config: testModule(t, "graph-builder-plan-attr-as-blocks"), - Plugins: plugins, - Operation: walkPlan, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - // This test is here to make sure we properly detect references inside - // the "nested" block that is actually defined in the schema as a - // list-of-objects attribute. This requires some special effort - // inside lang.ReferencesInBlock to make sure it searches blocks of - // type "nested" along with an attribute named "nested". 
- got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(` -provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"] (close) - test_thing.b (expand) -root - provider["registry.terraform.io/hashicorp/test"] (close) -test_thing.a (expand) - provider["registry.terraform.io/hashicorp/test"] -test_thing.b (expand) - test_thing.a (expand) -`) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", diff) - } -} - -func TestPlanGraphBuilder_targetModule(t *testing.T) { - b := &PlanGraphBuilder{ - Config: testModule(t, "graph-builder-plan-target-module-provider"), - Plugins: simpleMockPluginLibrary(), - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Child("child2", addrs.NoKey), - }, - Operation: walkPlan, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - t.Logf("Graph: %s", g.String()) - - testGraphNotContains(t, g, `module.child1.provider["registry.terraform.io/hashicorp/test"]`) - testGraphNotContains(t, g, "module.child1.test_object.foo") -} - -func TestPlanGraphBuilder_forEach(t *testing.T) { - awsProvider := mockProviderWithResourceTypeSchema("aws_instance", simpleTestSchema()) - - plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), - }, nil) - - b := &PlanGraphBuilder{ - Config: testModule(t, "plan-for-each"), - Plugins: plugins, - Operation: walkPlan, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - got := strings.TrimSpace(g.String()) - // We're especially looking for the edge here, where aws_instance.bat - // has a dependency on aws_instance.boo - want := strings.TrimSpace(testPlanGraphBuilderForEachStr) - if diff := cmp.Diff(want, got); diff != "" { - t.Fatalf("wrong result\n%s", 
diff) - } -} - -const testPlanGraphBuilderStr = ` -aws_instance.web (expand) - aws_security_group.firewall (expand) - var.foo -aws_load_balancer.weblb (expand) - aws_instance.web (expand) -aws_security_group.firewall (expand) - provider["registry.terraform.io/hashicorp/aws"] -local.instance_id (expand) - aws_instance.web (expand) -openstack_floating_ip.random (expand) - provider["registry.terraform.io/hashicorp/openstack"] -output.instance_id (expand) - local.instance_id (expand) -provider["registry.terraform.io/hashicorp/aws"] - openstack_floating_ip.random (expand) -provider["registry.terraform.io/hashicorp/aws"] (close) - aws_load_balancer.weblb (expand) -provider["registry.terraform.io/hashicorp/openstack"] -provider["registry.terraform.io/hashicorp/openstack"] (close) - openstack_floating_ip.random (expand) -root - output.instance_id (expand) - provider["registry.terraform.io/hashicorp/aws"] (close) - provider["registry.terraform.io/hashicorp/openstack"] (close) -var.foo -` -const testPlanGraphBuilderForEachStr = ` -aws_instance.bar (expand) - provider["registry.terraform.io/hashicorp/aws"] -aws_instance.bar2 (expand) - provider["registry.terraform.io/hashicorp/aws"] -aws_instance.bat (expand) - aws_instance.boo (expand) -aws_instance.baz (expand) - provider["registry.terraform.io/hashicorp/aws"] -aws_instance.boo (expand) - provider["registry.terraform.io/hashicorp/aws"] -aws_instance.foo (expand) - provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] (close) - aws_instance.bar (expand) - aws_instance.bar2 (expand) - aws_instance.bat (expand) - aws_instance.baz (expand) - aws_instance.foo (expand) -root - provider["registry.terraform.io/hashicorp/aws"] (close) -` diff --git a/internal/terraform/graph_builder_test.go b/internal/terraform/graph_builder_test.go deleted file mode 100644 index 414fc0b8d766..000000000000 --- a/internal/terraform/graph_builder_test.go +++ 
/dev/null @@ -1,64 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - - "github.com/hashicorp/terraform/internal/dag" -) - -func TestBasicGraphBuilder_impl(t *testing.T) { - var _ GraphBuilder = new(BasicGraphBuilder) -} - -func TestBasicGraphBuilder(t *testing.T) { - b := &BasicGraphBuilder{ - Steps: []GraphTransformer{ - &testBasicGraphBuilderTransform{1}, - }, - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - if g.Path.String() != addrs.RootModuleInstance.String() { - t.Fatalf("wrong module path %q", g.Path) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testBasicGraphBuilderStr) - if actual != expected { - t.Fatalf("bad: %s", actual) - } -} - -func TestBasicGraphBuilder_validate(t *testing.T) { - b := &BasicGraphBuilder{ - Steps: []GraphTransformer{ - &testBasicGraphBuilderTransform{1}, - &testBasicGraphBuilderTransform{2}, - }, - } - - _, err := b.Build(addrs.RootModuleInstance) - if err == nil { - t.Fatal("should error") - } -} - -type testBasicGraphBuilderTransform struct { - V dag.Vertex -} - -func (t *testBasicGraphBuilderTransform) Transform(g *Graph) error { - g.Add(t.V) - return nil -} - -const testBasicGraphBuilderStr = ` -1 -` diff --git a/internal/terraform/graph_dot.go b/internal/terraform/graph_dot.go deleted file mode 100644 index 22e701bcc122..000000000000 --- a/internal/terraform/graph_dot.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/internal/dag" - -// GraphDot returns the dot formatting of a visual representation of -// the given Terraform graph. 
-func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { - return string(g.Dot(opts)), nil -} diff --git a/internal/terraform/graph_dot_test.go b/internal/terraform/graph_dot_test.go deleted file mode 100644 index 5042534b4ea2..000000000000 --- a/internal/terraform/graph_dot_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/dag" -) - -func TestGraphDot(t *testing.T) { - cases := []struct { - Name string - Graph testGraphFunc - Opts dag.DotOpts - Expect string - Error string - }{ - { - Name: "empty", - Graph: func() *Graph { return &Graph{} }, - Expect: ` -digraph { - compound = "true" - newrank = "true" - subgraph "root" { - } -}`, - }, - { - Name: "three-level", - Graph: func() *Graph { - var g Graph - root := &testDrawableOrigin{"root"} - g.Add(root) - - levelOne := []interface{}{"foo", "bar"} - for i, s := range levelOne { - levelOne[i] = &testDrawable{ - VertexName: s.(string), - } - v := levelOne[i] - - g.Add(v) - g.Connect(dag.BasicEdge(v, root)) - } - - levelTwo := []string{"baz", "qux"} - for i, s := range levelTwo { - v := &testDrawable{ - VertexName: s, - } - - g.Add(v) - g.Connect(dag.BasicEdge(v, levelOne[i])) - } - - return &g - }, - Expect: ` -digraph { - compound = "true" - newrank = "true" - subgraph "root" { - "[root] bar" - "[root] baz" - "[root] foo" - "[root] qux" - "[root] root" - "[root] bar" -> "[root] root" - "[root] baz" -> "[root] foo" - "[root] foo" -> "[root] root" - "[root] qux" -> "[root] bar" - } -} - `, - }, - - { - Name: "cycle", - Opts: dag.DotOpts{ - DrawCycles: true, - }, - Graph: func() *Graph { - var g Graph - root := &testDrawableOrigin{"root"} - g.Add(root) - - vA := g.Add(&testDrawable{ - VertexName: "A", - }) - - vB := g.Add(&testDrawable{ - VertexName: "B", - }) - - vC := g.Add(&testDrawable{ - VertexName: "C", - }) - - g.Connect(dag.BasicEdge(vA, root)) - g.Connect(dag.BasicEdge(vA, vC)) - g.Connect(dag.BasicEdge(vB, vA)) - 
g.Connect(dag.BasicEdge(vC, vB)) - - return &g - }, - Expect: ` -digraph { - compound = "true" - newrank = "true" - subgraph "root" { - "[root] A" - "[root] B" - "[root] C" - "[root] root" - "[root] A" -> "[root] B" [color = "red", penwidth = "2.0"] - "[root] A" -> "[root] C" - "[root] A" -> "[root] root" - "[root] B" -> "[root] A" - "[root] B" -> "[root] C" [color = "red", penwidth = "2.0"] - "[root] C" -> "[root] A" [color = "red", penwidth = "2.0"] - "[root] C" -> "[root] B" - } -} - `, - }, - - { - Name: "subgraphs, no depth restriction", - Opts: dag.DotOpts{ - MaxDepth: -1, - }, - Graph: func() *Graph { - var g Graph - root := &testDrawableOrigin{"root"} - g.Add(root) - - var sub Graph - vSubRoot := sub.Add(&testDrawableOrigin{"sub_root"}) - - var subsub Graph - subsub.Add(&testDrawableOrigin{"subsub_root"}) - vSubV := sub.Add(&testDrawableSubgraph{ - VertexName: "subsub", - SubgraphMock: &subsub, - }) - - vSub := g.Add(&testDrawableSubgraph{ - VertexName: "sub", - SubgraphMock: &sub, - }) - - g.Connect(dag.BasicEdge(vSub, root)) - sub.Connect(dag.BasicEdge(vSubV, vSubRoot)) - - return &g - }, - Expect: ` -digraph { - compound = "true" - newrank = "true" - subgraph "root" { - "[root] root" - "[root] sub" - "[root] sub" -> "[root] root" - } - subgraph "cluster_sub" { - label = "sub" - "[sub] sub_root" - "[sub] subsub" - "[sub] subsub" -> "[sub] sub_root" - } - subgraph "cluster_subsub" { - label = "subsub" - "[subsub] subsub_root" - } -} - `, - }, - - { - Name: "subgraphs, with depth restriction", - Opts: dag.DotOpts{ - MaxDepth: 1, - }, - Graph: func() *Graph { - var g Graph - root := &testDrawableOrigin{"root"} - g.Add(root) - - var sub Graph - rootSub := sub.Add(&testDrawableOrigin{"sub_root"}) - - var subsub Graph - subsub.Add(&testDrawableOrigin{"subsub_root"}) - - subV := sub.Add(&testDrawableSubgraph{ - VertexName: "subsub", - SubgraphMock: &subsub, - }) - vSub := g.Add(&testDrawableSubgraph{ - VertexName: "sub", - SubgraphMock: &sub, - }) - - 
g.Connect(dag.BasicEdge(vSub, root)) - sub.Connect(dag.BasicEdge(subV, rootSub)) - return &g - }, - Expect: ` -digraph { - compound = "true" - newrank = "true" - subgraph "root" { - "[root] root" - "[root] sub" - "[root] sub" -> "[root] root" - } - subgraph "cluster_sub" { - label = "sub" - "[sub] sub_root" - "[sub] subsub" - "[sub] subsub" -> "[sub] sub_root" - } -} - `, - }, - } - - for _, tc := range cases { - tn := tc.Name - t.Run(tn, func(t *testing.T) { - g := tc.Graph() - var err error - //actual, err := GraphDot(g, &tc.Opts) - actual := string(g.Dot(&tc.Opts)) - - if err == nil && tc.Error != "" { - t.Fatalf("%s: expected err: %s, got none", tn, tc.Error) - } - if err != nil && tc.Error == "" { - t.Fatalf("%s: unexpected err: %s", tn, err) - } - if err != nil && tc.Error != "" { - if !strings.Contains(err.Error(), tc.Error) { - t.Fatalf("%s: expected err: %s\nto contain: %s", tn, err, tc.Error) - } - return - } - - expected := strings.TrimSpace(tc.Expect) + "\n" - if actual != expected { - t.Fatalf("%s:\n\nexpected:\n%s\n\ngot:\n%s", tn, expected, actual) - } - }) - } -} - -type testGraphFunc func() *Graph - -type testDrawable struct { - VertexName string - DependentOnMock []string -} - -func (node *testDrawable) Name() string { - return node.VertexName -} -func (node *testDrawable) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{Name: n, Attrs: map[string]string{}} -} -func (node *testDrawable) DependableName() []string { - return []string{node.VertexName} -} -func (node *testDrawable) DependentOn() []string { - return node.DependentOnMock -} - -type testDrawableOrigin struct { - VertexName string -} - -func (node *testDrawableOrigin) Name() string { - return node.VertexName -} -func (node *testDrawableOrigin) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{Name: n, Attrs: map[string]string{}} -} -func (node *testDrawableOrigin) DotOrigin() bool { - return true -} -func (node *testDrawableOrigin) 
DependableName() []string { - return []string{node.VertexName} -} - -type testDrawableSubgraph struct { - VertexName string - SubgraphMock *Graph - DependentOnMock []string -} - -func (node *testDrawableSubgraph) Name() string { - return node.VertexName -} -func (node *testDrawableSubgraph) Subgraph() dag.Grapher { - return node.SubgraphMock -} -func (node *testDrawableSubgraph) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{Name: n, Attrs: map[string]string{}} -} -func (node *testDrawableSubgraph) DependentOn() []string { - return node.DependentOnMock -} diff --git a/internal/terraform/graph_interface_subgraph.go b/internal/terraform/graph_interface_subgraph.go deleted file mode 100644 index 6aa2206dfd74..000000000000 --- a/internal/terraform/graph_interface_subgraph.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" -) - -// GraphNodeModuleInstance says that a node is part of a graph with a -// different path, and the context should be adjusted accordingly. -type GraphNodeModuleInstance interface { - Path() addrs.ModuleInstance -} - -// GraphNodeModulePath is implemented by all referenceable nodes, to indicate -// their configuration path in unexpanded modules. -type GraphNodeModulePath interface { - ModulePath() addrs.Module -} diff --git a/internal/terraform/graph_test.go b/internal/terraform/graph_test.go deleted file mode 100644 index 5e163a021353..000000000000 --- a/internal/terraform/graph_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/dag" -) - -// testGraphnotContains is an assertion helper that tests that a node is -// NOT contained in the graph. 
-func testGraphNotContains(t *testing.T, g *Graph, name string) { - for _, v := range g.Vertices() { - if dag.VertexName(v) == name { - t.Fatalf( - "Expected %q to NOT be in:\n\n%s", - name, g.String()) - } - } -} - -// testGraphHappensBefore is an assertion helper that tests that node -// A (dag.VertexName value) happens before node B. -func testGraphHappensBefore(t *testing.T, g *Graph, A, B string) { - t.Helper() - // Find the B vertex - var vertexB dag.Vertex - for _, v := range g.Vertices() { - if dag.VertexName(v) == B { - vertexB = v - break - } - } - if vertexB == nil { - t.Fatalf( - "Expected %q before %q. Couldn't find %q in:\n\n%s", - A, B, B, g.String()) - } - - // Look at ancestors - deps, err := g.Ancestors(vertexB) - if err != nil { - t.Fatalf("Error: %s in graph:\n\n%s", err, g.String()) - } - - // Make sure B is in there - for _, v := range deps.List() { - if dag.VertexName(v) == A { - // Success - return - } - } - - t.Fatalf( - "Expected %q before %q in:\n\n%s", - A, B, g.String()) -} diff --git a/internal/terraform/graph_walk.go b/internal/terraform/graph_walk.go deleted file mode 100644 index 5a0041cb4f1f..000000000000 --- a/internal/terraform/graph_walk.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// GraphWalker is an interface that can be implemented that when used -// with Graph.Walk will invoke the given callbacks under certain events. -type GraphWalker interface { - EvalContext() EvalContext - EnterPath(addrs.ModuleInstance) EvalContext - ExitPath(addrs.ModuleInstance) - Execute(EvalContext, GraphNodeExecutable) tfdiags.Diagnostics -} - -// NullGraphWalker is a GraphWalker implementation that does nothing. -// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. 
-type NullGraphWalker struct{} - -func (NullGraphWalker) EvalContext() EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} -func (NullGraphWalker) Execute(EvalContext, GraphNodeExecutable) tfdiags.Diagnostics { return nil } diff --git a/internal/terraform/graph_walk_context.go b/internal/terraform/graph_walk_context.go deleted file mode 100644 index 806095405986..000000000000 --- a/internal/terraform/graph_walk_context.go +++ /dev/null @@ -1,137 +0,0 @@ -package terraform - -import ( - "context" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/refactoring" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. 
-type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - State *states.SyncState // Used for safe concurrent access to state - RefreshState *states.SyncState // Used for safe concurrent access to state - PrevRunState *states.SyncState // Used for safe concurrent access to state - Changes *plans.ChangesSync // Used for safe concurrent writes to changes - Checks *checks.State // Used for safe concurrent writes of checkable objects and their check results - InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances - MoveResults refactoring.MoveResults // Read-only record of earlier processing of move statements - Operation walkOperation - StopContext context.Context - RootVariableValues InputValues - Config *configs.Config - - // This is an output. Do not set this, nor read it while a graph walk - // is in progress. - NonFatalDiagnostics tfdiags.Diagnostics - - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - variableValues map[string]map[string]cty.Value - variableValuesLock sync.Mutex - providerCache map[string]providers.Interface - providerSchemas map[string]*ProviderSchema - providerLock sync.Mutex - provisionerCache map[string]provisioners.Interface - provisionerSchemas map[string]*configschema.Block - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := path.String() - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - ctx := w.EvalContext().WithPath(path) - w.contexts[key] = ctx.(*BuiltinEvalContext) - return ctx -} - -func (w *ContextGraphWalker) EvalContext() EvalContext { - w.once.Do(w.init) - - // Our evaluator shares some locks with the main context and the walker - // so that we can safely run multiple evaluations at once across - // different 
modules. - evaluator := &Evaluator{ - Meta: w.Context.meta, - Config: w.Config, - Operation: w.Operation, - State: w.State, - Changes: w.Changes, - Plugins: w.Context.plugins, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - InstanceExpanderValue: w.InstanceExpander, - Plugins: w.Context.plugins, - MoveResultsValue: w.MoveResults, - ProviderCache: w.providerCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - ChangesValue: w.Changes, - ChecksValue: w.Checks, - StateValue: w.State, - RefreshStateValue: w.RefreshState, - PrevRunStateValue: w.PrevRunState, - Evaluator: evaluator, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - return ctx -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext) - w.providerCache = make(map[string]providers.Interface) - w.providerSchemas = make(map[string]*ProviderSchema) - w.provisionerCache = make(map[string]provisioners.Interface) - w.provisionerSchemas = make(map[string]*configschema.Block) - w.variableValues = make(map[string]map[string]cty.Value) - - // Populate root module variable values. Other modules will be populated - // during the graph walk. 
- w.variableValues[""] = make(map[string]cty.Value) - for k, iv := range w.RootVariableValues { - w.variableValues[""][k] = iv.Value - } -} - -func (w *ContextGraphWalker) Execute(ctx EvalContext, n GraphNodeExecutable) tfdiags.Diagnostics { - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - defer w.Context.parallelSem.Release() - - return n.Execute(ctx, w.Operation) -} diff --git a/internal/terraform/graph_walk_operation.go b/internal/terraform/graph_walk_operation.go deleted file mode 100644 index 798ff20e1392..000000000000 --- a/internal/terraform/graph_walk_operation.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. -type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkApply - walkPlan - walkPlanDestroy - walkValidate - walkDestroy - walkImport - walkEval // used just to prepare EvalContext for expression evaluation, with no other actions -) diff --git a/internal/terraform/graph_walk_test.go b/internal/terraform/graph_walk_test.go deleted file mode 100644 index 88b52a748163..000000000000 --- a/internal/terraform/graph_walk_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import ( - "testing" -) - -func TestNullGraphWalker_impl(t *testing.T) { - var _ GraphWalker = NullGraphWalker{} -} diff --git a/internal/terraform/hook.go b/internal/terraform/hook.go deleted file mode 100644 index 7e927e8a5db7..000000000000 --- a/internal/terraform/hook.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -// HookAction is an enum of actions that can be taken as a result of a hook -// callback. 
This allows you to modify the behavior of Terraform at runtime. -type HookAction byte - -const ( - // HookActionContinue continues with processing as usual. - HookActionContinue HookAction = iota - - // HookActionHalt halts immediately: no more hooks are processed - // and the action that Terraform was about to take is cancelled. - HookActionHalt -) - -// Hook is the interface that must be implemented to hook into various -// parts of Terraform, allowing you to inspect or change behavior at runtime. -// -// There are MANY hook points into Terraform. If you only want to implement -// some hook points, but not all (which is the likely case), then embed the -// NilHook into your struct, which implements all of the interface but does -// nothing. Then, override only the functions you want to implement. -type Hook interface { - // PreApply and PostApply are called before and after an action for a - // single instance is applied. The error argument in PostApply is the - // error, if any, that was returned from the provider Apply call itself. - PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a provider is given - // the opportunity to customize the proposed new state to produce the - // planned new state. - PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) - PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - - // The provisioning hooks signal both the overall start end end of - // provisioning for a particular instance and of each of the individual - // configured provisioners for each instance. 
The sequence of these - // for a given instance might look something like this: - // - // PreProvisionInstance(aws_instance.foo[1], ...) - // PreProvisionInstanceStep(aws_instance.foo[1], "file") - // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) - // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") - // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) - // PostProvisionInstance(aws_instance.foo[1], ...) - // - // ProvisionOutput is called with output sent back by the provisioners. - // This will be called multiple times as output comes in, with each call - // representing one line of output. It cannot control whether the - // provisioner continues running. - PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) - PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) - ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) - - // PreRefresh and PostRefresh are called before and after a single - // resource state is refreshed, respectively. - PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) - PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) - - // PreImportState and PostImportState are called before and after - // (respectively) each state import operation for a given resource address. 
- PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) - PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) - - // PostStateUpdate is called each time the state is updated. It receives - // a deep copy of the state, which it may therefore access freely without - // any need for locks to protect from concurrent writes from the caller. - PostStateUpdate(new *states.State) (HookAction, error) -} - -// NilHook is a Hook implementation that does nothing. It exists only to -// simplify implementing hooks. You can embed this into your Hook implementation -// and only implement the functions you are interested in. -type NilHook struct{} - -var _ Hook = (*NilHook)(nil) - -func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) 
PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { - return HookActionContinue, nil -} diff --git a/internal/terraform/hook_mock.go b/internal/terraform/hook_mock.go deleted file mode 100644 index 0511a5780504..000000000000 --- a/internal/terraform/hook_mock.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyAddr addrs.AbsResourceInstance - PreApplyGen states.Generation - PreApplyAction plans.Action - PreApplyPriorState cty.Value - PreApplyPlannedState cty.Value - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyAddr addrs.AbsResourceInstance - PostApplyGen states.Generation - PostApplyNewState cty.Value - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) - - PreDiffCalled bool - PreDiffAddr addrs.AbsResourceInstance - PreDiffGen states.Generation - PreDiffPriorState cty.Value - PreDiffProposedState cty.Value - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffAddr addrs.AbsResourceInstance - PostDiffGen states.Generation - PostDiffAction plans.Action - PostDiffPriorState cty.Value - PostDiffPlannedState cty.Value - PostDiffReturn HookAction - PostDiffError error - - PreProvisionInstanceCalled bool - PreProvisionInstanceAddr addrs.AbsResourceInstance - PreProvisionInstanceState cty.Value - PreProvisionInstanceReturn HookAction - PreProvisionInstanceError error - - PostProvisionInstanceCalled bool - PostProvisionInstanceAddr addrs.AbsResourceInstance - PostProvisionInstanceState cty.Value - PostProvisionInstanceReturn HookAction - PostProvisionInstanceError error - - PreProvisionInstanceStepCalled bool - PreProvisionInstanceStepAddr addrs.AbsResourceInstance - PreProvisionInstanceStepProvisionerType string - PreProvisionInstanceStepReturn HookAction - PreProvisionInstanceStepError error - - PostProvisionInstanceStepCalled bool - PostProvisionInstanceStepAddr addrs.AbsResourceInstance - PostProvisionInstanceStepProvisionerType string - PostProvisionInstanceStepErrorArg error - PostProvisionInstanceStepReturn HookAction - PostProvisionInstanceStepError error - - ProvisionOutputCalled bool - 
ProvisionOutputAddr addrs.AbsResourceInstance - ProvisionOutputProvisionerType string - ProvisionOutputMessage string - - PreRefreshCalled bool - PreRefreshAddr addrs.AbsResourceInstance - PreRefreshGen states.Generation - PreRefreshPriorState cty.Value - PreRefreshReturn HookAction - PreRefreshError error - - PostRefreshCalled bool - PostRefreshAddr addrs.AbsResourceInstance - PostRefreshGen states.Generation - PostRefreshPriorState cty.Value - PostRefreshNewState cty.Value - PostRefreshReturn HookAction - PostRefreshError error - - PreImportStateCalled bool - PreImportStateAddr addrs.AbsResourceInstance - PreImportStateID string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateAddr addrs.AbsResourceInstance - PostImportStateNewStates []providers.ImportedResource - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *states.State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -var _ Hook = (*MockHook)(nil) - -func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyAddr = addr - h.PreApplyGen = gen - h.PreApplyAction = action - h.PreApplyPriorState = priorState - h.PreApplyPlannedState = plannedNewState - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyAddr = addr - h.PostApplyGen = gen - h.PostApplyNewState = newState - h.PostApplyError = err - - if h.PostApplyFn != nil { - return h.PostApplyFn(addr, gen, newState, err) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, 
gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffAddr = addr - h.PreDiffGen = gen - h.PreDiffPriorState = priorState - h.PreDiffProposedState = proposedNewState - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffAddr = addr - h.PostDiffGen = gen - h.PostDiffAction = action - h.PostDiffPriorState = priorState - h.PostDiffPlannedState = plannedNewState - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceCalled = true - h.PreProvisionInstanceAddr = addr - h.PreProvisionInstanceState = state - return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError -} - -func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceCalled = true - h.PostProvisionInstanceAddr = addr - h.PostProvisionInstanceState = state - return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError -} - -func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceStepCalled = true - h.PreProvisionInstanceStepAddr = addr - h.PreProvisionInstanceStepProvisionerType = typeName - return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError -} - -func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceStepCalled = true - h.PostProvisionInstanceStepAddr 
= addr - h.PostProvisionInstanceStepProvisionerType = typeName - h.PostProvisionInstanceStepErrorArg = err - return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError -} - -func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputAddr = addr - h.ProvisionOutputProvisionerType = typeName - h.ProvisionOutputMessage = line -} - -func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshAddr = addr - h.PreRefreshGen = gen - h.PreRefreshPriorState = priorState - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshAddr = addr - h.PostRefreshPriorState = priorState - h.PostRefreshNewState = newState - return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateAddr = addr - h.PreImportStateID = importID - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateAddr = addr - h.PostImportStateNewStates = imported - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = new - return h.PostStateUpdateReturn, 
h.PostStateUpdateError -} diff --git a/internal/terraform/hook_stop.go b/internal/terraform/hook_stop.go deleted file mode 100644 index 2d4144e56090..000000000000 --- a/internal/terraform/hook_stop.go +++ /dev/null @@ -1,97 +0,0 @@ -package terraform - -import ( - "errors" - "sync/atomic" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. -type stopHook struct { - stop uint32 -} - -var _ Hook = (*stopHook)(nil) - -func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return h.hook() -} - -func 
(h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) hook() (HookAction, error) { - if h.Stopped() { - return HookActionHalt, errors.New("execution halted") - } - - return HookActionContinue, nil -} - -// reset should be called within the lock context -func (h *stopHook) Reset() { - atomic.StoreUint32(&h.stop, 0) -} - -func (h *stopHook) Stop() { - atomic.StoreUint32(&h.stop, 1) -} - -func (h *stopHook) Stopped() bool { - return atomic.LoadUint32(&h.stop) == 1 -} diff --git a/internal/terraform/hook_stop_test.go b/internal/terraform/hook_stop_test.go deleted file mode 100644 index 2c30231f9608..000000000000 --- a/internal/terraform/hook_stop_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import ( - "testing" -) - -func TestStopHook_impl(t *testing.T) { - var _ Hook = new(stopHook) -} diff --git a/internal/terraform/hook_test.go b/internal/terraform/hook_test.go deleted file mode 100644 index 0d5267dafaca..000000000000 --- a/internal/terraform/hook_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package terraform - -import ( - "sync" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - 
"github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" -) - -func TestNilHook_impl(t *testing.T) { - var _ Hook = new(NilHook) -} - -// testHook is a Hook implementation that logs the calls it receives. -// It is intended for testing that core code is emitting the correct hooks -// for a given situation. -type testHook struct { - mu sync.Mutex - Calls []*testHookCall -} - -var _ Hook = (*testHook)(nil) - -// testHookCall represents a single call in testHook. -// This hook just logs string names to make it easy to write "want" expressions -// in tests that can DeepEqual against the real calls. -type testHookCall struct { - Action string - InstanceID string -} - -func (h *testHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PreApply", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostApply", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PreDiff", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostDiff", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) 
(HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PreProvisionInstance", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostProvisionInstance", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PreProvisionInstanceStep", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostProvisionInstanceStep", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"ProvisionOutput", addr.String()}) -} - -func (h *testHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PreRefresh", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostRefresh", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, 
&testHookCall{"PreImportState", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostImportState", addr.String()}) - return HookActionContinue, nil -} - -func (h *testHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.mu.Lock() - defer h.mu.Unlock() - h.Calls = append(h.Calls, &testHookCall{"PostStateUpdate", ""}) - return HookActionContinue, nil -} diff --git a/internal/terraform/instance_expanders.go b/internal/terraform/instance_expanders.go deleted file mode 100644 index b3733afb0afd..000000000000 --- a/internal/terraform/instance_expanders.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// graphNodeExpandsInstances is implemented by nodes that causes instances to -// be registered in the instances.Expander. -type graphNodeExpandsInstances interface { - expandsInstances() -} diff --git a/internal/terraform/marks.go b/internal/terraform/marks.go deleted file mode 100644 index 8e2a3260721f..000000000000 --- a/internal/terraform/marks.go +++ /dev/null @@ -1,39 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" -) - -// marksEqual compares 2 unordered sets of PathValue marks for equality, with -// the comparison using the cty.PathValueMarks.Equal method. 
-func marksEqual(a, b []cty.PathValueMarks) bool { - if len(a) == 0 && len(b) == 0 { - return true - } - - if len(a) != len(b) { - return false - } - - less := func(s []cty.PathValueMarks) func(i, j int) bool { - return func(i, j int) bool { - // the sort only needs to be consistent, so use the GoString format - // to get a comparable value - return fmt.Sprintf("%#v", s[i]) < fmt.Sprintf("%#v", s[j]) - } - } - - sort.Slice(a, less(a)) - sort.Slice(b, less(b)) - - for i := 0; i < len(a); i++ { - if !a[i].Equal(b[i]) { - return false - } - } - - return true -} diff --git a/internal/terraform/marks_test.go b/internal/terraform/marks_test.go deleted file mode 100644 index d3f449187746..000000000000 --- a/internal/terraform/marks_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/zclconf/go-cty/cty" -) - -func TestMarksEqual(t *testing.T) { - for i, tc := range []struct { - a, b []cty.PathValueMarks - equal bool - }{ - { - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - true, - }, - { - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "A"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - false, - }, - { - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - 
[]cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - true, - }, - { - []cty.PathValueMarks{ - cty.PathValueMarks{ - Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - cty.PathValueMarks{ - Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - []cty.PathValueMarks{ - cty.PathValueMarks{ - Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - cty.PathValueMarks{ - Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, - Marks: cty.NewValueMarks(marks.Sensitive), - }, - }, - true, - }, - { - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - false, - }, - { - nil, - nil, - true, - }, - { - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - nil, - false, - }, - { - nil, - []cty.PathValueMarks{ - cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, - }, - false, - }, - } { - t.Run(fmt.Sprint(i), func(t *testing.T) { - if marksEqual(tc.a, tc.b) != tc.equal { - t.Fatalf("marksEqual(\n%#v,\n%#v,\n) != %t\n", tc.a, tc.b, tc.equal) - } - }) - } -} diff --git a/internal/terraform/node_data_destroy.go b/internal/terraform/node_data_destroy.go deleted file mode 100644 index 
0e81bb9c45d5..000000000000 --- a/internal/terraform/node_data_destroy.go +++ /dev/null @@ -1,24 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": -// it is ready to be destroyed. -type NodeDestroyableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeExecutable = (*NodeDestroyableDataResourceInstance)(nil) -) - -// GraphNodeExecutable -func (n *NodeDestroyableDataResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - log.Printf("[TRACE] NodeDestroyableDataResourceInstance: removing state object for %s", n.Addr) - ctx.State().SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) - return nil -} diff --git a/internal/terraform/node_data_destroy_test.go b/internal/terraform/node_data_destroy_test.go deleted file mode 100644 index f399ee4183c4..000000000000 --- a/internal/terraform/node_data_destroy_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" -) - -func TestNodeDataDestroyExecute(t *testing.T) { - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_instance", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"dynamic":{"type":"string","value":"hello"}}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - } - - node := NodeDestroyableDataResourceInstance{&NodeAbstractResourceInstance{ - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_instance", - Name: "foo", - 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - }} - - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %v", diags.Err()) - } - - // verify resource removed from state - if state.HasManagedResourceInstanceObjects() { - t.Fatal("resources still in state after NodeDataDestroy.Execute") - } -} diff --git a/internal/terraform/node_local.go b/internal/terraform/node_local.go deleted file mode 100644 index f194b9cc82a2..000000000000 --- a/internal/terraform/node_local.go +++ /dev/null @@ -1,180 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// nodeExpandLocal represents a named local value in a configuration module, -// which has not yet been expanded. -type nodeExpandLocal struct { - Addr addrs.LocalValue - Module addrs.Module - Config *configs.Local -} - -var ( - _ GraphNodeReferenceable = (*nodeExpandLocal)(nil) - _ GraphNodeReferencer = (*nodeExpandLocal)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandLocal)(nil) - _ graphNodeTemporaryValue = (*nodeExpandLocal)(nil) - _ graphNodeExpandsInstances = (*nodeExpandLocal)(nil) -) - -func (n *nodeExpandLocal) expandsInstances() {} - -// graphNodeTemporaryValue -func (n *nodeExpandLocal) temporaryValue() bool { - return true -} - -func (n *nodeExpandLocal) Name() string { - path := n.Module.String() - addr := n.Addr.String() + " (expand)" - - if path != "" { - return path + "." 
+ addr - } - return addr -} - -// GraphNodeModulePath -func (n *nodeExpandLocal) ModulePath() addrs.Module { - return n.Module -} - -// GraphNodeReferenceable -func (n *nodeExpandLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// GraphNodeReferencer -func (n *nodeExpandLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return refs -} - -func (n *nodeExpandLocal) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Module) { - o := &NodeLocal{ - Addr: n.Addr.Absolute(module), - Config: n.Config, - } - log.Printf("[TRACE] Expanding local: adding %s as %T", o.Addr.String(), o) - g.Add(o) - } - addRootNodeToGraph(&g) - return &g, nil -} - -// NodeLocal represents a named local value in a particular module. -// -// Local value nodes only have one operation, common to all walk types: -// evaluate the result and place it in state. 
-type NodeLocal struct { - Addr addrs.AbsLocalValue - Config *configs.Local -} - -var ( - _ GraphNodeModuleInstance = (*NodeLocal)(nil) - _ GraphNodeReferenceable = (*NodeLocal)(nil) - _ GraphNodeReferencer = (*NodeLocal)(nil) - _ GraphNodeExecutable = (*NodeLocal)(nil) - _ graphNodeTemporaryValue = (*NodeLocal)(nil) - _ dag.GraphNodeDotter = (*NodeLocal)(nil) -) - -// graphNodeTemporaryValue -func (n *NodeLocal) temporaryValue() bool { - return true -} - -func (n *NodeLocal) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeLocal) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *NodeLocal) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// GraphNodeReferenceable -func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.LocalValue} -} - -// GraphNodeReferencer -func (n *NodeLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return refs -} - -// GraphNodeExecutable -// NodeLocal.Execute is an Execute implementation that evaluates the -// expression for a local value and writes it into a transient part of -// the state. -func (n *NodeLocal) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - expr := n.Config.Expr - addr := n.Addr.LocalValue - - // We ignore diags here because any problems we might find will be found - // again in EvaluateExpr below. 
- refs, _ := lang.ReferencesInExpr(expr) - for _, ref := range refs { - if ref.Subject == addr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referencing local value", - Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", addr), - Subject: ref.SourceRange.ToHCL().Ptr(), - Context: expr.Range().Ptr(), - }) - } - } - if diags.HasErrors() { - return diags - } - - val, moreDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return diags - } - - state := ctx.State() - if state == nil { - diags = diags.Append(fmt.Errorf("cannot write local value to nil state")) - return diags - } - - state.SetLocalValue(addr.Absolute(ctx.Path()), val) - - return diags -} - -// dag.GraphNodeDotter impl. -func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/internal/terraform/node_local_test.go b/internal/terraform/node_local_test.go deleted file mode 100644 index c79f05eabf7e..000000000000 --- a/internal/terraform/node_local_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/states" -) - -func TestNodeLocalExecute(t *testing.T) { - tests := []struct { - Value string - Want interface{} - Err bool - }{ - { - "hello!", - "hello!", - false, - }, - { - "", - "", - false, - }, - { - "Hello, ${local.foo}", - nil, - true, // self-referencing - }, - } - - for _, test := range tests { - t.Run(test.Value, func(t 
*testing.T) { - expr, diags := hclsyntax.ParseTemplate([]byte(test.Value), "", hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - n := &NodeLocal{ - Addr: addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - Config: &configs.Local{ - Expr: expr, - }, - } - ctx := &MockEvalContext{ - StateState: states.NewState().SyncWrapper(), - - EvaluateExprResult: hcl2shim.HCL2ValueFromConfigValue(test.Want), - } - - err := n.Execute(ctx, walkApply) - if (err != nil) != test.Err { - if err != nil { - t.Errorf("unexpected error: %s", err) - } else { - t.Errorf("successful Eval; want error") - } - } - - ms := ctx.StateState.Module(addrs.RootModuleInstance) - gotLocals := ms.LocalValues - wantLocals := map[string]cty.Value{} - if test.Want != nil { - wantLocals["foo"] = hcl2shim.HCL2ValueFromConfigValue(test.Want) - } - - if !reflect.DeepEqual(gotLocals, wantLocals) { - t.Errorf( - "wrong locals after Eval\ngot: %swant: %s", - spew.Sdump(gotLocals), spew.Sdump(wantLocals), - ) - } - }) - } - -} diff --git a/internal/terraform/node_module_expand.go b/internal/terraform/node_module_expand.go deleted file mode 100644 index 28ec2b4e36e6..000000000000 --- a/internal/terraform/node_module_expand.go +++ /dev/null @@ -1,252 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -type ConcreteModuleNodeFunc func(n *nodeExpandModule) dag.Vertex - -// nodeExpandModule represents a module call in the configuration that -// might expand into multiple module instances depending on how it is -// configured. 
-type nodeExpandModule struct { - Addr addrs.Module - Config *configs.Module - ModuleCall *configs.ModuleCall -} - -var ( - _ GraphNodeExecutable = (*nodeExpandModule)(nil) - _ GraphNodeReferencer = (*nodeExpandModule)(nil) - _ GraphNodeReferenceOutside = (*nodeExpandModule)(nil) - _ graphNodeExpandsInstances = (*nodeExpandModule)(nil) -) - -func (n *nodeExpandModule) expandsInstances() {} - -func (n *nodeExpandModule) Name() string { - return n.Addr.String() + " (expand)" -} - -// GraphNodeModulePath implementation -func (n *nodeExpandModule) ModulePath() addrs.Module { - return n.Addr -} - -// GraphNodeReferencer implementation -func (n *nodeExpandModule) References() []*addrs.Reference { - var refs []*addrs.Reference - - if n.ModuleCall == nil { - return nil - } - - refs = append(refs, n.DependsOn()...) - - // Expansion only uses the count and for_each expressions, so this - // particular graph node only refers to those. - // Individual variable values in the module call definition might also - // refer to other objects, but that's handled by - // NodeApplyableModuleVariable. - // - // Because our Path method returns the module instance that contains - // our call, these references will be correctly interpreted as being - // in the calling module's namespace, not the namespaces of any of the - // child module instances we might expand to during our evaluation. - - if n.ModuleCall.Count != nil { - countRefs, _ := lang.ReferencesInExpr(n.ModuleCall.Count) - refs = append(refs, countRefs...) - } - if n.ModuleCall.ForEach != nil { - forEachRefs, _ := lang.ReferencesInExpr(n.ModuleCall.ForEach) - refs = append(refs, forEachRefs...) 
- } - return refs -} - -func (n *nodeExpandModule) DependsOn() []*addrs.Reference { - if n.ModuleCall == nil { - return nil - } - - var refs []*addrs.Reference - for _, traversal := range n.ModuleCall.DependsOn { - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - // We ignore this here, because this isn't a suitable place to return - // errors. This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) - continue - } - - refs = append(refs, ref) - } - - return refs -} - -// GraphNodeReferenceOutside -func (n *nodeExpandModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Addr, n.Addr.Parent() -} - -// GraphNodeExecutable -func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - expander := ctx.InstanceExpander() - _, call := n.Addr.Call() - - // nodeExpandModule itself does not have visibility into how its ancestors - // were expanded, so we use the expander here to provide all possible paths - // to our module, and register module instances with each of them. - for _, module := range expander.ExpandModule(n.Addr.Parent()) { - ctx = ctx.WithPath(module) - switch { - case n.ModuleCall.Count != nil: - count, ctDiags := evaluateCountExpression(n.ModuleCall.Count, ctx) - diags = diags.Append(ctDiags) - if diags.HasErrors() { - return diags - } - expander.SetModuleCount(module, call, count) - - case n.ModuleCall.ForEach != nil: - forEach, feDiags := evaluateForEachExpression(n.ModuleCall.ForEach, ctx) - diags = diags.Append(feDiags) - if diags.HasErrors() { - return diags - } - expander.SetModuleForEach(module, call, forEach) - - default: - expander.SetModuleSingle(module, call) - } - } - - return diags - -} - -// nodeCloseModule represents an expanded module during apply, and is visited -// after all other module instance nodes. 
This node will depend on all module -// instance resource and outputs, and anything depending on the module should -// wait on this node. -// Besides providing a root node for dependency ordering, nodeCloseModule also -// cleans up state after all the module nodes have been evaluated, removing -// empty resources and modules from the state. -// The root module instance also closes any remaining provisioner plugins which -// do not have a lifecycle controlled by individual graph nodes. -type nodeCloseModule struct { - Addr addrs.Module -} - -var ( - _ GraphNodeReferenceable = (*nodeCloseModule)(nil) - _ GraphNodeReferenceOutside = (*nodeCloseModule)(nil) - _ GraphNodeExecutable = (*nodeCloseModule)(nil) -) - -func (n *nodeCloseModule) ModulePath() addrs.Module { - return n.Addr -} - -func (n *nodeCloseModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Addr.Parent(), n.Addr -} - -func (n *nodeCloseModule) ReferenceableAddrs() []addrs.Referenceable { - _, call := n.Addr.Call() - return []addrs.Referenceable{ - call, - } -} - -func (n *nodeCloseModule) Name() string { - if len(n.Addr) == 0 { - return "root" - } - return n.Addr.String() + " (close)" -} - -func (n *nodeCloseModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - if !n.Addr.IsRoot() { - return - } - - // If this is the root module, we are cleaning up the walk, so close - // any running provisioners - diags = diags.Append(ctx.CloseProvisioners()) - - switch op { - case walkApply, walkDestroy: - state := ctx.State().Lock() - defer ctx.State().Unlock() - - for modKey, mod := range state.Modules { - // clean out any empty resources - for resKey, res := range mod.Resources { - if len(res.Instances) == 0 { - delete(mod.Resources, resKey) - } - } - - // empty child modules are always removed - if len(mod.Resources) == 0 && !mod.Addr.IsRoot() { - delete(state.Modules, modKey) - } - } - return nil - default: - return nil - } -} - -// nodeValidateModule wraps a 
nodeExpand module for validation, ensuring that -// no expansion is attempted during evaluation, when count and for_each -// expressions may not be known. -type nodeValidateModule struct { - nodeExpandModule -} - -var _ GraphNodeExecutable = (*nodeValidateModule)(nil) - -// GraphNodeEvalable -func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - _, call := n.Addr.Call() - expander := ctx.InstanceExpander() - - // Modules all evaluate to single instances during validation, only to - // create a proper context within which to evaluate. All parent modules - // will be a single instance, but still get our address in the expected - // manner anyway to ensure they've been registered correctly. - for _, module := range expander.ExpandModule(n.Addr.Parent()) { - ctx = ctx.WithPath(module) - - // Validate our for_each and count expressions at a basic level - // We skip validation on known, because there will be unknown values before - // a full expansion, presuming these errors will be caught in later steps - switch { - case n.ModuleCall.Count != nil: - _, countDiags := evaluateCountExpressionValue(n.ModuleCall.Count, ctx) - diags = diags.Append(countDiags) - - case n.ModuleCall.ForEach != nil: - _, forEachDiags := evaluateForEachExpressionValue(n.ModuleCall.ForEach, ctx, true) - diags = diags.Append(forEachDiags) - } - - diags = diags.Append(validateDependsOn(ctx, n.ModuleCall.DependsOn)) - - // now set our own mode to single - expander.SetModuleSingle(module, call) - } - - return diags -} diff --git a/internal/terraform/node_module_expand_test.go b/internal/terraform/node_module_expand_test.go deleted file mode 100644 index 146f754f37c6..000000000000 --- a/internal/terraform/node_module_expand_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - 
"github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeExpandModuleExecute(t *testing.T) { - ctx := &MockEvalContext{ - InstanceExpanderExpander: instances.NewExpander(), - } - ctx.installSimpleEval() - - node := nodeExpandModule{ - Addr: addrs.Module{"child"}, - ModuleCall: &configs.ModuleCall{ - Count: hcltest.MockExprLiteral(cty.NumberIntVal(2)), - }, - } - - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !ctx.InstanceExpanderCalled { - t.Fatal("did not expand") - } -} - -func TestNodeCloseModuleExecute(t *testing.T) { - t.Run("walkApply", func(t *testing.T) { - state := states.NewState() - state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - } - node := nodeCloseModule{addrs.Module{"child"}} - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - // Since module.child has no resources, it should be removed - if _, ok := state.Modules["module.child"]; !ok { - t.Fatal("module.child should not be removed from state yet") - } - - // the root module should do all the module cleanup - node = nodeCloseModule{addrs.RootModule} - diags = node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - // Since module.child has no resources, it should be removed - if _, ok := state.Modules["module.child"]; ok { - t.Fatal("module.child was not removed from state") - } - }) - - // walkImport is a no-op - t.Run("walkImport", func(t *testing.T) { - state := states.NewState() - state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - } - node := nodeCloseModule{addrs.Module{"child"}} - - diags := node.Execute(ctx, walkImport) - if diags.HasErrors() { 
- t.Fatalf("unexpected error: %s", diags.Err()) - } - if _, ok := state.Modules["module.child"]; !ok { - t.Fatal("module.child was removed from state, expected no-op") - } - }) -} - -func TestNodeValidateModuleExecute(t *testing.T) { - t.Run("success", func(t *testing.T) { - ctx := &MockEvalContext{ - InstanceExpanderExpander: instances.NewExpander(), - } - ctx.installSimpleEval() - node := nodeValidateModule{ - nodeExpandModule{ - Addr: addrs.Module{"child"}, - ModuleCall: &configs.ModuleCall{ - Count: hcltest.MockExprLiteral(cty.NumberIntVal(2)), - }, - }, - } - - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %v", diags.Err()) - } - }) - - t.Run("invalid count", func(t *testing.T) { - ctx := &MockEvalContext{ - InstanceExpanderExpander: instances.NewExpander(), - } - ctx.installSimpleEval() - node := nodeValidateModule{ - nodeExpandModule{ - Addr: addrs.Module{"child"}, - ModuleCall: &configs.ModuleCall{ - Count: hcltest.MockExprLiteral(cty.StringVal("invalid")), - }, - }, - } - - err := node.Execute(ctx, walkApply) - if err == nil { - t.Fatal("expected error, got success") - } - }) - -} diff --git a/internal/terraform/node_module_variable.go b/internal/terraform/node_module_variable.go deleted file mode 100644 index 6d5ae2af89cb..000000000000 --- a/internal/terraform/node_module_variable.go +++ /dev/null @@ -1,244 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// nodeExpandModuleVariable is the placeholder for an variable that has not yet had -// its module path expanded. 
-type nodeExpandModuleVariable struct { - Addr addrs.InputVariable - Module addrs.Module - Config *configs.Variable - Expr hcl.Expression -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferenceOutside = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferenceable = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferencer = (*nodeExpandModuleVariable)(nil) - _ graphNodeTemporaryValue = (*nodeExpandModuleVariable)(nil) - _ graphNodeExpandsInstances = (*nodeExpandModuleVariable)(nil) -) - -func (n *nodeExpandModuleVariable) expandsInstances() {} - -func (n *nodeExpandModuleVariable) temporaryValue() bool { - return true -} - -func (n *nodeExpandModuleVariable) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Module) { - o := &nodeModuleVariable{ - Addr: n.Addr.Absolute(module), - Config: n.Config, - Expr: n.Expr, - ModuleInstance: module, - } - g.Add(o) - } - addRootNodeToGraph(&g) - return &g, nil -} - -func (n *nodeExpandModuleVariable) Name() string { - return fmt.Sprintf("%s.%s (expand)", n.Module, n.Addr.String()) -} - -// GraphNodeModulePath -func (n *nodeExpandModuleVariable) ModulePath() addrs.Module { - return n.Module -} - -// GraphNodeReferencer -func (n *nodeExpandModuleVariable) References() []*addrs.Reference { - - // If we have no value expression, we cannot depend on anything. - if n.Expr == nil { - return nil - } - - // Variables in the root don't depend on anything, because their values - // are gathered prior to the graph walk and recorded in the context. - if len(n.Module) == 0 { - return nil - } - - // Otherwise, we depend on anything referenced by our value expression. - // We ignore diagnostics here under the assumption that we'll re-eval - // all these things later and catch them then; for our purposes here, - // we only care about valid references. 
- // - // Due to our GraphNodeReferenceOutside implementation, the addresses - // returned by this function are interpreted in the _parent_ module from - // where our associated variable was declared, which is correct because - // our value expression is assigned within a "module" block in the parent - // module. - refs, _ := lang.ReferencesInExpr(n.Expr) - return refs -} - -// GraphNodeReferenceOutside implementation -func (n *nodeExpandModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Module, n.Module.Parent() -} - -// GraphNodeReferenceable -func (n *nodeExpandModuleVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// nodeModuleVariable represents a module variable input during -// the apply step. -type nodeModuleVariable struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable // Config is the var in the config - Expr hcl.Expression // Expr is the value expression given in the call - // ModuleInstance in order to create the appropriate context for evaluating - // ModuleCallArguments, ex. so count.index and each.key can resolve - ModuleInstance addrs.ModuleInstance -} - -// Ensure that we are implementing all of the interfaces we think we are -// implementing. -var ( - _ GraphNodeModuleInstance = (*nodeModuleVariable)(nil) - _ GraphNodeExecutable = (*nodeModuleVariable)(nil) - _ graphNodeTemporaryValue = (*nodeModuleVariable)(nil) - _ dag.GraphNodeDotter = (*nodeModuleVariable)(nil) -) - -func (n *nodeModuleVariable) temporaryValue() bool { - return true -} - -func (n *nodeModuleVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *nodeModuleVariable) Path() addrs.ModuleInstance { - // We execute in the parent scope (above our own module) because - // expressions in our value are resolved in that context. 
- return n.Addr.Module.Parent() -} - -// GraphNodeModulePath -func (n *nodeModuleVariable) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// GraphNodeExecutable -func (n *nodeModuleVariable) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] nodeModuleVariable: evaluating %s", n.Addr) - - var val cty.Value - var err error - - switch op { - case walkValidate: - val, err = n.evalModuleVariable(ctx, true) - diags = diags.Append(err) - default: - val, err = n.evalModuleVariable(ctx, false) - diags = diags.Append(err) - } - if diags.HasErrors() { - return diags - } - - // Set values for arguments of a child module call, for later retrieval - // during expression evaluation. - _, call := n.Addr.Module.CallInstance() - ctx.SetModuleCallArgument(call, n.Addr.Variable, val) - - return evalVariableValidations(n.Addr, n.Config, n.Expr, ctx) -} - -// dag.GraphNodeDotter impl. -func (n *nodeModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -// evalModuleVariable produces the value for a particular variable as will -// be used by a child module instance. -// -// The result is written into a map, with its key set to the local name of the -// variable, disregarding the module instance address. A map is returned instead -// of a single value as a result of trying to be convenient for use with -// EvalContext.SetModuleCallArguments, which expects a map to merge in with any -// existing arguments. -// -// validateOnly indicates that this evaluation is only for config -// validation, and we will not have any expansion module instance -// repetition data. 
-func (n *nodeModuleVariable) evalModuleVariable(ctx EvalContext, validateOnly bool) (cty.Value, error) { - var diags tfdiags.Diagnostics - var givenVal cty.Value - var errSourceRange tfdiags.SourceRange - if expr := n.Expr; expr != nil { - var moduleInstanceRepetitionData instances.RepetitionData - - switch { - case validateOnly: - // the instance expander does not track unknown expansion values, so we - // have to assume all RepetitionData is unknown. - moduleInstanceRepetitionData = instances.RepetitionData{ - CountIndex: cty.UnknownVal(cty.Number), - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - default: - // Get the repetition data for this module instance, - // so we can create the appropriate scope for evaluating our expression - moduleInstanceRepetitionData = ctx.InstanceExpander().GetModuleInstanceRepetitionData(n.ModuleInstance) - } - - scope := ctx.EvaluationScope(nil, moduleInstanceRepetitionData) - val, moreDiags := scope.EvalExpr(expr, cty.DynamicPseudoType) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return cty.DynamicVal, diags.ErrWithWarnings() - } - givenVal = val - errSourceRange = tfdiags.SourceRangeFromHCL(expr.Range()) - } else { - // We'll use cty.NilVal to represent the variable not being set at all. - givenVal = cty.NilVal - errSourceRange = tfdiags.SourceRangeFromHCL(n.Config.DeclRange) // we use the declaration range as a fallback for an undefined variable - } - - // We construct a synthetic InputValue here to pretend as if this were - // a root module variable set from outside, just as a convenience so we - // can reuse the InputValue type for this. 
- rawVal := &InputValue{ - Value: givenVal, - SourceType: ValueFromConfig, - SourceRange: errSourceRange, - } - - finalVal, moreDiags := prepareFinalInputVariableValue(n.Addr, rawVal, n.Config) - diags = diags.Append(moreDiags) - - return finalVal, diags.ErrWithWarnings() -} diff --git a/internal/terraform/node_module_variable_test.go b/internal/terraform/node_module_variable_test.go deleted file mode 100644 index e2b458cdbbbe..000000000000 --- a/internal/terraform/node_module_variable_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package terraform - -import ( - "reflect" - "testing" - - "github.com/go-test/deep" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -func TestNodeModuleVariablePath(t *testing.T) { - n := &nodeModuleVariable{ - Addr: addrs.RootModuleInstance.InputVariable("foo"), - Config: &configs.Variable{ - Name: "foo", - Type: cty.String, - ConstraintType: cty.String, - }, - } - - want := addrs.RootModuleInstance - got := n.Path() - if got.String() != want.String() { - t.Fatalf("wrong module address %s; want %s", got, want) - } -} - -func TestNodeModuleVariableReferenceableName(t *testing.T) { - n := &nodeExpandModuleVariable{ - Addr: addrs.InputVariable{Name: "foo"}, - Config: &configs.Variable{ - Name: "foo", - Type: cty.String, - ConstraintType: cty.String, - }, - } - - { - expected := []addrs.Referenceable{ - addrs.InputVariable{Name: "foo"}, - } - actual := n.ReferenceableAddrs() - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("%#v != %#v", actual, expected) - } - } - - { - gotSelfPath, gotReferencePath := n.ReferenceOutside() - wantSelfPath := addrs.RootModuleInstance - wantReferencePath := addrs.RootModuleInstance - if got, want := gotSelfPath.String(), wantSelfPath.String(); got != want { - t.Errorf("wrong self path\ngot: %s\nwant: %s", got, want) - } - if got, want := 
gotReferencePath.String(), wantReferencePath.String(); got != want { - t.Errorf("wrong reference path\ngot: %s\nwant: %s", got, want) - } - } - -} - -func TestNodeModuleVariableReference(t *testing.T) { - n := &nodeExpandModuleVariable{ - Addr: addrs.InputVariable{Name: "foo"}, - Module: addrs.RootModule.Child("bar"), - Config: &configs.Variable{ - Name: "foo", - Type: cty.String, - ConstraintType: cty.String, - }, - Expr: &hclsyntax.ScopeTraversalExpr{ - Traversal: hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "foo"}, - }, - }, - } - - want := []*addrs.Reference{ - { - Subject: addrs.InputVariable{Name: "foo"}, - }, - } - got := n.References() - for _, problem := range deep.Equal(got, want) { - t.Error(problem) - } -} - -func TestNodeModuleVariableReference_grandchild(t *testing.T) { - n := &nodeExpandModuleVariable{ - Addr: addrs.InputVariable{Name: "foo"}, - Module: addrs.RootModule.Child("bar"), - Config: &configs.Variable{ - Name: "foo", - Type: cty.String, - ConstraintType: cty.String, - }, - Expr: &hclsyntax.ScopeTraversalExpr{ - Traversal: hcl.Traversal{ - hcl.TraverseRoot{Name: "var"}, - hcl.TraverseAttr{Name: "foo"}, - }, - }, - } - - want := []*addrs.Reference{ - { - Subject: addrs.InputVariable{Name: "foo"}, - }, - } - got := n.References() - for _, problem := range deep.Equal(got, want) { - t.Error(problem) - } -} diff --git a/internal/terraform/node_output.go b/internal/terraform/node_output.go deleted file mode 100644 index 5b3a707d2489..000000000000 --- a/internal/terraform/node_output.go +++ /dev/null @@ -1,610 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - 
"github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// nodeExpandOutput is the placeholder for a non-root module output that has -// not yet had its module path expanded. -type nodeExpandOutput struct { - Addr addrs.OutputValue - Module addrs.Module - Config *configs.Output - PlanDestroy bool - ApplyDestroy bool - RefreshOnly bool - - // Planning is set to true when this node is in a graph that was produced - // by the plan graph builder, as opposed to the apply graph builder. - // This quirk is just because we share the same node type between both - // phases but in practice there are a few small differences in the actions - // we need to take between plan and apply. See method DynamicExpand for - // details. - Planning bool -} - -var ( - _ GraphNodeReferenceable = (*nodeExpandOutput)(nil) - _ GraphNodeReferencer = (*nodeExpandOutput)(nil) - _ GraphNodeReferenceOutside = (*nodeExpandOutput)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandOutput)(nil) - _ graphNodeTemporaryValue = (*nodeExpandOutput)(nil) - _ graphNodeExpandsInstances = (*nodeExpandOutput)(nil) -) - -func (n *nodeExpandOutput) expandsInstances() {} - -func (n *nodeExpandOutput) temporaryValue() bool { - // non root outputs are temporary - return !n.Module.IsRoot() -} - -func (n *nodeExpandOutput) DynamicExpand(ctx EvalContext) (*Graph, error) { - expander := ctx.InstanceExpander() - changes := ctx.Changes() - - // If this is an output value that participates in custom condition checks - // (i.e. it has preconditions or postconditions) then the check state - // wants to know the addresses of the checkable objects so that it can - // treat them as unknown status if we encounter an error before actually - // visiting the checks. 
- // - // We must do this only during planning, because the apply phase will start - // with all of the same checkable objects that were registered during the - // planning phase. Consumers of our JSON plan and state formats expect - // that the set of checkable objects will be consistent between the plan - // and any state snapshots created during apply, and that only the statuses - // of those objects will have changed. - var checkableAddrs addrs.Set[addrs.Checkable] - if n.Planning { - if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.Addr.InModule(n.Module)) { - checkableAddrs = addrs.MakeSet[addrs.Checkable]() - } - } - - var g Graph - for _, module := range expander.ExpandModule(n.Module) { - absAddr := n.Addr.Absolute(module) - if checkableAddrs != nil { - checkableAddrs.Add(absAddr) - } - - // Find any recorded change for this output - var change *plans.OutputChangeSrc - var outputChanges []*plans.OutputChangeSrc - if module.IsRoot() { - outputChanges = changes.GetRootOutputChanges() - } else { - parent, call := module.Call() - outputChanges = changes.GetOutputChanges(parent, call) - } - for _, c := range outputChanges { - if c.Addr.String() == absAddr.String() { - change = c - break - } - } - - var node dag.Vertex - switch { - case module.IsRoot() && (n.PlanDestroy || n.ApplyDestroy): - node = &NodeDestroyableOutput{ - Addr: absAddr, - Planning: n.Planning, - } - - case n.PlanDestroy: - // nothing is done here for non-root outputs - continue - - default: - node = &NodeApplyableOutput{ - Addr: absAddr, - Config: n.Config, - Change: change, - RefreshOnly: n.RefreshOnly, - DestroyApply: n.ApplyDestroy, - Planning: n.Planning, - } - } - - log.Printf("[TRACE] Expanding output: adding %s as %T", absAddr.String(), node) - g.Add(node) - } - addRootNodeToGraph(&g) - - if checkableAddrs != nil { - checkState := ctx.Checks() - checkState.ReportCheckableObjects(n.Addr.InModule(n.Module), checkableAddrs) - } - - return &g, nil -} - -func (n *nodeExpandOutput) 
Name() string { - path := n.Module.String() - addr := n.Addr.String() + " (expand)" - if path != "" { - return path + "." + addr - } - return addr -} - -// GraphNodeModulePath -func (n *nodeExpandOutput) ModulePath() addrs.Module { - return n.Module -} - -// GraphNodeReferenceable -func (n *nodeExpandOutput) ReferenceableAddrs() []addrs.Referenceable { - // An output in the root module can't be referenced at all. - if n.Module.IsRoot() { - return nil - } - - // the output is referenced through the module call, and via the - // module itself. - _, call := n.Module.Call() - callOutput := addrs.ModuleCallOutput{ - Call: call, - Name: n.Addr.Name, - } - - // Otherwise, we can reference the output via the - // module call itself - return []addrs.Referenceable{call, callOutput} -} - -// GraphNodeReferenceOutside implementation -func (n *nodeExpandOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = n.Module - - // ...but they are referenced in the context of their calling module. - selfPath = referencePath.Parent() - - return // uses named return values -} - -// GraphNodeReferencer -func (n *nodeExpandOutput) References() []*addrs.Reference { - // DestroyNodes do not reference anything. - if n.Module.IsRoot() && n.ApplyDestroy { - return nil - } - - return referencesForOutput(n.Config) -} - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. 
-type NodeApplyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config - // If this is being evaluated during apply, we may have a change recorded already - Change *plans.OutputChangeSrc - - // Refresh-only mode means that any failing output preconditions are - // reported as warnings rather than errors - RefreshOnly bool - - // DestroyApply indicates that we are applying a destroy plan, and do not - // need to account for conditional blocks. - DestroyApply bool - - Planning bool -} - -var ( - _ GraphNodeModuleInstance = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) - _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) - _ GraphNodeExecutable = (*NodeApplyableOutput)(nil) - _ graphNodeTemporaryValue = (*NodeApplyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) -) - -func (n *NodeApplyableOutput) temporaryValue() bool { - // this must always be evaluated if it is a root module output - return !n.Addr.Module.IsRoot() -} - -func (n *NodeApplyableOutput) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *NodeApplyableOutput) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.Module) { - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = addr.Module.Module() - - // ...but they are referenced in the context of their calling module. 
- selfPath = addr.Module.Parent().Module() - - return // uses named return values -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return referenceOutsideForOutput(n.Addr) -} - -func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { - // An output in the root module can't be referenced at all. - if addr.Module.IsRoot() { - return nil - } - - // Otherwise, we can be referenced via a reference to our output name - // on the parent module's call, or via a reference to the entire call. - // e.g. module.foo.bar or just module.foo . - // Note that our ReferenceOutside method causes these addresses to be - // relative to the calling module, not the module where the output - // was declared. - _, outp := addr.ModuleCallOutput() - _, call := addr.Module.CallInstance() - - return []addrs.Referenceable{outp, call} -} - -// GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -func referencesForOutput(c *configs.Output) []*addrs.Reference { - var refs []*addrs.Reference - - impRefs, _ := lang.ReferencesInExpr(c.Expr) - expRefs, _ := lang.References(c.DependsOn) - - refs = append(refs, impRefs...) - refs = append(refs, expRefs...) - - for _, check := range c.Preconditions { - condRefs, _ := lang.ReferencesInExpr(check.Condition) - refs = append(refs, condRefs...) - errRefs, _ := lang.ReferencesInExpr(check.ErrorMessage) - refs = append(refs, errRefs...) 
- } - - return refs -} - -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []*addrs.Reference { - return referencesForOutput(n.Config) -} - -// GraphNodeExecutable -func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - state := ctx.State() - if state == nil { - return - } - - changes := ctx.Changes() // may be nil, if we're not working on a changeset - - val := cty.UnknownVal(cty.DynamicPseudoType) - changeRecorded := n.Change != nil - // we we have a change recorded, we don't need to re-evaluate if the value - // was known - if changeRecorded { - change, err := n.Change.Decode() - diags = diags.Append(err) - if err == nil { - val = change.After - } - } - - // Checks are not evaluated during a destroy. The checks may fail, may not - // be valid, or may not have been registered at all. - if !n.DestroyApply { - checkRuleSeverity := tfdiags.Error - if n.RefreshOnly { - checkRuleSeverity = tfdiags.Warning - } - checkDiags := evalCheckRules( - addrs.OutputPrecondition, - n.Config.Preconditions, - ctx, n.Addr, EvalDataForNoInstanceKey, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - if diags.HasErrors() { - return diags // failed preconditions prevent further evaluation - } - } - - // If there was no change recorded, or the recorded change was not wholly - // known, then we need to re-evaluate the output - if !changeRecorded || !val.IsWhollyKnown() { - // This has to run before we have a state lock, since evaluation also - // reads the state - var evalDiags tfdiags.Diagnostics - val, evalDiags = ctx.EvaluateExpr(n.Config.Expr, cty.DynamicPseudoType, nil) - diags = diags.Append(evalDiags) - - // We'll handle errors below, after we have loaded the module. 
- // Outputs don't have a separate mode for validation, so validate - // depends_on expressions here too - diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) - - // For root module outputs in particular, an output value must be - // statically declared as sensitive in order to dynamically return - // a sensitive result, to help avoid accidental exposure in the state - // of a sensitive value that the user doesn't want to include there. - if n.Addr.Module.IsRoot() { - if !n.Config.Sensitive && marks.Contains(val, marks.Sensitive) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Output refers to sensitive values", - Detail: `To reduce the risk of accidentally exporting sensitive data that was intended to be only internal, Terraform requires that any root module output containing sensitive data be explicitly marked as sensitive, to confirm your intent. - -If you do intend to export this data, annotate the output value as sensitive by adding the following argument: - sensitive = true`, - Subject: n.Config.DeclRange.Ptr(), - }) - } - } - } - - // handling the interpolation error - if diags.HasErrors() { - if flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr, diags.Err()) - // if we're continuing, make sure the output is included, and - // marked as unknown. If the evaluator was able to find a type - // for the value in spite of the error then we'll use it. - n.setValue(state, changes, cty.UnknownVal(val.Type())) - - // Keep existing warnings, while converting errors to warnings. - // This is not meant to be the normal path, so there no need to - // make the errors pretty. 
- var warnings tfdiags.Diagnostics - for _, d := range diags { - switch d.Severity() { - case tfdiags.Warning: - warnings = warnings.Append(d) - case tfdiags.Error: - desc := d.Description() - warnings = warnings.Append(tfdiags.SimpleWarning(fmt.Sprintf("%s:%s", desc.Summary, desc.Detail))) - } - } - - return warnings - } - return diags - } - n.setValue(state, changes, val) - - // If we were able to evaluate a new value, we can update that in the - // refreshed state as well. - if state = ctx.RefreshState(); state != nil && val.IsWhollyKnown() { - // we only need to update the state, do not pass in the changes again - n.setValue(state, nil, val) - } - - return diags -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -// NodeDestroyableOutput represents an output that is "destroyable": -// its application will remove the output from the state. 
-type NodeDestroyableOutput struct { - Addr addrs.AbsOutputValue - Planning bool -} - -var ( - _ GraphNodeExecutable = (*NodeDestroyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) -) - -func (n *NodeDestroyableOutput) Name() string { - return fmt.Sprintf("%s (destroy)", n.Addr.String()) -} - -// GraphNodeModulePath -func (n *NodeDestroyableOutput) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -func (n *NodeDestroyableOutput) temporaryValue() bool { - // this must always be evaluated if it is a root module output - return !n.Addr.Module.IsRoot() -} - -// GraphNodeExecutable -func (n *NodeDestroyableOutput) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - state := ctx.State() - if state == nil { - return nil - } - - // if this is a root module, try to get a before value from the state for - // the diff - sensitiveBefore := false - before := cty.NullVal(cty.DynamicPseudoType) - mod := state.Module(n.Addr.Module) - if n.Addr.Module.IsRoot() && mod != nil { - if o, ok := mod.OutputValues[n.Addr.OutputValue.Name]; ok { - sensitiveBefore = o.Sensitive - before = o.Value - } else { - // If the output was not in state, a delete change would - // be meaningless, so exit early. 
- return nil - - } - } - - changes := ctx.Changes() - if changes != nil && n.Planning { - change := &plans.OutputChange{ - Addr: n.Addr, - Sensitive: sensitiveBefore, - Change: plans.Change{ - Action: plans.Delete, - Before: before, - After: cty.NullVal(cty.DynamicPseudoType), - }, - } - - cs, err := change.Encode() - if err != nil { - // Should never happen, since we just constructed this right above - panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err)) - } - log.Printf("[TRACE] NodeDestroyableOutput: Saving %s change for %s in changeset", change.Action, n.Addr) - - changes.RemoveOutputChange(n.Addr) // remove any existing planned change, if present - changes.AppendOutputChange(cs) // add the new planned change - } - - state.RemoveOutputValue(n.Addr) - return nil -} - -// dag.GraphNodeDotter impl. -func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { - if changes != nil && n.Planning { - // if this is a root module, try to get a before value from the state for - // the diff - sensitiveBefore := false - before := cty.NullVal(cty.DynamicPseudoType) - - // is this output new to our state? - newOutput := true - - mod := state.Module(n.Addr.Module) - if n.Addr.Module.IsRoot() && mod != nil { - for name, o := range mod.OutputValues { - if name == n.Addr.OutputValue.Name { - before = o.Value - sensitiveBefore = o.Sensitive - newOutput = false - break - } - } - } - - // We will not show the value if either the before or after are marked - // as sensitive. We can show the value again once sensitivity is - // removed from both the config and the state. 
- sensitiveChange := sensitiveBefore || n.Config.Sensitive - - // strip any marks here just to be sure we don't panic on the True comparison - unmarkedVal, _ := val.UnmarkDeep() - - action := plans.Update - switch { - case val.IsNull() && before.IsNull(): - // This is separate from the NoOp case below, since we can ignore - // sensitivity here when there are only null values. - action = plans.NoOp - - case newOutput: - // This output was just added to the configuration - action = plans.Create - - case val.IsWhollyKnown() && - unmarkedVal.Equals(before).True() && - n.Config.Sensitive == sensitiveBefore: - // Sensitivity must also match to be a NoOp. - // Theoretically marks may not match here, but sensitivity is the - // only one we can act on, and the state will have been loaded - // without any marks to consider. - action = plans.NoOp - } - - change := &plans.OutputChange{ - Addr: n.Addr, - Sensitive: sensitiveChange, - Change: plans.Change{ - Action: action, - Before: before, - After: val, - }, - } - - cs, err := change.Encode() - if err != nil { - // Should never happen, since we just constructed this right above - panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err)) - } - log.Printf("[TRACE] setValue: Saving %s change for %s in changeset", change.Action, n.Addr) - changes.AppendOutputChange(cs) // add the new planned change - } - - if changes != nil && !n.Planning { - // During apply there is no longer any change to track, so we must - // ensure the state is updated and not overridden by a change. - changes.RemoveOutputChange(n.Addr) - } - - // Null outputs must be saved for modules so that they can still be - // evaluated. Null root outputs are removed entirely, which is always fine - // because they can't be referenced by anything else in the configuration. 
- if n.Addr.Module.IsRoot() && val.IsNull() { - log.Printf("[TRACE] setValue: Removing %s from state (it is now null)", n.Addr) - state.RemoveOutputValue(n.Addr) - return - } - - log.Printf("[TRACE] setValue: Saving value for %s in state", n.Addr) - - // non-root outputs need to keep sensitive marks for evaluation, but are - // not serialized. - if n.Addr.Module.IsRoot() { - val, _ = val.UnmarkDeep() - val = cty.UnknownAsNull(val) - } - - state.SetOutputValue(n.Addr, val, n.Config.Sensitive) -} diff --git a/internal/terraform/node_output_test.go b/internal/terraform/node_output_test.go deleted file mode 100644 index 80d60539e83c..000000000000 --- a/internal/terraform/node_output_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/states" -) - -func TestNodeApplyableOutputExecute_knownValue(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = states.NewState().SyncWrapper() - ctx.RefreshStateState = states.NewState().SyncWrapper() - ctx.ChecksState = checks.NewState(nil) - - config := &configs.Output{Name: "map-output"} - addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) - node := &NodeApplyableOutput{Config: config, Addr: addr} - val := cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b"), - }) - ctx.EvaluateExprResult = val - - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected execute error: %s", err) - } - - outputVal := ctx.StateState.OutputValue(addr) - if got, want := outputVal.Value, val; !got.RawEquals(want) { - t.Errorf("wrong output value in state\n got: %#v\nwant: %#v", got, want) - } - - if !ctx.RefreshStateCalled { - 
t.Fatal("should have called RefreshState, but didn't") - } - refreshOutputVal := ctx.RefreshStateState.OutputValue(addr) - if got, want := refreshOutputVal.Value, val; !got.RawEquals(want) { - t.Fatalf("wrong output value in refresh state\n got: %#v\nwant: %#v", got, want) - } -} - -func TestNodeApplyableOutputExecute_noState(t *testing.T) { - ctx := new(MockEvalContext) - - config := &configs.Output{Name: "map-output"} - addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) - node := &NodeApplyableOutput{Config: config, Addr: addr} - val := cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b"), - }) - ctx.EvaluateExprResult = val - - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected execute error: %s", err) - } -} - -func TestNodeApplyableOutputExecute_invalidDependsOn(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = states.NewState().SyncWrapper() - ctx.ChecksState = checks.NewState(nil) - - config := &configs.Output{ - Name: "map-output", - DependsOn: []hcl.Traversal{ - { - hcl.TraverseRoot{Name: "test_instance"}, - hcl.TraverseAttr{Name: "foo"}, - hcl.TraverseAttr{Name: "bar"}, - }, - }, - } - addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) - node := &NodeApplyableOutput{Config: config, Addr: addr} - val := cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b"), - }) - ctx.EvaluateExprResult = val - - diags := node.Execute(ctx, walkApply) - if !diags.HasErrors() { - t.Fatal("expected execute error, but there was none") - } - if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { - t.Errorf("expected error to include %q, but was: %s", want, got) - } -} - -func TestNodeApplyableOutputExecute_sensitiveValueNotOutput(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = states.NewState().SyncWrapper() - ctx.ChecksState = checks.NewState(nil) - - config := &configs.Output{Name: "map-output"} - addr := 
addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) - node := &NodeApplyableOutput{Config: config, Addr: addr} - val := cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b").Mark(marks.Sensitive), - }) - ctx.EvaluateExprResult = val - - diags := node.Execute(ctx, walkApply) - if !diags.HasErrors() { - t.Fatal("expected execute error, but there was none") - } - if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { - t.Errorf("expected error to include %q, but was: %s", want, got) - } -} - -func TestNodeApplyableOutputExecute_sensitiveValueAndOutput(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = states.NewState().SyncWrapper() - ctx.ChecksState = checks.NewState(nil) - - config := &configs.Output{ - Name: "map-output", - Sensitive: true, - } - addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) - node := &NodeApplyableOutput{Config: config, Addr: addr} - val := cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("b").Mark(marks.Sensitive), - }) - ctx.EvaluateExprResult = val - - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected execute error: %s", err) - } - - // Unmarked value should be stored in state - outputVal := ctx.StateState.OutputValue(addr) - want, _ := val.UnmarkDeep() - if got := outputVal.Value; !got.RawEquals(want) { - t.Errorf("wrong output value in state\n got: %#v\nwant: %#v", got, want) - } -} - -func TestNodeDestroyableOutputExecute(t *testing.T) { - outputAddr := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) - - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetOutputValue("foo", cty.StringVal("bar"), false) - state.OutputValue(outputAddr) - - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - } - node := NodeDestroyableOutput{Addr: outputAddr} - - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("Unexpected error: %s", 
diags.Err()) - } - if state.OutputValue(outputAddr) != nil { - t.Fatal("Unexpected outputs in state after removal") - } -} - -func TestNodeDestroyableOutputExecute_notInState(t *testing.T) { - outputAddr := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) - - state := states.NewState() - - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - } - node := NodeDestroyableOutput{Addr: outputAddr} - - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("Unexpected error: %s", diags.Err()) - } - if state.OutputValue(outputAddr) != nil { - t.Fatal("Unexpected outputs in state after removal") - } -} diff --git a/internal/terraform/node_provider.go b/internal/terraform/node_provider.go deleted file mode 100644 index c5b09136cb28..000000000000 --- a/internal/terraform/node_provider.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeApplyableProvider represents a provider during an apply. 
-type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -var ( - _ GraphNodeExecutable = (*NodeApplyableProvider)(nil) -) - -// GraphNodeExecutable -func (n *NodeApplyableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - _, err := ctx.InitProvider(n.Addr) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - provider, _, err := getProvider(ctx, n.Addr) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - switch op { - case walkValidate: - log.Printf("[TRACE] NodeApplyableProvider: validating configuration for %s", n.Addr) - return diags.Append(n.ValidateProvider(ctx, provider)) - case walkPlan, walkPlanDestroy, walkApply, walkDestroy: - log.Printf("[TRACE] NodeApplyableProvider: configuring %s", n.Addr) - return diags.Append(n.ConfigureProvider(ctx, provider, false)) - case walkImport: - log.Printf("[TRACE] NodeApplyableProvider: configuring %s (requiring that configuration is wholly known)", n.Addr) - return diags.Append(n.ConfigureProvider(ctx, provider, true)) - } - return diags -} - -func (n *NodeApplyableProvider) ValidateProvider(ctx EvalContext, provider providers.Interface) (diags tfdiags.Diagnostics) { - - configBody := buildProviderConfig(ctx, n.Addr, n.ProviderConfig()) - - // if a provider config is empty (only an alias), return early and don't continue - // validation. validate doesn't need to fully configure the provider itself, so - // skipping a provider with an implied configuration won't prevent other validation from completing. 
- _, noConfigDiags := configBody.Content(&hcl.BodySchema{}) - if !noConfigDiags.HasErrors() { - return nil - } - - schemaResp := provider.GetProviderSchema() - diags = diags.Append(schemaResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) - if diags.HasErrors() { - return diags - } - - configSchema := schemaResp.Provider.Block - if configSchema == nil { - // Should never happen in real code, but often comes up in tests where - // mock schemas are being used that tend to be incomplete. - log.Printf("[WARN] ValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) - configSchema = &configschema.Block{} - } - - configVal, _, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - if evalDiags.HasErrors() { - return diags.Append(evalDiags) - } - diags = diags.Append(evalDiags) - - // If our config value contains any marked values, ensure those are - // stripped out before sending this to the provider - unmarkedConfigVal, _ := configVal.UnmarkDeep() - - req := providers.ValidateProviderConfigRequest{ - Config: unmarkedConfigVal, - } - - validateResp := provider.ValidateProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) - - return diags -} - -// ConfigureProvider configures a provider that is already initialized and retrieved. -// If verifyConfigIsKnown is true, ConfigureProvider will return an error if the -// provider configVal is not wholly known and is meant only for use during import. 
-func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider providers.Interface, verifyConfigIsKnown bool) (diags tfdiags.Diagnostics) { - config := n.ProviderConfig() - - configBody := buildProviderConfig(ctx, n.Addr, config) - - resp := provider.GetProviderSchema() - diags = diags.Append(resp.Diagnostics.InConfigBody(configBody, n.Addr.String())) - if diags.HasErrors() { - return diags - } - - configSchema := resp.Provider.Block - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return diags - } - - if verifyConfigIsKnown && !configVal.IsWhollyKnown() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration", - Detail: fmt.Sprintf("The configuration for %s depends on values that cannot be determined until apply.", n.Addr), - Subject: &config.DeclRange, - }) - return diags - } - - // If our config value contains any marked values, ensure those are - // stripped out before sending this to the provider - unmarkedConfigVal, _ := configVal.UnmarkDeep() - - // Allow the provider to validate and insert any defaults into the full - // configuration. - req := providers.ValidateProviderConfigRequest{ - Config: unmarkedConfigVal, - } - - // ValidateProviderConfig is only used for validation. We are intentionally - // ignoring the PreparedConfig field to maintain existing behavior. - validateResp := provider.ValidateProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) - if diags.HasErrors() && config == nil { - // If there isn't an explicit "provider" block in the configuration, - // this error message won't be very clear. Add some detail to the error - // message in this case. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider configuration", - fmt.Sprintf(providerConfigErr, n.Addr.Provider), - )) - } - - if diags.HasErrors() { - return diags - } - - // If the provider returns something different, log a warning to help - // indicate to provider developers that the value is not used. - preparedCfg := validateResp.PreparedConfig - if preparedCfg != cty.NilVal && !preparedCfg.IsNull() && !preparedCfg.RawEquals(unmarkedConfigVal) { - log.Printf("[WARN] ValidateProviderConfig from %q changed the config value, but that value is unused", n.Addr) - } - - configDiags := ctx.ConfigureProvider(n.Addr, unmarkedConfigVal) - diags = diags.Append(configDiags.InConfigBody(configBody, n.Addr.String())) - if diags.HasErrors() && config == nil { - // If there isn't an explicit "provider" block in the configuration, - // this error message won't be very clear. Add some detail to the error - // message in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider configuration", - fmt.Sprintf(providerConfigErr, n.Addr.Provider), - )) - } - return diags -} - -const providerConfigErr = `Provider %q requires explicit configuration. Add a provider block to the root module and configure the provider's required arguments as described in the provider documentation. -` diff --git a/internal/terraform/node_provider_abstract.go b/internal/terraform/node_provider_abstract.go deleted file mode 100644 index 09bdd95b40eb..000000000000 --- a/internal/terraform/node_provider_abstract.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - - "github.com/hashicorp/terraform/internal/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. 
-type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeAbstractProvider struct { - Addr addrs.AbsProviderConfig - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - Config *configs.Provider - Schema *configschema.Block -} - -var ( - _ GraphNodeModulePath = (*NodeAbstractProvider)(nil) - _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) - _ GraphNodeProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) -) - -func (n *NodeAbstractProvider) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { - // Providers cannot be contained inside an expanded module, so this shim - // converts our module path to the correct ModuleInstance. 
- return n.Addr.Module.UnkeyedInstanceShim() -} - -// GraphNodeModulePath -func (n *NodeAbstractProvider) ModulePath() addrs.Module { - return n.Addr.Module -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []*addrs.Reference { - if n.Config == nil || n.Schema == nil { - return nil - } - - return ReferencesFromConfig(n.Config.Config, n.Schema) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { - if n.Config == nil { - return nil - } - - return n.Config -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { - n.Config = c -} - -// GraphNodeAttachProviderConfigSchema impl. -func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { - n.Schema = schema -} - -// GraphNodeDotter impl. -func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/internal/terraform/node_provider_eval.go b/internal/terraform/node_provider_eval.go deleted file mode 100644 index fba47ddb96f6..000000000000 --- a/internal/terraform/node_provider_eval.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/internal/tfdiags" - -// NodeEvalableProvider represents a provider during an "eval" walk. -// This special provider node type just initializes a provider and -// fetches its schema, without configuring it or otherwise interacting -// with it. 
-type NodeEvalableProvider struct { - *NodeAbstractProvider -} - -var _ GraphNodeExecutable = (*NodeEvalableProvider)(nil) - -// GraphNodeExecutable -func (n *NodeEvalableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - _, err := ctx.InitProvider(n.Addr) - return diags.Append(err) -} diff --git a/internal/terraform/node_provider_test.go b/internal/terraform/node_provider_test.go deleted file mode 100644 index fe8a80d11b88..000000000000 --- a/internal/terraform/node_provider_test.go +++ /dev/null @@ -1,524 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeApplyableProviderExecute(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "user": cty.StringVal("hello"), - }), - } - - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "user": { - Type: cty.String, - Required: true, - }, - "pw": { - Type: cty.String, - Required: true, - }, - }, - } - provider := mockProviderWithConfigSchema(schema) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - ctx.ProviderInputValues = map[string]cty.Value{ - "pw": cty.StringVal("so secret"), - } - - if diags := n.Execute(ctx, walkApply); diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - if 
!ctx.ConfigureProviderCalled { - t.Fatal("should be called") - } - - gotObj := ctx.ConfigureProviderConfig - if !gotObj.Type().HasAttribute("user") { - t.Fatal("configuration object does not have \"user\" attribute") - } - if got, want := gotObj.GetAttr("user"), cty.StringVal("hello"); !got.RawEquals(want) { - t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) - } - - if !gotObj.Type().HasAttribute("pw") { - t.Fatal("configuration object does not have \"pw\" attribute") - } - if got, want := gotObj.GetAttr("pw"), cty.StringVal("so secret"); !got.RawEquals(want) { - t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestNodeApplyableProviderExecute_unknownImport(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.UnknownVal(cty.String), - }), - } - provider := mockProviderWithConfigSchema(simpleTestSchema()) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - - diags := n.Execute(ctx, walkImport) - if !diags.HasErrors() { - t.Fatal("expected error, got success") - } - - detail := `Invalid provider configuration: The configuration for provider["registry.terraform.io/hashicorp/foo"] depends on values that cannot be determined until apply.` - if got, want := diags.Err().Error(), detail; got != want { - t.Errorf("wrong diagnostic detail\n got: %q\nwant: %q", got, want) - } - - if ctx.ConfigureProviderCalled { - t.Fatal("should not be called") - } -} - -func TestNodeApplyableProviderExecute_unknownApply(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.UnknownVal(cty.String), - }), - } - provider := 
mockProviderWithConfigSchema(simpleTestSchema()) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - - if err := n.Execute(ctx, walkApply); err != nil { - t.Fatalf("err: %s", err) - } - - if !ctx.ConfigureProviderCalled { - t.Fatal("should be called") - } - - gotObj := ctx.ConfigureProviderConfig - if !gotObj.Type().HasAttribute("test_string") { - t.Fatal("configuration object does not have \"test_string\" attribute") - } - if got, want := gotObj.GetAttr("test_string"), cty.UnknownVal(cty.String); !got.RawEquals(want) { - t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestNodeApplyableProviderExecute_sensitive(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("hello").Mark(marks.Sensitive), - }), - } - provider := mockProviderWithConfigSchema(simpleTestSchema()) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - if err := n.Execute(ctx, walkApply); err != nil { - t.Fatalf("err: %s", err) - } - - if !ctx.ConfigureProviderCalled { - t.Fatal("should be called") - } - - gotObj := ctx.ConfigureProviderConfig - if !gotObj.Type().HasAttribute("test_string") { - t.Fatal("configuration object does not have \"test_string\" attribute") - } - if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { - t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) - } -} - -func 
TestNodeApplyableProviderExecute_sensitiveValidate(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("hello").Mark(marks.Sensitive), - }), - } - provider := mockProviderWithConfigSchema(simpleTestSchema()) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - if err := n.Execute(ctx, walkValidate); err != nil { - t.Fatalf("err: %s", err) - } - - if !provider.ValidateProviderConfigCalled { - t.Fatal("should be called") - } - - gotObj := provider.ValidateProviderConfigRequest.Config - if !gotObj.Type().HasAttribute("test_string") { - t.Fatal("configuration object does not have \"test_string\" attribute") - } - if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { - t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) - } -} - -func TestNodeApplyableProviderExecute_emptyValidate(t *testing.T) { - config := &configs.Provider{ - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - } - provider := mockProviderWithConfigSchema(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "test_string": { - Type: cty.String, - Required: true, - }, - }, - }) - providerAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.NewDefaultProvider("foo"), - } - - n := &NodeApplyableProvider{&NodeAbstractProvider{ - Addr: providerAddr, - Config: config, - }} - - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - if err := n.Execute(ctx, walkValidate); err != nil { - t.Fatalf("err: %s", err) - } - - if ctx.ConfigureProviderCalled { - t.Fatal("should not be called") - } -} - -func 
TestNodeApplyableProvider_Validate(t *testing.T) { - provider := mockProviderWithConfigSchema(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": { - Type: cty.String, - Required: true, - }, - }, - }) - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - - t.Run("valid", func(t *testing.T) { - config := &configs.Provider{ - Name: "test", - Config: configs.SynthBody("", map[string]cty.Value{ - "region": cty.StringVal("mars"), - }), - } - - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ValidateProvider(ctx, provider) - if diags.HasErrors() { - t.Errorf("unexpected error with valid config: %s", diags.Err()) - } - }) - - t.Run("invalid", func(t *testing.T) { - config := &configs.Provider{ - Name: "test", - Config: configs.SynthBody("", map[string]cty.Value{ - "region": cty.MapValEmpty(cty.String), - }), - } - - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ValidateProvider(ctx, provider) - if !diags.HasErrors() { - t.Error("missing expected error with invalid config") - } - }) - - t.Run("empty config", func(t *testing.T) { - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - diags := node.ValidateProvider(ctx, provider) - if diags.HasErrors() { - t.Errorf("unexpected error with empty config: %s", diags.Err()) - } - }) -} - -// This test specifically tests responses from the -// providers.ValidateProviderConfigFn. See -// TestNodeApplyableProvider_ConfigProvider_config_fn_err for -// providers.ConfigureProviderRequest responses. 
-func TestNodeApplyableProvider_ConfigProvider(t *testing.T) { - provider := mockProviderWithConfigSchema(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": { - Type: cty.String, - Optional: true, - }, - }, - }) - // For this test, we're returning an error for an optional argument. This - // can happen for example if an argument is only conditionally required. - provider.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - region := req.Config.GetAttr("region") - if region.IsNull() { - resp.Diagnostics = resp.Diagnostics.Append( - tfdiags.WholeContainingBody(tfdiags.Error, "value is not found", "you did not supply a required value")) - } - return - } - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - - t.Run("valid", func(t *testing.T) { - config := &configs.Provider{ - Name: "test", - Config: configs.SynthBody("", map[string]cty.Value{ - "region": cty.StringVal("mars"), - }), - } - - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if diags.HasErrors() { - t.Errorf("unexpected error with valid config: %s", diags.Err()) - } - }) - - t.Run("missing required config (no config at all)", func(t *testing.T) { - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if !diags.HasErrors() { - t.Fatal("missing expected error with nil config") - } - if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { - t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) - } - }) - - t.Run("missing required config", func(t 
*testing.T) { - config := &configs.Provider{ - Name: "test", - Config: hcl.EmptyBody(), - } - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if !diags.HasErrors() { - t.Fatal("missing expected error with invalid config") - } - if !strings.Contains(diags.Err().Error(), "value is not found") { - t.Errorf("wrong diagnostic: %s", diags.Err()) - } - }) - -} - -// This test is similar to TestNodeApplyableProvider_ConfigProvider, but tests responses from the providers.ConfigureProviderRequest -func TestNodeApplyableProvider_ConfigProvider_config_fn_err(t *testing.T) { - provider := mockProviderWithConfigSchema(&configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": { - Type: cty.String, - Optional: true, - }, - }, - }) - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - // For this test, provider.PrepareConfigFn will succeed every time but the - // ctx.ConfigureProviderFn will return an error if a value is not found. 
- // - // This is an unlikely but real situation that occurs: - // https://github.com/hashicorp/terraform/issues/23087 - ctx.ConfigureProviderFn = func(addr addrs.AbsProviderConfig, cfg cty.Value) (diags tfdiags.Diagnostics) { - if cfg.IsNull() { - diags = diags.Append(fmt.Errorf("no config provided")) - } else { - region := cfg.GetAttr("region") - if region.IsNull() { - diags = diags.Append(fmt.Errorf("value is not found")) - } - } - return - } - - t.Run("valid", func(t *testing.T) { - config := &configs.Provider{ - Name: "test", - Config: configs.SynthBody("", map[string]cty.Value{ - "region": cty.StringVal("mars"), - }), - } - - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if diags.HasErrors() { - t.Errorf("unexpected error with valid config: %s", diags.Err()) - } - }) - - t.Run("missing required config (no config at all)", func(t *testing.T) { - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if !diags.HasErrors() { - t.Fatal("missing expected error with nil config") - } - if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { - t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) - } - }) - - t.Run("missing required config", func(t *testing.T) { - config := &configs.Provider{ - Name: "test", - Config: hcl.EmptyBody(), - } - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - Config: config, - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - if !diags.HasErrors() { - t.Fatal("missing expected error with 
invalid config") - } - if diags.Err().Error() != "value is not found" { - t.Errorf("wrong diagnostic: %s", diags.Err()) - } - }) -} - -func TestGetSchemaError(t *testing.T) { - provider := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Diagnostics: tfdiags.Diagnostics.Append(nil, tfdiags.WholeContainingBody(tfdiags.Error, "oops", "error")), - }, - } - - providerAddr := mustProviderConfig(`provider["terraform.io/some/provider"]`) - ctx := &MockEvalContext{ProviderProvider: provider} - ctx.installSimpleEval() - node := NodeApplyableProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - Addr: providerAddr, - }, - } - - diags := node.ConfigureProvider(ctx, provider, false) - for _, d := range diags { - desc := d.Description() - if desc.Address != providerAddr.String() { - t.Fatalf("missing provider address from diagnostics: %#v", desc) - } - } - -} diff --git a/internal/terraform/node_resource_abstract.go b/internal/terraform/node_resource_abstract.go deleted file mode 100644 index 5ad7901d03fe..000000000000 --- a/internal/terraform/node_resource_abstract.go +++ /dev/null @@ -1,518 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ConcreteResourceNodeFunc is a callback type used to convert an -// abstract resource to a concrete one of some type. -type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex - -// GraphNodeConfigResource is implemented by any nodes that represent a resource. -// The type of operation cannot be assumed, only that this node represents -// the given resource. 
-type GraphNodeConfigResource interface { - ResourceAddr() addrs.ConfigResource -} - -// ConcreteResourceInstanceNodeFunc is a callback type used to convert an -// abstract resource instance to a concrete one of some type. -type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex - -// GraphNodeResourceInstance is implemented by any nodes that represent -// a resource instance. A single resource may have multiple instances if, -// for example, the "count" or "for_each" argument is used for it in -// configuration. -type GraphNodeResourceInstance interface { - ResourceInstanceAddr() addrs.AbsResourceInstance - - // StateDependencies returns any inter-resource dependencies that are - // stored in the state. - StateDependencies() []addrs.ConfigResource -} - -// NodeAbstractResource represents a resource that has no associated -// operations. It registers all the interfaces for a resource that common -// across multiple operation types. -type NodeAbstractResource struct { - Addr addrs.ConfigResource - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- - Schema *configschema.Block // Schema for processing the configuration body - SchemaVersion uint64 // Schema version of "Schema", as decided by the provider - Config *configs.Resource // Config is the resource in the config - - // ProviderMetas is the provider_meta configs for the module this resource belongs to - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - - ProvisionerSchemas map[string]*configschema.Block - - // Set from GraphNodeTargetable - Targets []addrs.Targetable - - // Set from AttachDataResourceDependsOn - dependsOn []addrs.ConfigResource - forceDependsOn bool - - // The address of the provider this resource will use - ResolvedProvider addrs.AbsProviderConfig - // storedProviderConfig is the provider address retrieved from the - // state. This is defined here for access within the ProvidedBy method, but - // will be set from the embedding instance type when the state is attached. - storedProviderConfig addrs.AbsProviderConfig - - // This resource may expand into instances which need to be imported. - importTargets []*ImportTarget -} - -var ( - _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) - _ GraphNodeReferencer = (*NodeAbstractResource)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeConfigResource = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResource)(nil) - _ GraphNodeTargetable = (*NodeAbstractResource)(nil) - _ graphNodeAttachDataResourceDependsOn = (*NodeAbstractResource)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) -) - -// NewNodeAbstractResource creates an abstract resource graph node for -// the given absolute resource address. 
-func NewNodeAbstractResource(addr addrs.ConfigResource) *NodeAbstractResource { - return &NodeAbstractResource{ - Addr: addr, - } -} - -var ( - _ GraphNodeModuleInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) -) - -func (n *NodeAbstractResource) Name() string { - return n.ResourceAddr().String() -} - -// GraphNodeModulePath -func (n *NodeAbstractResource) ModulePath() addrs.Module { - return n.Addr.Module -} - -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Resource} -} - -func (n *NodeAbstractResource) Import(addr *ImportTarget) { - -} - -// GraphNodeReferencer -func (n *NodeAbstractResource) References() []*addrs.Reference { - // If we have a config then we prefer to use that. - if c := n.Config; c != nil { - var result []*addrs.Reference - - result = append(result, n.DependsOn()...) - - if n.Schema == nil { - // Should never happen, but we'll log if it does so that we can - // see this easily when debugging. 
- log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - } - - refs, _ := lang.ReferencesInExpr(c.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(c.ForEach) - result = append(result, refs...) - - for _, expr := range c.TriggersReplacement { - refs, _ = lang.ReferencesInExpr(expr) - result = append(result, refs...) - } - - // ReferencesInBlock() requires a schema - if n.Schema != nil { - refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) - result = append(result, refs...) - } - - if c.Managed != nil { - if c.Managed.Connection != nil { - refs, _ = lang.ReferencesInBlock(c.Managed.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - for _, p := range c.Managed.Provisioners { - if p.When != configs.ProvisionerWhenCreate { - continue - } - if p.Connection != nil { - refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - schema := n.ProvisionerSchemas[p.Type] - if schema == nil { - log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) - } - refs, _ = lang.ReferencesInBlock(p.Config, schema) - result = append(result, refs...) - } - } - - for _, check := range c.Preconditions { - refs, _ := lang.ReferencesInExpr(check.Condition) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(check.ErrorMessage) - result = append(result, refs...) - } - for _, check := range c.Postconditions { - refs, _ := lang.ReferencesInExpr(check.Condition) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(check.ErrorMessage) - result = append(result, refs...) - } - - return result - } - - // Otherwise, we have no references. 
- return nil -} - -func (n *NodeAbstractResource) DependsOn() []*addrs.Reference { - var result []*addrs.Reference - if c := n.Config; c != nil { - - for _, traversal := range c.DependsOn { - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - // We ignore this here, because this isn't a suitable place to return - // errors. This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) - continue - } - - result = append(result, ref) - } - } - return result -} - -func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { - n.ResolvedProvider = p -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() (addrs.ProviderConfig, bool) { - // Once the provider is fully resolved, we can return the known value. - if n.ResolvedProvider.Provider.Type != "" { - return n.ResolvedProvider, true - } - - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return addrs.LocalProviderConfig{ - LocalName: relAddr.LocalName, - Alias: relAddr.Alias, - }, false - } - - // See if we have a valid provider config from the state. - if n.storedProviderConfig.Provider.Type != "" { - // An address from the state must match exactly, since we must ensure - // we refresh/destroy a resource with the same provider configuration - // that created it. 
- return n.storedProviderConfig, true - } - - // No provider configuration found; return a default address - return addrs.AbsProviderConfig{ - Provider: n.Provider(), - Module: n.ModulePath(), - }, false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) Provider() addrs.Provider { - if n.Config != nil { - return n.Config.Provider - } - if n.storedProviderConfig.Provider.Type != "" { - return n.storedProviderConfig.Provider - } - return addrs.ImpliedProviderForUnqualifiedType(n.Addr.Resource.ImpliedProvider()) -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil || n.Config.Managed == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. - result := make([]string, len(n.Config.Managed.Provisioners)) - for i, p := range n.Config.Managed.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { - if n.ProvisionerSchemas == nil { - n.ProvisionerSchemas = make(map[string]*configschema.Block) - } - n.ProvisionerSchemas[name] = schema -} - -// GraphNodeResource -func (n *NodeAbstractResource) ResourceAddr() addrs.ConfigResource { - return n.Addr -} - -// GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { - n.Targets = targets -} - -// graphNodeAttachDataResourceDependsOn -func (n *NodeAbstractResource) AttachDataResourceDependsOn(deps []addrs.ConfigResource, force bool) { - n.dependsOn = deps - n.forceDependsOn = force -} - -// GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { - n.Config = c -} - -// GraphNodeAttachResourceSchema impl -func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, 
version uint64) { - n.Schema = schema - n.SchemaVersion = version -} - -// GraphNodeAttachProviderMetaConfigs impl -func (n *NodeAbstractResource) AttachProviderMetaConfigs(c map[addrs.Provider]*configs.ProviderMeta) { - n.ProviderMetas = c -} - -// GraphNodeDotter impl. -func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "box", - }, - } -} - -// writeResourceState ensures that a suitable resource-level state record is -// present in the state, if that's required for the "each mode" of that -// resource. -// -// This is important primarily for the situation where count = 0, since this -// eval is the only change we get to set the resource "each mode" to list -// in that case, allowing expression evaluation to see it as a zero-element list -// rather than as not set at all. -func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.AbsResource) (diags tfdiags.Diagnostics) { - state := ctx.State() - - // We'll record our expansion decision in the shared "expander" object - // so that later operations (i.e. DynamicExpand and expression evaluation) - // can refer to it. Since this node represents the abstract module, we need - // to expand the module here to create all resources. 
- expander := ctx.InstanceExpander() - - switch { - case n.Config.Count != nil: - count, countDiags := evaluateCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return diags - } - - state.SetResourceProvider(addr, n.ResolvedProvider) - expander.SetResourceCount(addr.Module, n.Addr.Resource, count) - - case n.Config.ForEach != nil: - forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return diags - } - - // This method takes care of all of the business logic of updating this - // while ensuring that any existing instances are preserved, etc. - state.SetResourceProvider(addr, n.ResolvedProvider) - expander.SetResourceForEach(addr.Module, n.Addr.Resource, forEach) - - default: - state.SetResourceProvider(addr, n.ResolvedProvider) - expander.SetResourceSingle(addr.Module, n.Addr.Resource) - } - - return diags -} - -// readResourceInstanceState reads the current object for a specific instance in -// the state. -func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - log.Printf("[TRACE] readResourceInstanceState: reading state for %s", addr) - - src := ctx.State().ResourceInstanceObject(addr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. 
- log.Printf("[TRACE] readResourceInstanceState: no state present for %s", addr) - return nil, nil - } - - schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr)) - } - src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion) - if n.Config != nil { - upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String()) - } - diags = diags.Append(upgradeDiags) - if diags.HasErrors() { - return nil, diags - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - diags = diags.Append(err) - } - - return obj, diags -} - -// readResourceInstanceStateDeposed reads the deposed object for a specific -// instance in the state. -func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - if key == states.NotDeposed { - return nil, diags.Append(fmt.Errorf("readResourceInstanceStateDeposed used with no instance key; this is a bug in Terraform and should be reported")) - } - - log.Printf("[TRACE] readResourceInstanceStateDeposed: reading state for %s deposed object %s", addr, key) - - src := ctx.State().ResourceInstanceObject(addr, key) - if src == nil { - // Presumably we only have deposed objects, then. 
- log.Printf("[TRACE] readResourceInstanceStateDeposed: no state present for %s deposed object %s", addr, key) - return nil, diags - } - - schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr)) - - } - - src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion) - if n.Config != nil { - upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String()) - } - diags = diags.Append(upgradeDiags) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - diags = diags.Append(err) - } - - return obj, diags -} - -// graphNodesAreResourceInstancesInDifferentInstancesOfSameModule is an -// annoyingly-task-specific helper function that returns true if and only if -// the following conditions hold: -// - Both of the given vertices represent specific resource instances, as -// opposed to unexpanded resources or any other non-resource-related object. -// - The module instance addresses for both of the resource instances belong -// to the same static module. -// - The module instance addresses for both of the resource instances are -// not equal, indicating that they belong to different instances of the -// same module. 
-// -// This result can be used as a way to compensate for the effects of -// conservative analysis passes in our graph builders which make their -// decisions based only on unexpanded addresses, often so that they can behave -// correctly for interactions between expanded and not-yet-expanded objects. -// -// Callers of this helper function will typically skip adding an edge between -// the two given nodes if this function returns true. -func graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(a, b dag.Vertex) bool { - aRI, aOK := a.(GraphNodeResourceInstance) - bRI, bOK := b.(GraphNodeResourceInstance) - if !(aOK && bOK) { - return false - } - aModInst := aRI.ResourceInstanceAddr().Module - bModInst := bRI.ResourceInstanceAddr().Module - aMod := aModInst.Module() - bMod := bModInst.Module() - if !aMod.Equal(bMod) { - return false - } - return !aModInst.Equal(bModInst) -} diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go deleted file mode 100644 index d14d0d92ff76..000000000000 --- a/internal/terraform/node_resource_abstract_instance.go +++ /dev/null @@ -1,2400 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/objchange" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// NodeAbstractResourceInstance represents a resource instance with no -// associated operations. 
It embeds NodeAbstractResource but additionally -// contains an instance key, used to identify one of potentially many -// instances that were created from a resource in configuration, e.g. using -// the "count" or "for_each" arguments. -type NodeAbstractResourceInstance struct { - NodeAbstractResource - Addr addrs.AbsResourceInstance - - // These are set via the AttachState method. - instanceState *states.ResourceInstance - - Dependencies []addrs.ConfigResource - - preDestroyRefresh bool -} - -// NewNodeAbstractResourceInstance creates an abstract resource instance graph -// node for the given absolute resource instance address. -func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { - // Due to the fact that we embed NodeAbstractResource, the given address - // actually ends up split between the resource address in the embedded - // object and the InstanceKey field in our own struct. The - // ResourceInstanceAddr method will stick these back together again on - // request. - r := NewNodeAbstractResource(addr.ContainingResource().Config()) - return &NodeAbstractResourceInstance{ - NodeAbstractResource: *r, - Addr: addr, - } -} - -func (n *NodeAbstractResourceInstance) Name() string { - return n.ResourceInstanceAddr().String() -} - -func (n *NodeAbstractResourceInstance) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeReferenceable -func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - addr := n.ResourceInstanceAddr() - return []addrs.Referenceable{ - addr.Resource, - - // A resource instance can also be referenced by the address of its - // containing resource, so that e.g. a reference to aws_instance.foo - // would match both aws_instance.foo[0] and aws_instance.foo[1]. 
- addr.ContainingResource().Resource, - } -} - -// GraphNodeReferencer -func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { - // If we have a configuration attached then we'll delegate to our - // embedded abstract resource, which knows how to extract dependencies - // from configuration. If there is no config, then the dependencies will - // be connected during destroy from those stored in the state. - if n.Config != nil { - if n.Schema == nil { - // We'll produce a log message about this out here so that - // we can include the full instance address, since the equivalent - // message in NodeAbstractResource.References cannot see it. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - return nil - } - return n.NodeAbstractResource.References() - } - - // If we have neither config nor state then we have no references. - return nil -} - -// StateDependencies returns the dependencies which will be saved in the state -// for managed resources, or the most current dependencies for data resources. -func (n *NodeAbstractResourceInstance) StateDependencies() []addrs.ConfigResource { - // Managed resources prefer the stored dependencies, to avoid possible - // conflicts in ordering when refactoring configuration. - if s := n.instanceState; s != nil { - if s.Current != nil { - return s.Current.Dependencies - } - } - - // If there are no stored dependencies, this is either a newly created - // managed resource, or a data source, and we can use the most recently - // calculated dependencies. 
- return n.Dependencies -} - -// GraphNodeResourceInstance -func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { - return n.Addr -} - -// GraphNodeAttachResourceState -func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { - if s == nil { - log.Printf("[WARN] attaching nil state to %s", n.Addr) - return - } - log.Printf("[TRACE] NodeAbstractResourceInstance.AttachResourceState for %s", n.Addr) - n.instanceState = s.Instance(n.Addr.Resource.Key) - n.storedProviderConfig = s.ProviderConfig -} - -// readDiff returns the planned change for a particular resource instance -// object. -func (n *NodeAbstractResourceInstance) readDiff(ctx EvalContext, providerSchema *ProviderSchema) (*plans.ResourceInstanceChange, error) { - changes := ctx.Changes() - addr := n.ResourceInstanceAddr() - - schema, _ := providerSchema.SchemaForResourceAddr(addr.Resource.Resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", addr.Resource.Resource.Type) - } - - gen := states.CurrentGen - csrc := changes.GetResourceInstanceChange(addr, gen) - if csrc == nil { - log.Printf("[TRACE] readDiff: No planned change recorded for %s", n.Addr) - return nil, nil - } - - change, err := csrc.Decode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to decode planned changes for %s: %s", n.Addr, err) - } - - log.Printf("[TRACE] readDiff: Read %s change from plan for %s", change.Action, n.Addr) - - return change, nil -} - -func (n *NodeAbstractResourceInstance) checkPreventDestroy(change *plans.ResourceInstanceChange) error { - if change == nil || n.Config == nil || n.Config.Managed == nil { - return nil - } - - preventDestroy := n.Config.Managed.PreventDestroy - - if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { - var diags tfdiags.Diagnostics - diags = 
diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Instance cannot be destroyed", - Detail: fmt.Sprintf( - "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", - n.Addr.String(), - ), - Subject: &n.Config.DeclRange, - }) - return diags.Err() - } - - return nil -} - -// preApplyHook calls the pre-Apply hook -func (n *NodeAbstractResourceInstance) preApplyHook(ctx EvalContext, change *plans.ResourceInstanceChange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if change == nil { - panic(fmt.Sprintf("preApplyHook for %s called with nil Change", n.Addr)) - } - - // Only managed resources have user-visible apply actions. - if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode { - priorState := change.Before - plannedNewState := change.After - - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(n.Addr, change.DeposedKey.Generation(), change.Action, priorState, plannedNewState) - })) - if diags.HasErrors() { - return diags - } - } - - return nil -} - -// postApplyHook calls the post-Apply hook -func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *states.ResourceInstanceObject, err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // Only managed resources have user-visible apply actions. 
- if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode { - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Addr, nil, newState, err) - })) - } - - return diags -} - -type phaseState int - -const ( - workingState phaseState = iota - refreshState - prevRunState -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type phaseState - -// writeResourceInstanceState saves the given object as the current object for -// the selected resource instance. -// -// dependencies is a parameter, instead of those directly attacted to the -// NodeAbstractResourceInstance, because we don't write dependencies for -// datasources. -// -// targetState determines which context state we're writing to during plan. The -// default is the global working state. -func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error { - return n.writeResourceInstanceStateImpl(ctx, states.NotDeposed, obj, targetState) -} - -func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error { - if deposedKey == states.NotDeposed { - // Bail out to avoid silently doing something other than what the - // caller seems to have intended. - panic("trying to write current state object using writeResourceInstanceStateDeposed") - } - return n.writeResourceInstanceStateImpl(ctx, deposedKey, obj, targetState) -} - -// (this is the private common body of both writeResourceInstanceState and -// writeResourceInstanceStateDeposed. Don't call it directly; instead, use -// one of the two wrappers to be explicit about which of the instance's -// objects you are intending to write. 
-func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error { - absAddr := n.Addr - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return err - } - logFuncName := "NodeAbstractResouceInstance.writeResourceInstanceState" - if deposedKey == states.NotDeposed { - log.Printf("[TRACE] %s to %s for %s", logFuncName, targetState, absAddr) - } else { - logFuncName = "NodeAbstractResouceInstance.writeResourceInstanceStateDeposed" - log.Printf("[TRACE] %s to %s for %s (deposed key %s)", logFuncName, targetState, absAddr, deposedKey) - } - - var state *states.SyncState - switch targetState { - case workingState: - state = ctx.State() - case refreshState: - state = ctx.RefreshState() - case prevRunState: - state = ctx.PrevRunState() - default: - panic(fmt.Sprintf("unsupported phaseState value %#v", targetState)) - } - if state == nil { - // Should not happen, because we shouldn't ever try to write to - // a state that isn't applicable to the current operation. - // (We can also get in here for unit tests which are using - // EvalContextMock but not populating PrevRunStateState with - // a suitable state object.) - return fmt.Errorf("state of type %s is not applicable to the current operation; this is a bug in Terraform", targetState) - } - - // In spite of the name, this function also handles the non-deposed case - // via the writeResourceInstanceState wrapper, by setting deposedKey to - // the NotDeposed value (the zero value of DeposedKey). 
- var write func(src *states.ResourceInstanceObjectSrc) - if deposedKey == states.NotDeposed { - write = func(src *states.ResourceInstanceObjectSrc) { - state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider) - } - } else { - write = func(src *states.ResourceInstanceObjectSrc) { - state.SetResourceInstanceDeposed(absAddr, deposedKey, src, n.ResolvedProvider) - } - } - - if obj == nil || obj.Value.IsNull() { - // No need to encode anything: we'll just write it directly. - write(nil) - log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr) - return nil - } - - if providerSchema == nil { - // Should never happen, unless our state object is nil - panic("writeResourceInstanceStateImpl used with nil ProviderSchema") - } - - if obj != nil { - log.Printf("[TRACE] %s: writing state object for %s", logFuncName, absAddr) - } else { - log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr) - } - - schema, currentVersion := (*providerSchema).SchemaForResourceAddr(absAddr.ContainingResource().Resource) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - write(src) - return nil -} - -// planDestroy returns a plain destroy diff. 
-func (n *NodeAbstractResourceInstance) planDestroy(ctx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var plan *plans.ResourceInstanceChange - - absAddr := n.Addr - - if n.ResolvedProvider.Provider.Type == "" { - if deposedKey == "" { - panic(fmt.Sprintf("planDestroy for %s does not have ProviderAddr set", absAddr)) - } else { - panic(fmt.Sprintf("planDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, deposedKey)) - } - } - - // If there is no state or our attributes object is null then we're already - // destroyed. - if currentState == nil || currentState.Value.IsNull() { - // We still need to generate a NoOp change, because that allows - // outside consumers of the plan to distinguish between us affirming - // that we checked something and concluded no changes were needed - // vs. that something being entirely excluded e.g. due to -target. - noop := &plans.ResourceInstanceChange{ - Addr: absAddr, - PrevRunAddr: n.prevRunAddr(ctx), - DeposedKey: deposedKey, - Change: plans.Change{ - Action: plans.NoOp, - Before: cty.NullVal(cty.DynamicPseudoType), - After: cty.NullVal(cty.DynamicPseudoType), - }, - ProviderAddr: n.ResolvedProvider, - } - return noop, nil - } - - unmarkedPriorVal, _ := currentState.Value.UnmarkDeep() - - // The config and new value are null to signify that this is a destroy - // operation. - nullVal := cty.NullVal(unmarkedPriorVal.Type()) - - provider, _, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return plan, diags.Append(err) - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return plan, diags - } - - // Allow the provider to check the destroy plan, and insert any necessary - // private data. 
- resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Resource.Type, - Config: nullVal, - PriorState: unmarkedPriorVal, - ProposedNewState: nullVal, - PriorPrivate: currentState.Private, - ProviderMeta: metaConfigVal, - }) - - // We may not have a config for all destroys, but we want to reference it in - // the diagnostics if we do. - if n.Config != nil { - resp.Diagnostics = resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String()) - } - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return plan, diags - } - - // Check that the provider returned a null value here, since that is the - // only valid value for a destroy plan. - if !resp.PlannedState.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned a non-null destroy value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider, n.Addr), - ), - ) - return plan, diags - } - - // Plan is always the same for a destroy. - plan = &plans.ResourceInstanceChange{ - Addr: absAddr, - PrevRunAddr: n.prevRunAddr(ctx), - DeposedKey: deposedKey, - Change: plans.Change{ - Action: plans.Delete, - Before: currentState.Value, - After: nullVal, - }, - Private: resp.PlannedPrivate, - ProviderAddr: n.ResolvedProvider, - } - - return plan, diags -} - -// writeChange saves a planned change for an instance object into the set of -// global planned changes. -func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { - changes := ctx.Changes() - - if change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. 
- gen := states.CurrentGen - if deposedKey != states.NotDeposed { - gen = deposedKey - } - changes.RemoveResourceInstanceChange(n.Addr, gen) - return nil - } - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return err - } - - if change.Addr.String() != n.Addr.String() || change.DeposedKey != deposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in writeChange") - } - if change.PrevRunAddr.Resource.Resource.Type == "" { - // Should never happen, and indicates a bug in the caller. - // (The change.Encode function actually has its own fixup to just - // quietly make this match change.Addr in the incorrect case, but we - // intentionally panic here in order to catch incorrect callers where - // the stack trace will hopefully be actually useful. The tolerance - // at the next layer down is mainly to accommodate sloppy input in - // older tests.) - panic("unpopulated ResourceInstanceChange.PrevRunAddr in writeChange") - } - - ri := n.Addr.Resource - schema, _ := providerSchema.SchemaForResourceAddr(ri.Resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return fmt.Errorf("provider does not support resource type %q", ri.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return fmt.Errorf("failed to encode planned changes for %s: %s", n.Addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if deposedKey == states.NotDeposed { - log.Printf("[TRACE] writeChange: recorded %s change for %s", change.Action, n.Addr) - } else { - log.Printf("[TRACE] writeChange: recorded %s change for %s deposed object %s", change.Action, n.Addr, deposedKey) - } - - return nil -} - -// refresh does a refresh for a resource -func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) 
(*states.ResourceInstanceObject, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - absAddr := n.Addr - if deposedKey == states.NotDeposed { - log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr) - } else { - log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s (deposed object %s)", absAddr, deposedKey) - } - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return state, diags.Append(err) - } - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", absAddr) - return state, diags - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) - return state, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return state, diags - } - - hookGen := states.CurrentGen - if deposedKey != states.NotDeposed { - hookGen = deposedKey - } - - // Call pre-refresh hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, hookGen, state.Value) - })) - if diags.HasErrors() { - return state, diags - } - - // Refresh! 
- priorVal := state.Value - - // Unmarked before sending to provider - var priorPaths []cty.PathValueMarks - if priorVal.ContainsMarked() { - priorVal, priorPaths = priorVal.UnmarkDeepWithPaths() - } - - providerReq := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Resource.Type, - PriorState: priorVal, - Private: state.Private, - ProviderMeta: metaConfigVal, - } - - resp := provider.ReadResource(providerReq) - if n.Config != nil { - resp.Diagnostics = resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String()) - } - - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return state, diags - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. - panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider.String(), absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return state, diags - } - - newState := objchange.NormalizeObjectFromLegacySDK(resp.NewState, schema) - if !newState.RawEquals(resp.NewState) { - // We had to fix up this object in some way, and we still need to - // accept any changes for compatibility, so all we can do is log a - // warning about the change. 
- log.Printf("[WARN] Provider %q produced an invalid new value containing null blocks for %q during refresh\n", n.ResolvedProvider.Provider, n.Addr) - } - - ret := state.DeepCopy() - ret.Value = newState - ret.Private = resp.Private - - // We have no way to exempt provider using the legacy SDK from this check, - // so we can only log inconsistencies with the updated state values. - // In most cases these are not errors anyway, and represent "drift" from - // external changes which will be handled by the subsequent plan. - if errs := objchange.AssertObjectCompatible(schema, priorVal, ret.Value); len(errs) > 0 { - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s during refresh.", n.ResolvedProvider.Provider.String(), absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } - - // Call post-refresh hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, hookGen, priorVal, ret.Value) - })) - if diags.HasErrors() { - return ret, diags - } - - // Mark the value if necessary - if len(priorPaths) > 0 { - ret.Value = ret.Value.MarkWithPaths(priorPaths) - } - - return ret, diags -} - -func (n *NodeAbstractResourceInstance) plan( - ctx EvalContext, - plannedChange *plans.ResourceInstanceChange, - currentState *states.ResourceInstanceObject, - createBeforeDestroy bool, - forceReplace []addrs.AbsResourceInstance) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var state *states.ResourceInstanceObject - var plan *plans.ResourceInstanceChange - var keyData instances.RepetitionData - - config := *n.Config - resource := n.Addr.Resource.Resource - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return plan, state, keyData, diags.Append(err) - } - - checkRuleSeverity := 
tfdiags.Error - if n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } - - if plannedChange != nil { - // If we already planned the action, we stick to that plan - createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete - } - - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("provider schema is unavailable for %s", n.Addr)) - return plan, state, keyData, diags - } - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type)) - return plan, state, keyData, diags - } - - forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - - keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) - - checkDiags := evalCheckRules( - addrs.ResourcePrecondition, - n.Config.Preconditions, - ctx, n.Addr, keyData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - if diags.HasErrors() { - return plan, state, keyData, diags // failed preconditions prevent further evaluation - } - - // If we have a previous plan and the action was a noop, then the only - // reason we're in this method was to evaluate the preconditions. There's - // no need to re-plan this resource. 
- if plannedChange != nil && plannedChange.Action == plans.NoOp { - return plannedChange, currentState.DeepCopy(), keyData, diags - } - - origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return plan, state, keyData, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return plan, state, keyData, diags - } - - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if currentState != nil { - if currentState.Status != states.ObjectTainted { - priorVal = currentState.Value - priorPrivate = currentState.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = currentState.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr) - // Allow the provider to validate the final set of values. The config was - // statically validated early on, but there may have been unknown values - // which the provider could not validate at the time. - // - // TODO: It would be more correct to validate the config after - // ignore_changes has been applied, but the current implementation cannot - // exclude computed-only attributes when given the `all` option. - - // we must unmark and use the original config, since the ignore_changes - // handling below needs access to the marks. 
- unmarkedConfigVal, _ := origConfigVal.UnmarkDeep() - validateResp := provider.ValidateResourceConfig( - providers.ValidateResourceConfigRequest{ - TypeName: n.Addr.Resource.Resource.Type, - Config: unmarkedConfigVal, - }, - ) - diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - if diags.HasErrors() { - return plan, state, keyData, diags - } - - // ignore_changes is meant to only apply to the configuration, so it must - // be applied before we generate a plan. This ensures the config used for - // the proposed value, the proposed value itself, and the config presented - // to the provider in the PlanResourceChange request all agree on the - // starting values. - // Here we operate on the marked values, so as to revert any changes to the - // marks as well as the value. - configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal, schema) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return plan, state, keyData, diags - } - - // Create an unmarked version of our config val and our prior val. - // Store the paths for the config val to re-mark after we've sent things - // over the wire. 
- unmarkedConfigVal, unmarkedPaths := configValIgnored.UnmarkDeepWithPaths() - unmarkedPriorVal, priorPaths := priorVal.UnmarkDeepWithPaths() - - proposedNewVal := objchange.ProposedNew(schema, unmarkedPriorVal, unmarkedConfigVal) - - // Call pre-diff hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal) - })) - if diags.HasErrors() { - return plan, state, keyData, diags - } - - resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Resource.Type, - Config: unmarkedConfigVal, - PriorState: unmarkedPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: priorPrivate, - ProviderMeta: metaConfigVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - if diags.HasErrors() { - return plan, state, keyData, diags - } - - plannedNewVal := resp.PlannedState - plannedPrivate := resp.PlannedPrivate - - if plannedNewVal == cty.NilVal { - // Should never happen. Since real-world providers return via RPC a nil - // is always a bug in the client-side stub. This is more likely caused - // by an incompletely-configured mock provider in tests, though. - panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", n.Addr)) - } - - // We allow the planned new value to disagree with configuration _values_ - // here, since that allows the provider to do special logic like a - // DiffSuppressFunc, but we still require that the provider produces - // a value whose type conforms to the schema. 
- for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), - ), - )) - } - if diags.HasErrors() { - return plan, state, keyData, diags - } - - if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, unmarkedConfigVal, plannedNewVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, - "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", - n.ResolvedProvider.Provider, n.Addr, - ) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), - ), - )) - } - return plan, state, keyData, diags - } - } - - if resp.LegacyTypeSystem { - // Because we allow legacy providers to depart from the contract and - // return changes to non-computed values, the 
plan response may have - // altered values that were already suppressed with ignore_changes. - // A prime example of this is where providers attempt to obfuscate - // config data by turning the config value into a hash and storing the - // hash value in the state. There are enough cases of this in existing - // providers that we must accommodate the behavior for now, so for - // ignore_changes to work at all on these values, we will revert the - // ignored values once more. - // A nil schema is passed to processIgnoreChanges to indicate that we - // don't want to fixup a config value according to the schema when - // ignoring "all", rather we are reverting provider imposed changes. - plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal, nil) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return plan, state, keyData, diags - } - } - - // Add the marks back to the planned new value -- this must happen after ignore changes - // have been processed - unmarkedPlannedNewVal := plannedNewVal - if len(unmarkedPaths) > 0 { - plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) - } - - // The provider produces a list of paths to attributes whose changes mean - // that we must replace rather than update an existing remote object. - // However, we only need to do that if the identified attributes _have_ - // actually changed -- particularly after we may have undone some of the - // changes in processIgnoreChanges -- so now we'll filter that list to - // include only where changes are detected. - reqRep := cty.NewPathSet() - if len(resp.RequiresReplace) > 0 { - for _, path := range resp.RequiresReplace { - if priorVal.IsNull() { - // If prior is null then we don't expect any RequiresReplace at all, - // because this is a Create action. 
- continue - } - - priorChangedVal, priorPathDiags := hcl.ApplyPath(unmarkedPriorVal, path, nil) - plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) - if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { - // This means the path was invalid in both the prior and new - // values, which is an error with the provider itself. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider, n.Addr, path, - ), - )) - continue - } - - // Make sure we have valid Values for both values. - // Note: if the opposing value was of the type - // cty.DynamicPseudoType, the type assigned here may not exactly - // match the schema. This is fine here, since we're only going to - // check for equality, but if the NullVal is to be used, we need to - // check the schema for th true type. - switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - // Unmark for this value for the equality test. 
If only sensitivity has changed, - // this does not require an Update or Replace - unmarkedPlannedChangedVal, _ := plannedChangedVal.UnmarkDeep() - eqV := unmarkedPlannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return plan, state, keyData, diags - } - } - - // The user might also ask us to force replacing a particular resource - // instance, regardless of whether the provider thinks it needs replacing. - // For example, users typically do this if they learn a particular object - // has become degraded in an immutable infrastructure scenario and so - // replacing it with a new object is a viable repair path. - matchedForceReplace := false - for _, candidateAddr := range forceReplace { - if candidateAddr.Equal(n.Addr) { - matchedForceReplace = true - break - } - - // For "force replace" purposes we require an exact resource instance - // address to match. If a user forgets to include the instance key - // for a multi-instance resource then it won't match here, but we - // have an earlier check in NodePlannableResource.Execute that should - // prevent us from getting here in that case. - } - - // Unmark for this test for value equality. - eqV := unmarkedPlannedNewVal.Equals(unmarkedPriorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - var actionReason plans.ResourceInstanceChangeActionReason - switch { - case priorVal.IsNull(): - action = plans.Create - case eq && !matchedForceReplace: - action = plans.NoOp - case matchedForceReplace || !reqRep.Empty(): - // If the user "forced replace" of this instance of if there are any - // "requires replace" paths left _after our filtering above_ then this - // is a replace action. 
- if createBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - switch { - case matchedForceReplace: - actionReason = plans.ResourceInstanceReplaceByRequest - case !reqRep.Empty(): - actionReason = plans.ResourceInstanceReplaceBecauseCannotUpdate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. - // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // Since there is no prior state to compare after replacement, we need - // a new unmarked config from our original with no ignored values. 
- unmarkedConfigVal := origConfigVal - if origConfigVal.ContainsMarked() { - unmarkedConfigVal, _ = origConfigVal.UnmarkDeep() - } - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNew(schema, nullPriorVal, unmarkedConfigVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Resource.Type, - Config: unmarkedConfigVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - ProviderMeta: metaConfigVal, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. - if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - return plan, state, keyData, diags - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - - if len(unmarkedPaths) > 0 { - plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) - } - - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.Provider, n.Addr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return plan, state, keyData, diags - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create. 
- if action == plans.Create && !priorValTainted.IsNull() { - if createBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - priorVal = priorValTainted - actionReason = plans.ResourceInstanceReplaceBecauseTainted - } - - // If we plan to write or delete sensitive paths from state, - // this is an Update action - if action == plans.NoOp && !marksEqual(unmarkedPaths, priorPaths) { - action = plans.Update - } - - // As a special case, if we have a previous diff (presumably from the plan - // phases, whereas we're now in the apply phase) and it was for a replace, - // we've already deleted the original object from state by the time we - // get here and so we would've ended up with a _create_ action this time, - // which we now need to paper over to get a result consistent with what - // we originally intended. - if plannedChange != nil { - prevChange := *plannedChange - if prevChange.Action.IsReplace() && action == plans.Create { - log.Printf("[TRACE] plan: %s treating Create change as %s change to match with earlier plan", n.Addr, prevChange.Action) - action = prevChange.Action - priorVal = prevChange.Before - } - } - - // Call post-refresh hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Addr, states.CurrentGen, action, priorVal, plannedNewVal) - })) - if diags.HasErrors() { - return plan, state, keyData, diags - } - - // Update our return plan - plan = &plans.ResourceInstanceChange{ - Addr: n.Addr, - PrevRunAddr: n.prevRunAddr(ctx), - Private: plannedPrivate, - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - Action: action, - Before: priorVal, - // Pass the marked planned value through in our change - // to propogate through evaluation. - // Marks will be removed when encoding. 
- After: plannedNewVal, - }, - ActionReason: actionReason, - RequiredReplace: reqRep, - } - - // Update our return state - state = &states.ResourceInstanceObject{ - // We use the special "planned" status here to note that this - // object's value is not yet complete. Objects with this status - // cannot be used during expression evaluation, so the caller - // must _also_ record the returned change in the active plan, - // which the expression evaluator will use in preference to this - // incomplete value recorded in the state. - Status: states.ObjectPlanned, - Value: plannedNewVal, - Private: plannedPrivate, - } - - return plan, state, keyData, diags -} - -func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - // ignore_changes only applies when an object already exists, since we - // can't ignore changes to a thing we've not created yet. - if prior.IsNull() { - return config, nil - } - - ignoreChanges := traversalsToPaths(n.Config.Managed.IgnoreChanges) - ignoreAll := n.Config.Managed.IgnoreAllChanges - - if len(ignoreChanges) == 0 && !ignoreAll { - return config, nil - } - - if ignoreAll { - // Legacy providers need up to clean up their invalid plans and ensure - // no changes are passed though, but that also means making an invalid - // config with computed values. In that case we just don't supply a - // schema and return the prior val directly. - if schema == nil { - return prior, nil - } - - // If we are trying to ignore all attribute changes, we must filter - // computed attributes out from the prior state to avoid sending them - // to the provider as if they were included in the configuration. 
- ret, _ := cty.Transform(prior, func(path cty.Path, v cty.Value) (cty.Value, error) { - attr := schema.AttributeByPath(path) - if attr != nil && attr.Computed && !attr.Optional { - return cty.NullVal(v.Type()), nil - } - - return v, nil - }) - - return ret, nil - } - - if prior.IsNull() || config.IsNull() { - // Ignore changes doesn't apply when we're creating for the first time. - // Proposed should never be null here, but if it is then we'll just let it be. - return config, nil - } - - ret, diags := processIgnoreChangesIndividual(prior, config, ignoreChanges) - - return ret, diags -} - -// Convert the hcl.Traversal values we get form the configuration to the -// cty.Path values we need to operate on the cty.Values -func traversalsToPaths(traversals []hcl.Traversal) []cty.Path { - paths := make([]cty.Path, len(traversals)) - for i, traversal := range traversals { - path := traversalToPath(traversal) - paths[i] = path - } - return paths -} - -func traversalToPath(traversal hcl.Traversal) cty.Path { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - return path -} - -func processIgnoreChangesIndividual(prior, config cty.Value, ignoreChangesPath []cty.Path) (cty.Value, tfdiags.Diagnostics) { - type ignoreChange struct { - // Path is the full path, minus any trailing map index - path cty.Path - // Value is the value we are to retain at the above path. If there is a - // key value, this must be a map and the desired value will be at the - // key index. - value cty.Value - // Key is the index key if the ignored path ends in a map index. 
- key cty.Value - } - var ignoredValues []ignoreChange - - // Find the actual changes first and store them in the ignoreChange struct. - // If the change was to a map value, and the key doesn't exist in the - // config, it would never be visited in the transform walk. - for _, icPath := range ignoreChangesPath { - key := cty.NullVal(cty.String) - // check for a map index, since maps are the only structure where we - // could have invalid path steps. - last, ok := icPath[len(icPath)-1].(cty.IndexStep) - if ok { - if last.Key.Type() == cty.String { - icPath = icPath[:len(icPath)-1] - key = last.Key - } - } - - // The structure should have been validated already, and we already - // trimmed the trailing map index. Any other intermediate index error - // means we wouldn't be able to apply the value below, so no need to - // record this. - p, err := icPath.Apply(prior) - if err != nil { - continue - } - c, err := icPath.Apply(config) - if err != nil { - continue - } - - // If this is a map, it is checking the entire map value for equality - // rather than the individual key. This means that the change is stored - // here even if our ignored key doesn't change. That is OK since it - // won't cause any changes in the transformation, but allows us to skip - // breaking up the maps and checking for key existence here too. - if !p.RawEquals(c) { - // there a change to ignore at this path, store the prior value - ignoredValues = append(ignoredValues, ignoreChange{icPath, p, key}) - } - } - - if len(ignoredValues) == 0 { - return config, nil - } - - ret, _ := cty.Transform(config, func(path cty.Path, v cty.Value) (cty.Value, error) { - // Easy path for when we are only matching the entire value. The only - // values we break up for inspection are maps. 
- if !v.Type().IsMapType() { - for _, ignored := range ignoredValues { - if path.Equals(ignored.path) { - return ignored.value, nil - } - } - return v, nil - } - // We now know this must be a map, so we need to accumulate the values - // key-by-key. - - if !v.IsNull() && !v.IsKnown() { - // since v is not known, we cannot ignore individual keys - return v, nil - } - - // The map values will remain as cty values, so we only need to store - // the marks from the outer map itself - v, vMarks := v.Unmark() - - // The configMap is the current configuration value, which we will - // mutate based on the ignored paths and the prior map value. - var configMap map[string]cty.Value - switch { - case v.IsNull() || v.LengthInt() == 0: - configMap = map[string]cty.Value{} - default: - configMap = v.AsValueMap() - } - - for _, ignored := range ignoredValues { - if !path.Equals(ignored.path) { - continue - } - - if ignored.key.IsNull() { - // The map address is confirmed to match at this point, - // so if there is no key, we want the entire map and can - // stop accumulating values. - return ignored.value, nil - } - // Now we know we are ignoring a specific index of this map, so get - // the config map and modify, add, or remove the desired key. - - // We also need to create a prior map, so we can check for - // existence while getting the value, because Value.Index will - // return null for a key with a null value and for a non-existent - // key. - var priorMap map[string]cty.Value - - // We need to drop the marks from the ignored map for handling. We - // don't need to store these, as we now know the ignored value is - // only within the map, not the map itself. 
- ignoredVal, _ := ignored.value.Unmark() - - switch { - case ignored.value.IsNull() || ignoredVal.LengthInt() == 0: - priorMap = map[string]cty.Value{} - default: - priorMap = ignoredVal.AsValueMap() - } - - key := ignored.key.AsString() - priorElem, keep := priorMap[key] - - switch { - case !keep: - // this didn't exist in the old map value, so we're keeping the - // "absence" of the key by removing it from the config - delete(configMap, key) - default: - configMap[key] = priorElem - } - } - - var newVal cty.Value - switch { - case len(configMap) > 0: - newVal = cty.MapVal(configMap) - case v.IsNull(): - // if the config value was null, and no values remain in the map, - // reset the value to null. - newVal = v - default: - newVal = cty.MapValEmpty(v.Type().ElementType()) - } - - if len(vMarks) > 0 { - newVal = newVal.WithMarks(vMarks) - } - - return newVal, nil - }) - return ret, nil -} - -// readDataSource handles everything needed to call ReadDataSource on the provider. -// A previously evaluated configVal can be passed in, or a new one is generated -// from the resource configuration. 
-func (n *NodeAbstractResourceInstance) readDataSource(ctx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var newVal cty.Value - - config := *n.Config - - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return newVal, diags - } - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - return newVal, diags - } - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) - return newVal, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return newVal, diags - } - - // Unmark before sending to provider, will re-mark before returning - var pvm []cty.PathValueMarks - configVal, pvm = configVal.UnmarkDeepWithPaths() - - log.Printf("[TRACE] readDataSource: Re-validating config for %s", n.Addr) - validateResp := provider.ValidateDataResourceConfig( - providers.ValidateDataResourceConfigRequest{ - TypeName: n.Addr.ContainingResource().Resource.Type, - Config: configVal, - }, - ) - diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - if diags.HasErrors() { - return newVal, diags - } - - // If we get down here then our configuration is complete and we're read - // to actually call the provider to read the data. 
- log.Printf("[TRACE] readDataSource: %s configuration is complete, so reading from provider", n.Addr) - - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(n.Addr, states.CurrentGen, plans.Read, cty.NullVal(configVal.Type()), configVal) - })) - if diags.HasErrors() { - return newVal, diags - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.ContainingResource().Resource.Type, - Config: configVal, - ProviderMeta: metaConfigVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - if diags.HasErrors() { - return newVal, diags - } - newVal = resp.State - if newVal == cty.NilVal { - // This can happen with incompletely-configured mocks. We'll allow it - // and treat it as an alias for a properly-typed null value. - newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), - ), - )) - } - if diags.HasErrors() { - return newVal, diags - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider, n.Addr, - ), - )) - } - - if !newVal.IsNull() && !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own 
issue tracker.", - n.ResolvedProvider, n.Addr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. - newVal = cty.UnknownAsNull(newVal) - } - - if len(pvm) > 0 { - newVal = newVal.MarkWithPaths(pvm) - } - - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Addr, states.CurrentGen, newVal, diags.Err()) - })) - - return newVal, diags -} - -func (n *NodeAbstractResourceInstance) providerMetas(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return metaConfigVal, diags.Append(err) - } - if providerSchema == nil { - return metaConfigVal, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - } - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ResolvedProvider.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if providerSchema.ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ResolvedProvider.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr.Resource), - Subject: &m.ProviderRange, - }) - } else { - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, providerSchema.ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - } - } - } - return metaConfigVal, diags -} - 
-// planDataSource deals with the main part of the data resource lifecycle: -// either actually reading from the data source or generating a plan to do so. -// -// currentState is the current state for the data source, and the new state is -// returned. While data sources are read-only, we need to start with the prior -// state to determine if we have a change or not. If we needed to read a new -// value, but it still matches the previous state, then we can record a NoNop -// change. If the states don't match then we record a Read change so that the -// new value is applied to the state. -func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRuleSeverity tfdiags.Severity, skipPlanChanges bool) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var keyData instances.RepetitionData - var configVal cty.Value - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return nil, nil, keyData, diags.Append(err) - } - if providerSchema == nil { - return nil, nil, keyData, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - } - - config := *n.Config - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) - return nil, nil, keyData, diags - } - - objTy := schema.ImpliedType() - priorVal := cty.NullVal(objTy) - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) - - checkDiags := evalCheckRules( - addrs.ResourcePrecondition, - n.Config.Preconditions, - ctx, n.Addr, keyData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - 
if diags.HasErrors() { - return nil, nil, keyData, diags // failed preconditions prevent further evaluation - } - - var configDiags tfdiags.Diagnostics - configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, nil, keyData, diags - } - - unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths() - - configKnown := configVal.IsWhollyKnown() - depsPending := n.dependenciesHavePendingChanges(ctx) - // If our configuration contains any unknown values, or we depend on any - // unknown values then we must defer the read to the apply phase by - // producing a "Read" change for this resource, and a placeholder value for - // it in the state. - if depsPending || !configKnown { - // We can't plan any changes if we're only refreshing, so the only - // value we can set here is whatever was in state previously. - if skipPlanChanges { - plannedNewState := &states.ResourceInstanceObject{ - Value: priorVal, - Status: states.ObjectReady, - } - - return nil, plannedNewState, keyData, diags - } - - var reason plans.ResourceInstanceChangeActionReason - switch { - case !configKnown: - log.Printf("[TRACE] planDataSource: %s configuration not fully known yet, so deferring to apply phase", n.Addr) - reason = plans.ResourceInstanceReadBecauseConfigUnknown - case depsPending: - // NOTE: depsPending can be true at the same time as configKnown - // is false; configKnown takes precedence because it's more - // specific. 
- log.Printf("[TRACE] planDataSource: %s configuration is fully known, at least one dependency has changes pending", n.Addr) - reason = plans.ResourceInstanceReadBecauseDependencyPending - } - - proposedNewVal := objchange.PlannedDataResourceObject(schema, unmarkedConfigVal) - proposedNewVal = proposedNewVal.MarkWithPaths(configMarkPaths) - - // Apply detects that the data source will need to be read by the After - // value containing unknowns from PlanDataResourceObject. - plannedChange := &plans.ResourceInstanceChange{ - Addr: n.Addr, - PrevRunAddr: n.prevRunAddr(ctx), - ProviderAddr: n.ResolvedProvider, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: proposedNewVal, - }, - ActionReason: reason, - } - - plannedNewState := &states.ResourceInstanceObject{ - Value: proposedNewVal, - Status: states.ObjectPlanned, - } - - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Addr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) - })) - - return plannedChange, plannedNewState, keyData, diags - } - - // We have a complete configuration with no dependencies to wait on, so we - // can read the data source into the state. - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, nil, keyData, diags - } - - plannedNewState := &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - return nil, plannedNewState, keyData, diags -} - -// dependenciesHavePendingChanges determines whether any managed resource the -// receiver depends on has a change pending in the plan, in which case we'd -// need to override the usual behavior of immediately reading from the data -// source where possible, and instead defer the read until the apply step. 
-func (n *NodeAbstractResourceInstance) dependenciesHavePendingChanges(ctx EvalContext) bool { - nModInst := n.Addr.Module - nMod := nModInst.Module() - - // Check and see if any depends_on dependencies have - // changes, since they won't show up as changes in the - // configuration. - changes := ctx.Changes() - - depsToUse := n.dependsOn - - if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { - if n.Config.HasCustomConditions() { - // For a data resource with custom conditions we need to look at - // the full set of resource dependencies -- both direct and - // indirect -- because an upstream update might be what's needed - // in order to make a condition pass. - depsToUse = n.Dependencies - } - } - - for _, d := range depsToUse { - if d.Resource.Mode == addrs.DataResourceMode { - // Data sources have no external side effects, so they pose a need - // to delay this read. If they do have a change planned, it must be - // because of a dependency on a managed resource, in which case - // we'll also encounter it in this list of dependencies. - continue - } - - for _, change := range changes.GetChangesForConfigResource(d) { - changeModInst := change.Addr.Module - changeMod := changeModInst.Module() - - if changeMod.Equal(nMod) && !changeModInst.Equal(nModInst) { - // Dependencies are tracked by configuration address, which - // means we may have changes from other instances of parent - // modules. The actual reference can only take effect within - // the same module instance, so skip any that aren't an exact - // match - continue - } - - if change != nil && change.Action != plans.NoOp { - return true - } - } - } - return false -} - -// apply deals with the main part of the data resource lifecycle: either -// actually reading from the data source or generating a plan to do so. 
-func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var keyData instances.RepetitionData - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return nil, keyData, diags.Append(err) - } - if providerSchema == nil { - return nil, keyData, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - } - - if planned != nil && planned.Action != plans.Read && planned.Action != plans.NoOp { - // If any other action gets in here then that's always a bug; this - // EvalNode only deals with reading. - diags = diags.Append(fmt.Errorf( - "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", - planned.Action, n.Addr, - )) - return nil, keyData, diags - } - - config := *n.Config - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) - return nil, keyData, diags - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData = EvalDataForInstanceKey(n.Addr.Resource.Key, forEach) - - checkDiags := evalCheckRules( - addrs.ResourcePrecondition, - n.Config.Preconditions, - ctx, n.Addr, keyData, - tfdiags.Error, - ) - diags = diags.Append(checkDiags) - if diags.HasErrors() { - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Addr, states.CurrentGen, planned.Before, diags.Err()) - })) - return nil, keyData, diags // failed preconditions prevent further evaluation - } - - if planned.Action == plans.NoOp { - // If we didn't actually plan to read this then we have nothing more - // to do; 
we're evaluating this only for incidentals like the - // precondition/postcondition checks. - return nil, keyData, diags - } - - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, keyData, diags - } - - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, keyData, diags - } - - state := &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - return state, keyData, diags -} - -// evalApplyProvisioners determines if provisioners need to be run, and if so -// executes the provisioners for a resource and returns an updated error if -// provisioning fails. -func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - if state == nil { - log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) - return nil - } - if when == configs.ProvisionerWhenCreate && !createNew { - // If we're not creating a new resource, then don't run provisioners - log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) - return nil - } - if state.Status == states.ObjectTainted { - // No point in provisioning an object that is already tainted, since - // it's going to get recreated on the next apply anyway. 
- log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) - return nil - } - - provs := filterProvisioners(n.Config, when) - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil - } - - // Call pre hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstance(n.Addr, state.Value) - })) - if diags.HasErrors() { - return diags - } - - // If there are no errors, then we append it to our output error - // if we have one, otherwise we just output it. - diags = diags.Append(n.applyProvisioners(ctx, state, when, provs)) - if diags.HasErrors() { - log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr) - return diags - } - - // Call post hook - return diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstance(n.Addr, state.Value) - })) -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option. -func filterProvisioners(config *configs.Resource, when configs.ProvisionerWhen) []*configs.Provisioner { - // Fast path the zero case - if config == nil || config.Managed == nil { - return nil - } - - if len(config.Managed.Provisioners) == 0 { - return nil - } - - result := make([]*configs.Provisioner, 0, len(config.Managed.Provisioners)) - for _, p := range config.Managed.Provisioners { - if p.When == when { - result = append(result, p) - } - } - - return result -} - -// applyProvisioners executes the provisioners for a resource. -func (n *NodeAbstractResourceInstance) applyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, when configs.ProvisionerWhen, provs []*configs.Provisioner) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // this self is only used for destroy provisioner evaluation, and must - // refer to the last known value of the resource. 
- self := state.Value - - var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics) - switch when { - case configs.ProvisionerWhenDestroy: - evalScope = n.evalDestroyProvisionerConfig - default: - evalScope = n.evalProvisionerConfig - } - - // If there's a connection block defined directly inside the resource block - // then it'll serve as a base connection configuration for all of the - // provisioners. - var baseConn hcl.Body - if n.Config.Managed != nil && n.Config.Managed.Connection != nil { - baseConn = n.Config.Managed.Connection.Config - } - - for _, prov := range provs { - log.Printf("[TRACE] applyProvisioners: provisioning %s with %q", n.Addr, prov.Type) - - // Get the provisioner - provisioner, err := ctx.Provisioner(prov.Type) - if err != nil { - return diags.Append(err) - } - - schema, err := ctx.ProvisionerSchema(prov.Type) - if err != nil { - // This error probably won't be a great diagnostic, but in practice - // we typically catch this problem long before we get here, so - // it should be rare to return via this codepath. - diags = diags.Append(err) - return diags - } - - config, configDiags := evalScope(ctx, prov.Config, self, schema) - diags = diags.Append(configDiags) - if diags.HasErrors() { - return diags - } - - // If the provisioner block contains a connection block of its own then - // it can override the base connection configuration, if any. - var localConn hcl.Body - if prov.Connection != nil { - localConn = prov.Connection.Config - } - - var connBody hcl.Body - switch { - case baseConn != nil && localConn != nil: - // Our standard merging logic applies here, similar to what we do - // with _override.tf configuration files: arguments from the - // base connection block will be masked by any arguments of the - // same name in the local connection block. 
- connBody = configs.MergeBodies(baseConn, localConn) - case baseConn != nil: - connBody = baseConn - case localConn != nil: - connBody = localConn - } - - // start with an empty connInfo - connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) - - if connBody != nil { - var connInfoDiags tfdiags.Diagnostics - connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema) - diags = diags.Append(connInfoDiags) - if diags.HasErrors() { - return diags - } - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstanceStep(n.Addr, prov.Type) - }) - if err != nil { - return diags.Append(err) - } - } - - // The output function - outputFn := func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(n.Addr, prov.Type, msg) - return HookActionContinue, nil - }) - } - - // If our config or connection info contains any marked values, ensure - // those are stripped out before sending to the provisioner. Unlike - // resources, we have no need to capture the marked paths and reapply - // later. - unmarkedConfig, configMarks := config.UnmarkDeep() - unmarkedConnInfo, _ := connInfo.UnmarkDeep() - - // Marks on the config might result in leaking sensitive values through - // provisioner logging, so we conservatively suppress all output in - // this case. This should not apply to connection info values, which - // provisioners ought not to be logging anyway. 
- if len(configMarks) > 0 { - outputFn = func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(n.Addr, prov.Type, "(output suppressed due to sensitive value in config)") - return HookActionContinue, nil - }) - } - } - - output := CallbackUIOutput{OutputFn: outputFn} - resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: unmarkedConfig, - Connection: unmarkedConnInfo, - UIOutput: &output, - }) - applyDiags := resp.Diagnostics.InConfigBody(prov.Config, n.Addr.String()) - - // Call post hook - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstanceStep(n.Addr, prov.Type, applyDiags.Err()) - }) - - switch prov.OnFailure { - case configs.ProvisionerOnFailureContinue: - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) - } else { - // Maybe there are warnings that we still want to see - diags = diags.Append(applyDiags) - } - default: - diags = diags.Append(applyDiags) - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) - return diags - } - } - - // Deal with the hook - if hookErr != nil { - return diags.Append(hookErr) - } - } - - return diags -} - -func (n *NodeAbstractResourceInstance) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - - keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) - - config, _, configDiags := ctx.EvaluateBlock(body, schema, n.ResourceInstanceAddr().Resource, keyData) - diags = diags.Append(configDiags) - - return config, diags -} - -// during destroy a provisioner can only evaluate within the scope of the parent resource 
-func (n *NodeAbstractResourceInstance) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // For a destroy-time provisioner forEach is intentionally nil here, - // which EvalDataForInstanceKey responds to by not populating EachValue - // in its result. That's okay because each.value is prohibited for - // destroy-time provisioners. - keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, nil) - - evalScope := ctx.EvaluationScope(n.ResourceInstanceAddr().Resource, keyData) - config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData) - diags = diags.Append(evalDiags) - - return config, diags -} - -// apply accepts an applyConfig, instead of using n.Config, so destroy plans can -// send a nil config. The keyData information can be empty if the config is -// nil, since it is only used to evaluate the configuration. -func (n *NodeAbstractResourceInstance) apply( - ctx EvalContext, - state *states.ResourceInstanceObject, - change *plans.ResourceInstanceChange, - applyConfig *configs.Resource, - keyData instances.RepetitionData, - createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { - - var diags tfdiags.Diagnostics - if state == nil { - state = &states.ResourceInstanceObject{} - } - - if change.Action == plans.NoOp { - // If this is a no-op change then we don't want to actually change - // anything, so we'll just echo back the state we were given and - // let our internal checks and updates proceed. 
- log.Printf("[TRACE] NodeAbstractResourceInstance.apply: skipping %s because it has no planned action", n.Addr) - return state, diags - } - - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return nil, diags.Append(err) - } - schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) - return nil, diags - } - - log.Printf("[INFO] Starting apply for %s", n.Addr) - - configVal := cty.NullVal(cty.DynamicPseudoType) - if applyConfig != nil { - var configDiags tfdiags.Diagnostics - configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags - } - } - - if !configVal.IsWhollyKnown() { - // We don't have a pretty format function for a path, but since this is - // such a rare error, we can just drop the raw GoString values in here - // to make sure we have something to debug with. 
- var unknownPaths []string - cty.Transform(configVal, func(p cty.Path, v cty.Value) (cty.Value, error) { - if !v.IsKnown() { - unknownPaths = append(unknownPaths, fmt.Sprintf("%#v", p)) - } - return v, nil - }) - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Configuration contains unknown value", - fmt.Sprintf("configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)\n"+ - "The following paths in the resource configuration are unknown:\n%s", - n.Addr, - strings.Join(unknownPaths, "\n"), - ), - )) - return nil, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return nil, diags - } - - log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action) - - // If our config, Before or After value contain any marked values, - // ensure those are stripped out before sending - // this to the provider - unmarkedConfigVal, _ := configVal.UnmarkDeep() - unmarkedBefore, beforePaths := change.Before.UnmarkDeepWithPaths() - unmarkedAfter, afterPaths := change.After.UnmarkDeepWithPaths() - - // If we have an Update action, our before and after values are equal, - // and only differ on their sensitivity, the newVal is the after val - // and we should not communicate with the provider. We do need to update - // the state with this new value, to ensure the sensitivity change is - // persisted. 
- eqV := unmarkedBefore.Equals(unmarkedAfter) - eq := eqV.IsKnown() && eqV.True() - if change.Action == plans.Update && eq && !marksEqual(beforePaths, afterPaths) { - // Copy the previous state, changing only the value - newState := &states.ResourceInstanceObject{ - CreateBeforeDestroy: state.CreateBeforeDestroy, - Dependencies: state.Dependencies, - Private: state.Private, - Status: state.Status, - Value: change.After, - } - return newState, diags - } - - resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Resource.Type, - PriorState: unmarkedBefore, - Config: unmarkedConfigVal, - PlannedState: unmarkedAfter, - PlannedPrivate: change.Private, - ProviderMeta: metaConfigVal, - }) - applyDiags := resp.Diagnostics - if applyConfig != nil { - applyDiags = applyDiags.InConfigBody(applyConfig.Config, n.Addr.String()) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - // If we have paths to mark, mark those on this new value - if len(afterPaths) > 0 { - newVal = newVal.MarkWithPaths(afterPaths) - } - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". 
- if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - if !diags.HasErrors() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid nil value after apply for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.String(), n.Addr.String(), - ), - )) - } - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ResolvedProvider.String(), tfdiags.FormatErrorPrefixed(err, n.Addr.String()), - ), - )) - } - diags = diags.Append(conformDiags) - if conformDiags.HasErrors() { - // Bail early in this particular case, because an object that doesn't - // conform to the schema can't be saved in the state anyway -- the - // serializer will reject it. - return nil, diags - } - - // After this point we have a type-conforming result object and so we - // must always run to completion to ensure it can be saved. If n.Error - // is set then we must not return a non-nil error, in order to allow - // evaluation to continue to a later point where our state object will - // be saved. - - // By this point there must not be any unknown values remaining in our - // object, because we've applied the change and we can't save unknowns - // in our persistent state. If any are present then we will indicate an - // error (which is always a bug in the provider) but we will also replace - // them with nulls so that we can successfully save the portions of the - // returned value that are known. 
- if !newVal.IsWhollyKnown() { - // To generate better error messages, we'll go for a walk through the - // value and make a separate diagnostic for each unknown value we - // find. - cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { - if !val.IsKnown() { - pathStr := tfdiags.FormatCtyPath(path) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", - n.Addr, pathStr, - ), - )) - } - return true, nil - }) - - // NOTE: This operation can potentially be lossy if there are multiple - // elements in a set that differ only by unknown values: after - // replacing with null these will be merged together into a single set - // element. Since we can only get here in the presence of a provider - // bug, we accept this because storing a result here is always a - // best-effort sort of thing. - newVal = cty.UnknownAsNull(newVal) - } - - if change.Action != plans.Delete && !diags.HasErrors() { - // Only values that were marked as unknown in the planned value are allowed - // to change during the apply operation. (We do this after the unknown-ness - // check above so that we also catch anything that became unknown after - // being known during plan.) - // - // If we are returning other errors anyway then we'll give this - // a pass since the other errors are usually the explanation for - // this one and so it's more helpful to let the user focus on the - // root cause rather than distract with this extra problem. 
- if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ResolvedProvider.String(), n.Addr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - - // The sort of inconsistency we won't catch here is if a known value - // in the plan is changed during apply. That can cause downstream - // problems because a dependent resource would make its own plan based - // on the planned value, and thus get a different result during the - // apply phase. This will usually lead to a "Provider produced invalid plan" - // error that incorrectly blames the downstream resource for the change. 
- - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent result after apply", - fmt.Sprintf( - "When applying changes to %s, provider %q produced an unexpected new value: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.Addr, n.ResolvedProvider.String(), tfdiags.FormatError(err), - ), - )) - } - } - } - } - - // If a provider returns a null or non-null object at the wrong time then - // we still want to save that but it often causes some confusing behaviors - // where it seems like Terraform is failing to take any action at all, - // so we'll generate some errors to draw attention to it. - if !diags.HasErrors() { - if change.Action == plans.Delete && !newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", - change.Action, n.Addr, - ), - )) - } - if change.Action != plans.Delete && newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", - change.Action, n.Addr, - ), - )) - } - } - - switch { - case diags.HasErrors() && newVal.IsNull(): - // Sometimes providers return a null value when an operation fails for - // some reason, but we'd rather keep the prior state so that the error - // can be corrected on a subsequent run. 
We must only do this for null - // new value though, or else we may discard partial updates the - // provider was able to complete. Otherwise, we'll continue using the - // prior state as the new value, making this effectively a no-op. If - // the item really _has_ been deleted then our next refresh will detect - // that and fix it up. - return state.DeepCopy(), diags - - case diags.HasErrors() && !newVal.IsNull(): - // if we have an error, make sure we restore the object status in the new state - newState := &states.ResourceInstanceObject{ - Status: state.Status, - Value: newVal, - Private: resp.Private, - CreateBeforeDestroy: createBeforeDestroy, - } - - // if the resource was being deleted, the dependencies are not going to - // be recalculated and we need to restore those as well. - if change.Action == plans.Delete { - newState.Dependencies = state.Dependencies - } - - return newState, diags - - case !newVal.IsNull(): - // Non error case with a new state - newState := &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: newVal, - Private: resp.Private, - CreateBeforeDestroy: createBeforeDestroy, - } - return newState, diags - - default: - // Non error case, were the object was deleted - return nil, diags - } -} - -func (n *NodeAbstractResourceInstance) prevRunAddr(ctx EvalContext) addrs.AbsResourceInstance { - return resourceInstancePrevRunAddr(ctx, n.Addr) -} - -func resourceInstancePrevRunAddr(ctx EvalContext, currentAddr addrs.AbsResourceInstance) addrs.AbsResourceInstance { - table := ctx.MoveResults() - return table.OldAddr(currentAddr) -} diff --git a/internal/terraform/node_resource_abstract_instance_test.go b/internal/terraform/node_resource_abstract_instance_test.go deleted file mode 100644 index 22a5b0b5f766..000000000000 --- a/internal/terraform/node_resource_abstract_instance_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - 
"github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeAbstractResourceInstanceProvider(t *testing.T) { - tests := []struct { - Addr addrs.AbsResourceInstance - Config *configs.Resource - StoredProviderConfig addrs.AbsProviderConfig - Want addrs.Provider - }{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "baz", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "hashicorp", - Type: "null", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "terraform_remote_state", - Name: "baz", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - Want: addrs.Provider{ - // As a special case, the type prefix "terraform_" maps to - // the builtin provider, not the default one. - Hostname: addrs.BuiltInProviderHost, - Namespace: addrs.BuiltInProviderNamespace, - Type: "terraform", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "baz", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - Config: &configs.Resource{ - // Just enough configs.Resource for the Provider method. Not - // actually valid for general use. - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - // The config overrides the default behavior. - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "terraform_remote_state", - Name: "baz", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - Config: &configs.Resource{ - // Just enough configs.Resource for the Provider method. 
Not - // actually valid for general use. - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - // The config overrides the default behavior. - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "null_resource", - Name: "baz", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - Config: nil, - StoredProviderConfig: addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "null", - }, - }, - // The stored provider config overrides the default behavior. - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "null", - }, - }, - } - - for _, test := range tests { - var name string - if test.Config != nil { - name = fmt.Sprintf("%s with configured %s", test.Addr, test.Config.Provider) - } else { - name = fmt.Sprintf("%s with no configuration", test.Addr) - } - t.Run(name, func(t *testing.T) { - node := &NodeAbstractResourceInstance{ - // Just enough NodeAbstractResourceInstance for the Provider - // function. (This would not be valid for some other functions.) 
- Addr: test.Addr, - NodeAbstractResource: NodeAbstractResource{ - Addr: test.Addr.ConfigResource(), - Config: test.Config, - storedProviderConfig: test.StoredProviderConfig, - }, - } - got := node.Provider() - if got != test.Want { - t.Errorf("wrong result\naddr: %s\nconfig: %#v\ngot: %s\nwant: %s", test.Addr, test.Config, got, test.Want) - } - }) - } -} - -func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) { - state := states.NewState() - ctx := new(MockEvalContext) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - - obj := &states.ResourceInstanceObject{ - Value: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-abc123"), - }), - Status: states.ObjectReady, - } - - node := &NodeAbstractResourceInstance{ - Addr: mustResourceInstanceAddr("aws_instance.foo"), - // instanceState: obj, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - ctx.ProviderProvider = mockProvider - ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() - - err := node.writeResourceInstanceState(ctx, obj, workingState) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] - `) -} diff --git a/internal/terraform/node_resource_abstract_test.go b/internal/terraform/node_resource_abstract_test.go deleted file mode 100644 index 51ad8d9698ed..000000000000 --- a/internal/terraform/node_resource_abstract_test.go +++ /dev/null @@ -1,312 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - 
"github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeAbstractResourceProvider(t *testing.T) { - tests := []struct { - Addr addrs.ConfigResource - Config *configs.Resource - Want addrs.Provider - }{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "baz", - }.InModule(addrs.RootModule), - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "hashicorp", - Type: "null", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "terraform_remote_state", - Name: "baz", - }.InModule(addrs.RootModule), - Want: addrs.Provider{ - // As a special case, the type prefix "terraform_" maps to - // the builtin provider, not the default one. - Hostname: addrs.BuiltInProviderHost, - Namespace: addrs.BuiltInProviderNamespace, - Type: "terraform", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "null_resource", - Name: "baz", - }.InModule(addrs.RootModule), - Config: &configs.Resource{ - // Just enough configs.Resource for the Provider method. Not - // actually valid for general use. - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - // The config overrides the default behavior. - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "terraform_remote_state", - Name: "baz", - }.InModule(addrs.RootModule), - Config: &configs.Resource{ - // Just enough configs.Resource for the Provider method. Not - // actually valid for general use. 
- Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - // The config overrides the default behavior. - Want: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - } - - for _, test := range tests { - var name string - if test.Config != nil { - name = fmt.Sprintf("%s with configured %s", test.Addr, test.Config.Provider) - } else { - name = fmt.Sprintf("%s with no configuration", test.Addr) - } - t.Run(name, func(t *testing.T) { - node := &NodeAbstractResource{ - // Just enough NodeAbstractResource for the Provider function. - // (This would not be valid for some other functions.) - Addr: test.Addr, - Config: test.Config, - } - got := node.Provider() - if got != test.Want { - t.Errorf("wrong result\naddr: %s\nconfig: %#v\ngot: %s\nwant: %s", test.Addr, test.Config, got, test.Want) - } - }) - } -} - -// Make sure ProvideBy returns the final resolved provider -func TestNodeAbstractResourceSetProvider(t *testing.T) { - node := &NodeAbstractResource{ - - // Just enough NodeAbstractResource for the Provider function. - // (This would not be valid for some other functions.) - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "terraform_remote_state", - Name: "baz", - }.InModule(addrs.RootModule), - Config: &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "terraform_remote_state", - Name: "baz", - // Just enough configs.Resource for the Provider method. Not - // actually valid for general use. 
- Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - }, - } - - p, exact := node.ProvidedBy() - if exact { - t.Fatalf("no exact provider should be found from this confniguration, got %q\n", p) - } - - // the implied non-exact provider should be "terraform" - lpc, ok := p.(addrs.LocalProviderConfig) - if !ok { - t.Fatalf("expected LocalProviderConfig, got %#v\n", p) - } - - if lpc.LocalName != "terraform" { - t.Fatalf("expected non-exact provider of 'terraform', got %q", lpc.LocalName) - } - - // now set a resolved provider for the resource - resolved := addrs.AbsProviderConfig{ - Provider: addrs.Provider{ - Hostname: addrs.DefaultProviderRegistryHost, - Namespace: "awesomecorp", - Type: "happycloud", - }, - Module: addrs.RootModule, - Alias: "test", - } - - node.SetProvider(resolved) - p, exact = node.ProvidedBy() - if !exact { - t.Fatalf("exact provider should be found, got %q\n", p) - } - - apc, ok := p.(addrs.AbsProviderConfig) - if !ok { - t.Fatalf("expected AbsProviderConfig, got %#v\n", p) - } - - if apc.String() != resolved.String() { - t.Fatalf("incorrect resolved config: got %#v, wanted %#v\n", apc, resolved) - } -} - -func TestNodeAbstractResource_ReadResourceInstanceState(t *testing.T) { - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - // This test does not configure the provider, but the mock provider will - // check that this was called and report errors. 
- mockProvider.ConfigureProviderCalled = true - - tests := map[string]struct { - State *states.State - Node *NodeAbstractResource - ExpectedInstanceId string - }{ - "ReadState gets primary instance state": { - State: states.BuildState(func(s *states.SyncState) { - providerAddr := addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - } - oneAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Absolute(addrs.RootModuleInstance) - s.SetResourceProvider(oneAddr, providerAddr) - s.SetResourceInstanceCurrent(oneAddr.Instance(addrs.NoKey), &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, providerAddr) - }), - Node: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("aws_instance.bar"), - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - ExpectedInstanceId: "i-abc123", - }, - } - - for k, test := range tests { - t.Run(k, func(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = test.State.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() - - ctx.ProviderProvider = providers.Interface(mockProvider) - - got, readDiags := test.Node.readResourceInstanceState(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) - if readDiags.HasErrors() { - t.Fatalf("[%s] Got err: %#v", k, readDiags.Err()) - } - - expected := test.ExpectedInstanceId - - if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { - t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) - } - }) - } -} - -func TestNodeAbstractResource_ReadResourceInstanceStateDeposed(t *testing.T) { - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - 
}, - }, - }) - // This test does not configure the provider, but the mock provider will - // check that this was called and report errors. - mockProvider.ConfigureProviderCalled = true - - tests := map[string]struct { - State *states.State - Node *NodeAbstractResource - ExpectedInstanceId string - }{ - "ReadStateDeposed gets deposed instance": { - State: states.BuildState(func(s *states.SyncState) { - providerAddr := addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - } - oneAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Absolute(addrs.RootModuleInstance) - s.SetResourceProvider(oneAddr, providerAddr) - s.SetResourceInstanceDeposed(oneAddr.Instance(addrs.NoKey), states.DeposedKey("00000001"), &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"i-abc123"}`), - }, providerAddr) - }), - Node: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("aws_instance.bar"), - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - ExpectedInstanceId: "i-abc123", - }, - } - for k, test := range tests { - t.Run(k, func(t *testing.T) { - ctx := new(MockEvalContext) - ctx.StateState = test.State.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() - ctx.ProviderProvider = providers.Interface(mockProvider) - - key := states.DeposedKey("00000001") // shim from legacy state assigns 0th deposed index this key - - got, readDiags := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), key) - if readDiags.HasErrors() { - t.Fatalf("[%s] Got err: %#v", k, readDiags.Err()) - } - - expected := test.ExpectedInstanceId - - if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { - t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) - } - 
}) - } -} diff --git a/internal/terraform/node_resource_apply.go b/internal/terraform/node_resource_apply.go deleted file mode 100644 index 6f7b46af6e9a..000000000000 --- a/internal/terraform/node_resource_apply.go +++ /dev/null @@ -1,112 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// nodeExpandApplyableResource handles the first layer of resource -// expansion during apply. Even though the resource instances themselves are -// already expanded from the plan, we still need to expand the -// NodeApplyableResource nodes into their respective modules. -type nodeExpandApplyableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandApplyableResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandApplyableResource)(nil) - _ GraphNodeReferencer = (*nodeExpandApplyableResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandApplyableResource)(nil) - _ graphNodeExpandsInstances = (*nodeExpandApplyableResource)(nil) - _ GraphNodeTargetable = (*nodeExpandApplyableResource)(nil) -) - -func (n *nodeExpandApplyableResource) expandsInstances() { -} - -func (n *nodeExpandApplyableResource) References() []*addrs.Reference { - return (&NodeApplyableResource{NodeAbstractResource: n.NodeAbstractResource}).References() -} - -func (n *nodeExpandApplyableResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -func (n *nodeExpandApplyableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - moduleInstances := expander.ExpandModule(n.Addr.Module) - for _, module := range moduleInstances { - g.Add(&NodeApplyableResource{ - NodeAbstractResource: n.NodeAbstractResource, - Addr: 
n.Addr.Resource.Absolute(module), - }) - } - addRootNodeToGraph(&g) - - return &g, nil -} - -// NodeApplyableResource represents a resource that is "applyable": -// it may need to have its record in the state adjusted to match configuration. -// -// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, -// it should be inserted into the same graph as any instances of the nodes -// with dependency edges ensuring that the resource is evaluated before any -// of its instances, which will turn ensure that the whole-resource record -// in the state is suitably prepared to receive any updates to instances. -type NodeApplyableResource struct { - *NodeAbstractResource - - Addr addrs.AbsResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeApplyableResource)(nil) - _ GraphNodeConfigResource = (*NodeApplyableResource)(nil) - _ GraphNodeExecutable = (*NodeApplyableResource)(nil) - _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) - _ GraphNodeReferencer = (*NodeApplyableResource)(nil) -) - -func (n *NodeApplyableResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -func (n *NodeApplyableResource) References() []*addrs.Reference { - if n.Config == nil { - log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) - return nil - } - - var result []*addrs.Reference - - // Since this node type only updates resource-level metadata, we only - // need to worry about the parts of the configuration that affect - // our "each mode": the count and for_each meta-arguments. - refs, _ := lang.ReferencesInExpr(n.Config.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(n.Config.ForEach) - result = append(result, refs...) 
- - return result -} - -// GraphNodeExecutable -func (n *NodeApplyableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - if n.Config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name()) - return nil - } - - return n.writeResourceState(ctx, n.Addr) -} diff --git a/internal/terraform/node_resource_apply_instance.go b/internal/terraform/node_resource_apply_instance.go deleted file mode 100644 index 82aebec296c4..000000000000 --- a/internal/terraform/node_resource_apply_instance.go +++ /dev/null @@ -1,486 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/objchange" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// NodeApplyableResourceInstance represents a resource instance that is -// "applyable": it is ready to be applied and is represented by a diff. -// -// This node is for a specific instance of a resource. It will usually be -// accompanied in the graph by a NodeApplyableResource representing its -// containing resource, and should depend on that node to ensure that the -// state is properly prepared to receive changes to instances. -type NodeApplyableResourceInstance struct { - *NodeAbstractResourceInstance - - graphNodeDeposer // implementation of GraphNodeDeposerConfig - - // If this node is forced to be CreateBeforeDestroy, we need to record that - // in the state to. - ForceCreateBeforeDestroy bool - - // forceReplace are resource instance addresses where the user wants to - // force generating a replace action. 
This set isn't pre-filtered, so - // it might contain addresses that have nothing to do with the resource - // that this node represents, which the node itself must therefore ignore. - forceReplace []addrs.AbsResourceInstance -} - -var ( - _ GraphNodeConfigResource = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeExecutable = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeAttachDependencies = (*NodeApplyableResourceInstance)(nil) -) - -// CreateBeforeDestroy returns this node's CreateBeforeDestroy status. -func (n *NodeApplyableResourceInstance) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy { - return n.ForceCreateBeforeDestroy - } - - if n.Config != nil && n.Config.Managed != nil { - return n.Config.Managed.CreateBeforeDestroy - } - - return false -} - -func (n *NodeApplyableResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = v - return nil -} - -// GraphNodeCreator -func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeReferencer, overriding NodeAbstractResourceInstance -func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { - // Start with the usual resource instance implementation - ret := n.NodeAbstractResourceInstance.References() - - // Applying a resource must also depend on the destruction of any of its - // dependencies, since this may for example affect the outcome of - // evaluating an entire list of resources with "count" set (by reducing - // the count). - // - // However, we can't do this in create_before_destroy mode because that - // would create a dependency cycle. 
We make a compromise here of requiring - // changes to be updated across two applies in this case, since the first - // plan will use the old values. - if !n.CreateBeforeDestroy() { - for _, ref := range ret { - switch tr := ref.Subject.(type) { - case addrs.ResourceInstance: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - case addrs.Resource: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - } - } - } - - return ret -} - -// GraphNodeAttachDependencies -func (n *NodeApplyableResourceInstance) AttachDependencies(deps []addrs.ConfigResource) { - n.Dependencies = deps -} - -// GraphNodeExecutable -func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - if n.Config == nil { - // If there is no config, and there is no change, then we have nothing - // to do and the change was left in the plan for informational - // purposes only. - changes := ctx.Changes() - csrc := changes.GetResourceInstanceChange(n.ResourceInstanceAddr(), states.CurrentGen) - if csrc == nil || csrc.Action == plans.NoOp { - log.Printf("[DEBUG] NodeApplyableResourceInstance: No config or planned change recorded for %s", n.Addr) - return nil - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource node has no configuration attached", - fmt.Sprintf( - "The graph node for %s has no configuration attached to it. 
This suggests a bug in Terraform's apply graph builder; please report it!", - addr, - ), - )) - return diags - } - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx) - case addrs.DataResourceMode: - return n.dataResourceExecute(ctx) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - change, err := n.readDiff(ctx, providerSchema) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - // Stop early if we don't actually have a diff - if change == nil { - return diags - } - if change.Action != plans.Read && change.Action != plans.NoOp { - diags = diags.Append(fmt.Errorf("nonsensical planned action %#v for %s; this is a bug in Terraform", change.Action, n.Addr)) - } - - // In this particular call to applyDataSource we include our planned - // change, which signals that we expect this read to complete fully - // with no unknown values; it'll produce an error if not. - state, repeatData, applyDiags := n.applyDataSource(ctx, change) - diags = diags.Append(applyDiags) - if diags.HasErrors() { - return diags - } - - if state != nil { - // If n.applyDataSource returned a nil state object with no accompanying - // errors then it determined that the given change doesn't require - // actually reading the data (e.g. because it was already read during - // the plan phase) and so we're only running through here to get the - // extra details like precondition/postcondition checks. 
- diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState)) - if diags.HasErrors() { - return diags - } - } - - diags = diags.Append(n.writeChange(ctx, nil, "")) - - diags = diags.Append(updateStateHook(ctx)) - - // Post-conditions might block further progress. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), - repeatData, - tfdiags.Error, - ) - diags = diags.Append(checkDiags) - - return diags -} - -func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var state *states.ResourceInstanceObject - var createBeforeDestroyEnabled bool - var deposedKey states.DeposedKey - - addr := n.ResourceInstanceAddr().Resource - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // Get the saved diff for apply - diffApply, err := n.readDiff(ctx, providerSchema) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // We don't want to do any destroys - // (these are handled by NodeDestroyResourceInstance instead) - if diffApply == nil || diffApply.Action == plans.Delete { - return diags - } - if diffApply.Action == plans.Read { - diags = diags.Append(fmt.Errorf("nonsensical planned action %#v for %s; this is a bug in Terraform", diffApply.Action, n.Addr)) - } - - destroy := (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) - // Get the stored action for CBD if we have a plan already - createBeforeDestroyEnabled = diffApply.Change.Action == plans.CreateThenDelete - - if destroy 
&& n.CreateBeforeDestroy() { - createBeforeDestroyEnabled = true - } - - if createBeforeDestroyEnabled { - state := ctx.State() - if n.PreallocatedDeposedKey == states.NotDeposed { - deposedKey = state.DeposeResourceInstanceObject(n.Addr) - } else { - deposedKey = n.PreallocatedDeposedKey - state.DeposeResourceInstanceObjectForceKey(n.Addr, deposedKey) - } - log.Printf("[TRACE] managedResourceExecute: prior object for %s now deposed with key %s", n.Addr, deposedKey) - } - - state, readDiags := n.readResourceInstanceState(ctx, n.ResourceInstanceAddr()) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return diags - } - - // Get the saved diff - diff, err := n.readDiff(ctx, providerSchema) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // Make a new diff, in case we've learned new values in the state - // during apply which we can now incorporate. - diffApply, _, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace) - diags = diags.Append(planDiags) - if diags.HasErrors() { - return diags - } - - // Compare the diffs - diags = diags.Append(n.checkPlannedChange(ctx, diff, diffApply, providerSchema)) - if diags.HasErrors() { - return diags - } - - diffApply = reducePlan(addr, diffApply, false) - // reducePlan may have simplified our planned change - // into a NoOp if it only requires destroying, since destroying - // is handled by NodeDestroyResourceInstance. If so, we'll - // still run through most of the logic here because we do still - // need to deal with other book-keeping such as marking the - // change as "complete", and running the author's postconditions. - - diags = diags.Append(n.preApplyHook(ctx, diffApply)) - if diags.HasErrors() { - return diags - } - - // If there is no change, there was nothing to apply, and we don't need to - // re-write the state, but we do need to re-evaluate postconditions. 
- if diffApply.Action == plans.NoOp { - return diags.Append(n.managedResourcePostconditions(ctx, repeatData)) - } - - state, applyDiags := n.apply(ctx, state, diffApply, n.Config, repeatData, n.CreateBeforeDestroy()) - diags = diags.Append(applyDiags) - - // We clear the change out here so that future nodes don't see a change - // that is already complete. - err = n.writeChange(ctx, nil, "") - if err != nil { - return diags.Append(err) - } - - state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) - - if state != nil { - // dependencies are always updated to match the configuration during apply - state.Dependencies = n.Dependencies - } - err = n.writeResourceInstanceState(ctx, state, workingState) - if err != nil { - return diags.Append(err) - } - - // Run Provisioners - createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace()) - applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate) - // the provisioner errors count as port of the apply error, so we can bundle the diags - diags = diags.Append(applyProvisionersDiags) - - state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) - - err = n.writeResourceInstanceState(ctx, state, workingState) - if err != nil { - return diags.Append(err) - } - - if createBeforeDestroyEnabled && diags.HasErrors() { - if deposedKey == states.NotDeposed { - // This should never happen, and so it always indicates a bug. - // We should evaluate this node only if we've previously deposed - // an object as part of the same operation. - if diffApply != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Attempt to restore non-existent deposed object", - fmt.Sprintf( - "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. 
This is a bug in Terraform; please report it!", - addr, diffApply.Action, - ), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Attempt to restore non-existent deposed object", - fmt.Sprintf( - "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This is a bug in Terraform; please report it!", - addr, - ), - )) - } - } else { - restored := ctx.State().MaybeRestoreResourceInstanceDeposed(addr.Absolute(ctx.Path()), deposedKey) - if restored { - log.Printf("[TRACE] managedResourceExecute: %s deposed object %s was restored as the current object", addr, deposedKey) - } else { - log.Printf("[TRACE] managedResourceExecute: %s deposed object %s remains deposed", addr, deposedKey) - } - } - } - - diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) - diags = diags.Append(updateStateHook(ctx)) - - // Post-conditions might block further progress. We intentionally do this - // _after_ writing the state because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - return diags.Append(n.managedResourcePostconditions(ctx, repeatData)) -} - -func (n *NodeApplyableResourceInstance) managedResourcePostconditions(ctx EvalContext, repeatData instances.RepetitionData) (diags tfdiags.Diagnostics) { - - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), repeatData, - tfdiags.Error, - ) - return diags.Append(checkDiags) -} - -// checkPlannedChange produces errors if the _actual_ expected value is not -// compatible with what was recorded in the plan. -// -// Errors here are most often indicative of a bug in the provider, so our error -// messages will report with that in mind. It's also possible that there's a bug -// in Terraform's Core's own "proposed new value" code in EvalDiff. 
-func (n *NodeApplyableResourceInstance) checkPlannedChange(ctx EvalContext, plannedChange, actualChange *plans.ResourceInstanceChange, providerSchema *ProviderSchema) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - addr := n.ResourceInstanceAddr().Resource - - schema, _ := providerSchema.SchemaForResourceAddr(addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider does not support %q", addr.Resource.Type)) - return diags - } - - absAddr := addr.Absolute(ctx.Path()) - - log.Printf("[TRACE] checkPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) - - if plannedChange.Action != actualChange.Action { - switch { - case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: - // It's okay for an update to become a NoOp once we've filled in - // all of the unknown values, since the final values might actually - // match what was there before after all. 
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - - case (plannedChange.Action == plans.CreateThenDelete && actualChange.Action == plans.DeleteThenCreate) || - (plannedChange.Action == plans.DeleteThenCreate && actualChange.Action == plans.CreateThenDelete): - // If the order of replacement changed, then that is a bug in terraform - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Terraform produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, the planned action changed from %s to %s.\n\nThis is a bug in Terraform and should be reported.", - absAddr, plannedChange.Action, actualChange.Action, - ), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ResolvedProvider.Provider.String(), - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ResolvedProvider.Provider.String(), tfdiags.FormatError(err), - ), - )) - } - return diags -} - -// maybeTainted takes the resource addr, new value, planned change, and possible -// error from an apply operation and return a new instance object marked 
as -// tainted if it appears that a create operation has failed. -func maybeTainted(addr addrs.AbsResourceInstance, state *states.ResourceInstanceObject, change *plans.ResourceInstanceChange, err error) *states.ResourceInstanceObject { - if state == nil || change == nil || err == nil { - return state - } - if state.Status == states.ObjectTainted { - log.Printf("[TRACE] maybeTainted: %s was already tainted, so nothing to do", addr) - return state - } - if change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. - // If there _were_ changes prior to the error, it's the provider's - // responsibility to record the effect of those changes in the - // object value it returned. - log.Printf("[TRACE] maybeTainted: %s encountered an error during creation, so it is now marked as tainted", addr) - return state.AsTainted() - } - return state -} diff --git a/internal/terraform/node_resource_apply_test.go b/internal/terraform/node_resource_apply_test.go deleted file mode 100644 index 4fd994954418..000000000000 --- a/internal/terraform/node_resource_apply_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/states" -) - -func TestNodeApplyableResourceExecute(t *testing.T) { - state := states.NewState() - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - InstanceExpanderExpander: instances.NewExpander(), - } - - t.Run("no config", func(t *testing.T) { - node := NodeApplyableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Config: nil, - }, - Addr: 
mustAbsResourceAddr("test_instance.foo"), - } - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if !state.Empty() { - t.Fatalf("expected no state, got:\n %s", state.String()) - } - }) - - t.Run("simple", func(t *testing.T) { - - node := NodeApplyableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Config: &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "foo", - }, - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - Addr: mustAbsResourceAddr("test_instance.foo"), - } - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if state.Empty() { - t.Fatal("expected resources in state, got empty state") - } - r := state.Resource(mustAbsResourceAddr("test_instance.foo")) - if r == nil { - t.Fatal("test_instance.foo not found in state") - } - }) -} diff --git a/internal/terraform/node_resource_destroy.go b/internal/terraform/node_resource_destroy.go deleted file mode 100644 index a468ad020ba1..000000000000 --- a/internal/terraform/node_resource_destroy.go +++ /dev/null @@ -1,234 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" -) - -// NodeDestroyResourceInstance represents a resource instance that is to be -// destroyed. -type NodeDestroyResourceInstance struct { - *NodeAbstractResourceInstance - - // If DeposedKey is set to anything other than states.NotDeposed then - // this node destroys a deposed object of the associated instance - // rather than its current object. 
- DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeModuleInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeExecutable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) -) - -func (n *NodeDestroyResourceInstance) Name() string { - if n.DeposedKey != states.NotDeposed { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) - } - return n.ResourceInstanceAddr().String() + " (destroy)" -} - -func (n *NodeDestroyResourceInstance) ProvidedBy() (addr addrs.ProviderConfig, exact bool) { - if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { - // indicate that this node does not require a configured provider - return nil, true - } - return n.NodeAbstractResourceInstance.ProvidedBy() -} - -// GraphNodeDestroyer -func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { - // State takes precedence during destroy. - // If the resource was removed, there is no config to check. - // If CBD was forced from descendent, it should be saved in the state - // already. 
- if s := n.instanceState; s != nil { - if s.Current != nil { - return s.Current.CreateBeforeDestroy - } - } - - if n.Config != nil && n.Config.Managed != nil { - return n.Config.Managed.CreateBeforeDestroy - } - - return false -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() - destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) - - phaseType := addrs.ResourceInstancePhaseDestroy - if n.CreateBeforeDestroy() { - phaseType = addrs.ResourceInstancePhaseDestroyCBD - } - - for i, normalAddr := range normalAddrs { - switch ta := normalAddr.(type) { - case addrs.Resource: - destroyAddrs[i] = ta.Phase(phaseType) - case addrs.ResourceInstance: - destroyAddrs[i] = ta.Phase(phaseType) - default: - destroyAddrs[i] = normalAddr - } - } - - return destroyAddrs -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil && c.Managed != nil { - var result []*addrs.Reference - - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - for _, p := range c.Managed.Provisioners { - schema := n.ProvisionerSchemas[p.Type] - - if p.When == configs.ProvisionerWhenDestroy { - if p.Connection != nil { - result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) - } - result = append(result, ReferencesFromConfig(p.Config, schema)...) 
- } - } - - return result - } - - return nil -} - -// GraphNodeExecutable -func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx) - case addrs.DataResourceMode: - return n.dataResourceExecute(ctx) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - // Get our state - is := n.instanceState - if is == nil { - log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) - } - - // These vars are updated through pointers at various stages below. - var changeApply *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - changeApply, err = n.readDiff(ctx, providerSchema) - diags = diags.Append(err) - if changeApply == nil || diags.HasErrors() { - return diags - } - - changeApply = reducePlan(addr.Resource, changeApply, true) - // reducePlan may have simplified our planned change - // into a NoOp if it does not require destroying. 
- if changeApply == nil || changeApply.Action == plans.NoOp { - return diags - } - - state, readDiags := n.readResourceInstanceState(ctx, addr) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return diags - } - - // Exit early if the state object is null after reading the state - if state == nil || state.Value.IsNull() { - return diags - } - - diags = diags.Append(n.preApplyHook(ctx, changeApply)) - if diags.HasErrors() { - return diags - } - - // Run destroy provisioners if not tainted - if state.Status != states.ObjectTainted { - applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy) - diags = diags.Append(applyProvisionersDiags) - // keep the diags separate from the main set until we handle the cleanup - - if diags.HasErrors() { - // If we have a provisioning error, then we just call - // the post-apply hook now. - diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) - return diags - } - } - - // Managed resources need to be destroyed, while data sources - // are only removed from state. 
- // we pass a nil configuration to apply because we are destroying - s, d := n.apply(ctx, state, changeApply, nil, instances.RepetitionData{}, false) - state, diags = s, diags.Append(d) - // we don't return immediately here on error, so that the state can be - // finalized - - err = n.writeResourceInstanceState(ctx, state, workingState) - if err != nil { - return diags.Append(err) - } - - // create the err value for postApplyHook - diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) - diags = diags.Append(updateStateHook(ctx)) - return diags -} - -func (n *NodeDestroyResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] NodeDestroyResourceInstance: removing state object for %s", n.Addr) - ctx.State().SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) - return diags.Append(updateStateHook(ctx)) -} diff --git a/internal/terraform/node_resource_destroy_deposed.go b/internal/terraform/node_resource_destroy_deposed.go deleted file mode 100644 index 702e0596c124..000000000000 --- a/internal/terraform/node_resource_destroy_deposed.go +++ /dev/null @@ -1,334 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert -// an abstract resource instance to a concrete one of some type that has -// an associated deposed object key. 
-type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex - -type GraphNodeDeposedResourceInstanceObject interface { - DeposedInstanceObjectKey() states.DeposedKey -} - -// NodePlanDeposedResourceInstanceObject represents deposed resource -// instance objects during plan. These are distinct from the primary object -// for each resource instance since the only valid operation to do with them -// is to destroy them. -// -// This node type is also used during the refresh walk to ensure that the -// record of a deposed object is up-to-date before we plan to destroy it. -type NodePlanDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey - - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeConfigResource = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeExecutable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) -) - -func (n *NodePlanDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) -} - -func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n 
*NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] NodePlanDeposedResourceInstanceObject: planning %s deposed object %s", n.Addr, n.DeposedKey) - - // Read the state for the deposed resource instance - state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // Note any upgrades that readResourceInstanceState might've done in the - // prevRunState, so that it'll conform to current schema. - diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, prevRunState)) - if diags.HasErrors() { - return diags - } - // Also the refreshState, because that should still reflect schema upgrades - // even if not refreshing. - diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, refreshState)) - if diags.HasErrors() { - return diags - } - - // We don't refresh during the planDestroy walk, since that is only adding - // the destroy changes to the plan and the provider will not be configured - // at this point. The other nodes use separate types for plan and destroy, - // while deposed instances are always a destroy operation, so the logic - // here is a bit overloaded. - if !n.skipRefresh && op != walkPlanDestroy { - // Refresh this object even though it is going to be destroyed, in - // case it's already been deleted outside of Terraform. 
If this is a - // normal plan, providers expect a Read request to remove missing - // resources from the plan before apply, and may not handle a missing - // resource during Delete correctly. If this is a simple refresh, - // Terraform is expected to remove the missing resource from the state - // entirely - refreshedState, refreshDiags := n.refresh(ctx, n.DeposedKey, state) - diags = diags.Append(refreshDiags) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, refreshedState, refreshState)) - if diags.HasErrors() { - return diags - } - - // If we refreshed then our subsequent planning should be in terms of - // the new object, not the original object. - state = refreshedState - } - - if !n.skipPlanChanges { - var change *plans.ResourceInstanceChange - change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey) - diags = diags.Append(destroyPlanDiags) - if diags.HasErrors() { - return diags - } - - // NOTE: We don't check prevent_destroy for deposed objects, even - // though we would do so here for a "current" object, because - // if we've reached a point where an object is already deposed then - // we've already planned and partially-executed a create_before_destroy - // replace and we would've checked prevent_destroy at that point. We're - // now just need to get the deposed object destroyed, because there - // should be a new object already serving as its replacement. - - diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, nil, workingState)) - } else { - // The working state should at least be updated with the result - // of upgrading and refreshing from above. 
- diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, workingState)) - } - - return diags -} - -// NodeDestroyDeposedResourceInstanceObject represents deposed resource -// instance objects during apply. Nodes of this type are inserted by -// DiffTransformer when the planned changeset contains "delete" changes for -// deposed instance objects, and its only supported operation is to destroy -// and then forget the associated object. -type NodeDestroyDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeConfigResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeExecutable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) -) - -func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) -} - -func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. 
- return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeDestroyer -func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { - // A deposed instance is always CreateBeforeDestroy by definition, since - // we use deposed only to handle create-before-destroy. - return true -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { - if !v { - // Should never happen: deposed instances are _always_ create_before_destroy. - return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") - } - return nil -} - -// GraphNodeExecutable impl. 
-func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - var change *plans.ResourceInstanceChange - - // Read the state for the deposed resource instance - state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) - if err != nil { - return diags.Append(err) - } - - if state == nil { - diags = diags.Append(fmt.Errorf("missing deposed state for %s (%s)", n.Addr, n.DeposedKey)) - return diags - } - - change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey) - diags = diags.Append(destroyPlanDiags) - if diags.HasErrors() { - return diags - } - - // Call pre-apply hook - diags = diags.Append(n.preApplyHook(ctx, change)) - if diags.HasErrors() { - return diags - } - - // we pass a nil configuration to apply because we are destroying - state, applyDiags := n.apply(ctx, state, change, nil, instances.RepetitionData{}, false) - diags = diags.Append(applyDiags) - // don't return immediately on errors, we need to handle the state - - // Always write the resource back to the state deposed. If it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - writeDiags := n.writeResourceInstanceState(ctx, state) - diags.Append(writeDiags) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) - - return diags.Append(updateStateHook(ctx)) -} - -// GraphNodeDeposer is an optional interface implemented by graph nodes that -// might create a single new deposed object for a specific associated resource -// instance, allowing a caller to optionally pre-allocate a DeposedKey for -// it. -type GraphNodeDeposer interface { - // SetPreallocatedDeposedKey will be called during graph construction - // if a particular node must use a pre-allocated deposed key if/when it - // "deposes" the current object of its associated resource instance. 
- SetPreallocatedDeposedKey(key states.DeposedKey) -} - -// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. -// Embed it in a node type to get automatic support for it, and then access -// the field PreallocatedDeposedKey to access any pre-allocated key. -type graphNodeDeposer struct { - PreallocatedDeposedKey states.DeposedKey -} - -func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { - n.PreallocatedDeposedKey = key -} - -func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject) error { - absAddr := n.Addr - key := n.DeposedKey - state := ctx.State() - - if key == states.NotDeposed { - // should never happen - return fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) - } - - if obj == nil { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceDeposed(absAddr, key, nil, n.ResolvedProvider) - log.Printf("[TRACE] writeResourceInstanceStateDeposed: removing state object for %s deposed %s", absAddr, key) - return nil - } - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - if err != nil { - return err - } - if providerSchema == nil { - // Should never happen, unless our state object is nil - panic("writeResourceInstanceStateDeposed used with no ProviderSchema object") - } - - schema, currentVersion := providerSchema.SchemaForResourceAddr(absAddr.ContainingResource().Resource) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. 
- return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - log.Printf("[TRACE] writeResourceInstanceStateDeposed: writing state object for %s deposed %s", absAddr, key) - state.SetResourceInstanceDeposed(absAddr, key, src, n.ResolvedProvider) - return nil -} diff --git a/internal/terraform/node_resource_destroy_deposed_test.go b/internal/terraform/node_resource_destroy_deposed_test.go deleted file mode 100644 index f173002a28df..000000000000 --- a/internal/terraform/node_resource_destroy_deposed_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) { - deposedKey := states.NewDeposedKey() - state := states.NewState() - absResource := mustResourceInstanceAddr("test_instance.foo") - state.Module(addrs.RootModuleInstance).SetResourceInstanceDeposed( - absResource.Resource, - deposedKey, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - p := testProvider("test") - p.ConfigureProvider(providers.ConfigureProviderRequest{}) - p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ - UpgradedState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - }), - } - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - PrevRunStateState: state.DeepCopy().SyncWrapper(), - RefreshStateState: 
state.DeepCopy().SyncWrapper(), - ProviderProvider: p, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }, - ChangesChanges: plans.NewChanges().SyncWrapper(), - } - - node := NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - Addr: absResource, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - }, - }, - DeposedKey: deposedKey, - } - err := node.Execute(ctx, walkPlan) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !p.UpgradeResourceStateCalled { - t.Errorf("UpgradeResourceState wasn't called; should've been called to upgrade the previous run's object") - } - if !p.ReadResourceCalled { - t.Errorf("ReadResource wasn't called; should've been called to refresh the deposed object") - } - - change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey) - if got, want := change.ChangeSrc.Action, plans.Delete; got != want { - t.Fatalf("wrong planned action\ngot: %s\nwant: %s", got, want) - } -} - -func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) { - deposedKey := states.NewDeposedKey() - state := states.NewState() - absResource := mustResourceInstanceAddr("test_instance.foo") - state.Module(addrs.RootModuleInstance).SetResourceInstanceDeposed( - absResource.Resource, - deposedKey, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(`{"id":"bar"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - schema := &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - } - - p := 
testProvider("test") - p.ConfigureProvider(providers.ConfigureProviderRequest{}) - p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) - - p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ - UpgradedState: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - }), - } - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: p, - ProviderSchemaSchema: schema, - ChangesChanges: plans.NewChanges().SyncWrapper(), - } - - node := NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - Addr: absResource, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - }, - }, - DeposedKey: deposedKey, - } - err := node.Execute(ctx, walkApply) - - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !state.Empty() { - t.Fatalf("resources left in state after destroy") - } -} - -func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *testing.T) { - state := states.NewState() - ctx := new(MockEvalContext) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - ctx.ProviderProvider = mockProvider - ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() - - obj := &states.ResourceInstanceObject{ - Value: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-abc123"), - }), - Status: states.ObjectReady, - } - node := &NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - Addr: 
mustResourceInstanceAddr("aws_instance.foo"), - }, - DeposedKey: states.NewDeposedKey(), - } - err := node.writeResourceInstanceState(ctx, obj) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) - } - - checkStateString(t, state, ` -aws_instance.foo: (1 deposed) - ID = - provider = provider["registry.terraform.io/hashicorp/aws"] - Deposed ID 1 = i-abc123 - `) -} - -func TestNodeDestroyDeposedResourceInstanceObject_ExecuteMissingState(t *testing.T) { - p := simpleMockProvider() - ctx := &MockEvalContext{ - StateState: states.NewState().SyncWrapper(), - ProviderProvider: simpleMockProvider(), - ProviderSchemaSchema: p.ProviderSchema(), - ChangesChanges: plans.NewChanges().SyncWrapper(), - } - - node := NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - Addr: mustResourceInstanceAddr("test_object.foo"), - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - }, - }, - DeposedKey: states.NewDeposedKey(), - } - err := node.Execute(ctx, walkApply) - - if err == nil { - t.Fatal("expected error") - } -} diff --git a/internal/terraform/node_resource_import.go b/internal/terraform/node_resource_import.go deleted file mode 100644 index 93b14fdf18a6..000000000000 --- a/internal/terraform/node_resource_import.go +++ /dev/null @@ -1,251 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -type graphNodeImportState struct { - Addr addrs.AbsResourceInstance // Addr is the resource address to import into - ID string // ID is the ID to import as - ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type - ResolvedProvider addrs.AbsProviderConfig // provider node 
address after resolution - - states []providers.ImportedResource -} - -var ( - _ GraphNodeModulePath = (*graphNodeImportState)(nil) - _ GraphNodeExecutable = (*graphNodeImportState)(nil) - _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) - _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) -) - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) ProvidedBy() (addrs.ProviderConfig, bool) { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. - return n.ProviderAddr, false -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) Provider() addrs.Provider { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. - return n.ProviderAddr.Provider -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { - n.ResolvedProvider = addr -} - -// GraphNodeModuleInstance -func (n *graphNodeImportState) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *graphNodeImportState) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// GraphNodeExecutable impl. 
-func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - // Reset our states - n.states = nil - - provider, _, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // import state - absAddr := n.Addr.Resource.Absolute(ctx.Path()) - - // Call pre-import hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(absAddr, n.ID) - })) - if diags.HasErrors() { - return diags - } - - resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: n.Addr.Resource.Resource.Type, - ID: n.ID, - }) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return diags - } - - imported := resp.ImportedResources - for _, obj := range imported { - log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) - } - n.states = imported - - // Call post-import hook - diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(absAddr, imported) - })) - return diags -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. 
- addrs := make([]addrs.AbsResourceInstance, len(n.states)) - for i, state := range n.states { - addr := n.Addr - if t := state.TypeName; t != "" { - addr.Resource.Resource.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = addr - } - - // Verify that all the addresses are clear - state := ctx.State() - for _, addr := range addrs { - existing := state.ResourceInstance(addr) - if existing != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource already managed by Terraform", - fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), - )) - continue - } - } - if diags.HasErrors() { - // Bail out early, then. - return nil, diags.Err() - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own Execute with the result of - // ImportState. Since DynamicExpand is always called after Execute, this is - // safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - TargetAddr: addrs[i], - State: state, - ResolvedProvider: n.ResolvedProvider, - }) - } - - addRootNodeToGraph(g) - - // Done! - return g, diags.Err() -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. 
-type graphNodeImportStateSub struct { - TargetAddr addrs.AbsResourceInstance - State providers.ImportedResource - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeModuleInstance = (*graphNodeImportStateSub)(nil) - _ GraphNodeExecutable = (*graphNodeImportStateSub)(nil) -) - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result", n.TargetAddr) -} - -func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { - return n.TargetAddr.Module -} - -// GraphNodeExecutable impl. -func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - // If the Ephemeral type isn't set, then it is an error - if n.State.TypeName == "" { - diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.TargetAddr.String())) - return diags - } - - state := n.State.AsInstanceObject() - - // Refresh - riNode := &NodeAbstractResourceInstance{ - Addr: n.TargetAddr, - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: n.ResolvedProvider, - }, - } - state, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state) - diags = diags.Append(refreshDiags) - if diags.HasErrors() { - return diags - } - - // Verify the existance of the imported resource - if state.Value.IsNull() { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %q, "+ - "the provider detected that no object exists with the given id. 
"+ - "Only pre-existing objects can be imported; check that the id "+ - "is correct and that it is associated with the provider's "+ - "configured region or endpoint, or use \"terraform apply\" to "+ - "create a new remote object for this resource.", - n.TargetAddr, - ), - )) - return diags - } - - diags = diags.Append(riNode.writeResourceInstanceState(ctx, state, workingState)) - return diags -} diff --git a/internal/terraform/node_resource_plan.go b/internal/terraform/node_resource_plan.go deleted file mode 100644 index 3d9d201f349c..000000000000 --- a/internal/terraform/node_resource_plan.go +++ /dev/null @@ -1,402 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// nodeExpandPlannableResource represents an addrs.ConfigResource and implements -// DynamicExpand to a subgraph containing all of the addrs.AbsResourceInstance -// resulting from both the containing module and resource-specific expansion. -type nodeExpandPlannableResource struct { - *NodeAbstractResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. - ForceCreateBeforeDestroy *bool - - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - preDestroyRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool - - // forceReplace are resource instance addresses where the user wants to - // force generating a replace action. This set isn't pre-filtered, so - // it might contain addresses that have nothing to do with the resource - // that this node represents, which the node itself must therefore ignore. 
- forceReplace []addrs.AbsResourceInstance - - // We attach dependencies to the Resource during refresh, since the - // instances are instantiated during DynamicExpand. - // FIXME: These would be better off converted to a generic Set data - // structure in the future, as we need to compare for equality and take the - // union of multiple groups of dependencies. - dependencies []addrs.ConfigResource -} - -var ( - _ GraphNodeDestroyerCBD = (*nodeExpandPlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandPlannableResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandPlannableResource)(nil) - _ GraphNodeReferencer = (*nodeExpandPlannableResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandPlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandPlannableResource)(nil) - _ GraphNodeAttachDependencies = (*nodeExpandPlannableResource)(nil) - _ GraphNodeTargetable = (*nodeExpandPlannableResource)(nil) - _ graphNodeExpandsInstances = (*nodeExpandPlannableResource)(nil) -) - -func (n *nodeExpandPlannableResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -func (n *nodeExpandPlannableResource) expandsInstances() { -} - -// GraphNodeAttachDependencies -func (n *nodeExpandPlannableResource) AttachDependencies(deps []addrs.ConfigResource) { - n.dependencies = deps -} - -// GraphNodeDestroyerCBD -func (n *nodeExpandPlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *nodeExpandPlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - 
moduleInstances := expander.ExpandModule(n.Addr.Module) - - // Lock the state while we inspect it - state := ctx.State().Lock() - - var orphans []*states.Resource - for _, res := range state.Resources(n.Addr) { - found := false - for _, m := range moduleInstances { - if m.Equal(res.Addr.Module) { - found = true - break - } - } - // The module instance of the resource in the state doesn't exist - // in the current config, so this whole resource is orphaned. - if !found { - orphans = append(orphans, res) - } - } - - // We'll no longer use the state directly here, and the other functions - // we'll call below may use it so we'll release the lock. - state = nil - ctx.State().Unlock() - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - a.Dependencies = n.dependencies - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - } - } - - for _, res := range orphans { - for key := range res.Instances { - addr := res.Addr.Instance(key) - abs := NewNodeAbstractResourceInstance(addr) - abs.AttachResourceState(res) - n := concreteResourceOrphan(abs) - g.Add(n) - } - } - - // The above dealt with the expansion of the containing module, so now - // we need to deal with the expansion of the resource itself across all - // instances of the module. - // - // We'll gather up all of the leaf instances we learn about along the way - // so that we can inform the checks subsystem of which instances it should - // be expecting check results for, below. 
- var diags tfdiags.Diagnostics - instAddrs := addrs.MakeSet[addrs.Checkable]() - for _, module := range moduleInstances { - resAddr := n.Addr.Resource.Absolute(module) - err := n.expandResourceInstances(ctx, resAddr, &g, instAddrs) - diags = diags.Append(err) - } - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - // If this is a resource that participates in custom condition checks - // (i.e. it has preconditions or postconditions) then the check state - // wants to know the addresses of the checkable objects so that it can - // treat them as unknown status if we encounter an error before actually - // visiting the checks. - if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) { - checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, instAddrs) - } - - addRootNodeToGraph(&g) - - return &g, diags.ErrWithWarnings() -} - -// expandResourceInstances calculates the dynamic expansion for the resource -// itself in the context of a particular module instance. -// -// It has several side-effects: -// - Adds a node to Graph g for each leaf resource instance it discovers, whether present or orphaned. -// - Registers the expansion of the resource in the "expander" object embedded inside EvalContext ctx. -// - Adds each present (non-orphaned) resource instance address to instAddrs (guaranteed to always be addrs.AbsResourceInstance, despite being declared as addrs.Checkable). -// -// After calling this for each of the module instances the resource appears -// within, the caller must register the final superset instAddrs with the -// checks subsystem so that it knows the fully expanded set of checkable -// object instances for this resource instance. -func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error { - var diags tfdiags.Diagnostics - - if n.Config == nil { - // Nothing to do, then. 
- log.Printf("[TRACE] nodeExpandPlannableResource: no configuration present for %s", n.Name()) - return diags.ErrWithWarnings() - } - - // The rest of our work here needs to know which module instance it's - // working in, so that it can evaluate expressions in the appropriate scope. - moduleCtx := globalCtx.WithPath(resAddr.Module) - - // writeResourceState is responsible for informing the expander of what - // repetition mode this resource has, which allows expander.ExpandResource - // to work below. - moreDiags := n.writeResourceState(moduleCtx, resAddr) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return diags.ErrWithWarnings() - } - - // Before we expand our resource into potentially many resource instances, - // we'll verify that any mention of this resource in n.forceReplace is - // consistent with the repetition mode of the resource. In other words, - // we're aiming to catch a situation where naming a particular resource - // instance would require an instance key but the given address has none. - expander := moduleCtx.InstanceExpander() - instanceAddrs := expander.ExpandResource(resAddr) - - // If there's a number of instances other than 1 then we definitely need - // an index. - mustHaveIndex := len(instanceAddrs) != 1 - // If there's only one instance then we might still need an index, if the - // instance address has one. - if len(instanceAddrs) == 1 && instanceAddrs[0].Resource.Key != addrs.NoKey { - mustHaveIndex = true - } - if mustHaveIndex { - for _, candidateAddr := range n.forceReplace { - if candidateAddr.Resource.Key == addrs.NoKey { - if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) { - switch { - case len(instanceAddrs) == 0: - // In this case there _are_ no instances to replace, so - // there isn't any alternative address for us to suggest. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.", - candidateAddr, - ), - )) - case len(instanceAddrs) == 1: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n -replace=%q", - candidateAddr, instanceAddrs[0], - ), - )) - default: - var possibleValidOptions strings.Builder - for _, addr := range instanceAddrs { - fmt.Fprintf(&possibleValidOptions, "\n -replace=%q", addr) - } - - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Incompletely-matched force-replace resource instance", - fmt.Sprintf( - "Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s", - candidateAddr, possibleValidOptions.String(), - ), - )) - } - } - } - } - } - // NOTE: The actual interpretation of n.forceReplace to produce replace - // actions is in the per-instance function we're about to call, because - // we need to evaluate it on a per-instance basis. - - for _, addr := range instanceAddrs { - // If this resource is participating in the "checks" mechanism then our - // caller will need to know all of our expanded instance addresses as - // checkable object instances. - // (NOTE: instAddrs probably already has other instance addresses in it - // from earlier calls to this function with different resource addresses, - // because its purpose is to aggregate them all together into a single set.) 
- instAddrs.Add(addr) - } - - // Our graph builder mechanism expects to always be constructing new - // graphs rather than adding to existing ones, so we'll first - // construct a subgraph just for this individual modules's instances and - // then we'll steal all of its nodes and edges to incorporate into our - // main graph which contains all of the resource instances together. - instG, err := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs) - if err != nil { - diags = diags.Append(err) - return diags.ErrWithWarnings() - } - g.Subsume(&instG.AcyclicGraph.Graph) - - return diags.ErrWithWarnings() -} - -func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) { - var diags tfdiags.Diagnostics - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // check if this node is being imported first - for _, importTarget := range n.importTargets { - if importTarget.Addr.Equal(a.Addr) { - return &graphNodeImportState{ - Addr: importTarget.Addr, - ID: importTarget.ID, - ResolvedProvider: n.ResolvedProvider, - } - } - } - - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - a.dependsOn = n.dependsOn - a.Dependencies = n.dependencies - a.preDestroyRefresh = n.preDestroyRefresh - - return &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: a, - - // By the time we're walking, we've figured out whether we need - // to force on CreateBeforeDestroy due to dependencies on other - // nodes that have it. 
- ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - forceReplace: n.forceReplace, - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - skipRefresh: n.skipRefresh, - skipPlanChanges: n.skipPlanChanges, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Addr: n.ResourceAddr(), - InstanceAddrs: instanceAddrs, - }, - - // Add the count/for_each orphans - &OrphanResourceInstanceCountTransformer{ - Concrete: concreteResourceOrphan, - Addr: addr, - InstanceAddrs: instanceAddrs, - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Name: "nodeExpandPlannableResource", - } - graph, diags := b.Build(addr.Module) - return graph, diags.ErrWithWarnings() -} diff --git a/internal/terraform/node_resource_plan_destroy.go b/internal/terraform/node_resource_plan_destroy.go deleted file mode 100644 index dd8216445d32..000000000000 --- a/internal/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - 
"github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodePlanDestroyableResourceInstance represents a resource that is ready -// to be planned for destruction. -type NodePlanDestroyableResourceInstance struct { - *NodeAbstractResourceInstance - - // skipRefresh indicates that we should skip refreshing - skipRefresh bool -} - -var ( - _ GraphNodeModuleInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeExecutable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx, op) - case addrs.DataResourceMode: - return n.dataResourceExecute(ctx, op) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables 
that are used for state during - // evaluation. These are written to by address in the EvalNodes we - // declare below. - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - state, err := n.readResourceInstanceState(ctx, addr) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - // If we are in the "skip refresh" mode then we will have skipped over our - // usual opportunity to update the previous run state and refresh state - // with the result of any provider schema upgrades, so we'll compensate - // by doing that here. - // - // NOTE: this is coupled with logic in Context.destroyPlan which skips - // running a normal plan walk when refresh is enabled. These two - // conditionals must agree (be exactly opposite) in order to get the - // correct behavior in both cases. - if n.skipRefresh { - diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState)) - if diags.HasErrors() { - return diags - } - diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState)) - if diags.HasErrors() { - return diags - } - } - - change, destroyPlanDiags := n.planDestroy(ctx, state, "") - diags = diags.Append(destroyPlanDiags) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.writeChange(ctx, change, "")) - return diags -} - -func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - - // We may not be able to read a prior data source from the state if the - // schema was upgraded and we are destroying before ever refreshing that - // data source. Regardless, a data source "destroy" is simply writing a - // null state, which we can do with a null prior state too. 
- change := &plans.ResourceInstanceChange{ - Addr: n.ResourceInstanceAddr(), - PrevRunAddr: n.prevRunAddr(ctx), - Change: plans.Change{ - Action: plans.Delete, - Before: cty.NullVal(cty.DynamicPseudoType), - After: cty.NullVal(cty.DynamicPseudoType), - }, - ProviderAddr: n.ResolvedProvider, - } - return diags.Append(n.writeChange(ctx, change, "")) -} diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go deleted file mode 100644 index aac13bedc82d..000000000000 --- a/internal/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,422 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. -type NodePlannableResourceInstance struct { - *NodeAbstractResourceInstance - ForceCreateBeforeDestroy bool - - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. - skipPlanChanges bool - - // forceReplace are resource instance addresses where the user wants to - // force generating a replace action. This set isn't pre-filtered, so - // it might contain addresses that have nothing to do with the resource - // that this node represents, which the node itself must therefore ignore. - forceReplace []addrs.AbsResourceInstance - - // replaceTriggeredBy stores references from replace_triggered_by which - // triggered this instance to be replaced. 
- replaceTriggeredBy []*addrs.Reference -} - -var ( - _ GraphNodeModuleInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) - _ GraphNodeExecutable = (*NodePlannableResourceInstance)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx) - case addrs.DataResourceMode: - return n.dataResourceExecute(ctx) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - config := n.Config - addr := n.ResourceInstanceAddr() - - var change *plans.ResourceInstanceChange - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) - if diags.HasErrors() { - return diags - } - - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } - - change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges) - diags = diags.Append(planDiags) - if diags.HasErrors() { - return diags - } - - // write the data source into both the refresh state and the - // working state - diags = 
diags.Append(n.writeResourceInstanceState(ctx, state, refreshState)) - if diags.HasErrors() { - return diags - } - diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState)) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.writeChange(ctx, change, "")) - - // Post-conditions might block further progress. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, addr, repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - - return diags -} - -func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - config := n.Config - addr := n.ResourceInstanceAddr() - - var change *plans.ResourceInstanceChange - var instanceRefreshState *states.ResourceInstanceObject - - checkRuleSeverity := tfdiags.Error - if n.skipPlanChanges || n.preDestroyRefresh { - checkRuleSeverity = tfdiags.Warning - } - - _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) - if diags.HasErrors() { - return diags - } - - instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return diags - } - - // We'll save a snapshot of what we just read from the state into the - // prevRunState before we do anything else, since this will capture the - // result of any schema upgrading that readResourceInstanceState just did, - // but not include any out-of-band changes we might detect in in the - // refresh step below. 
- diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState)) - if diags.HasErrors() { - return diags - } - // Also the refreshState, because that should still reflect schema upgrades - // even if it doesn't reflect upstream changes. - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - if diags.HasErrors() { - return diags - } - - // In 0.13 we could be refreshing a resource with no config. - // We should be operating on managed resource, but check here to be certain - if n.Config == nil || n.Config.Managed == nil { - log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr) - } else { - if instanceRefreshState != nil { - instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy - } - } - - // Refresh, maybe - if !n.skipRefresh { - s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState) - diags = diags.Append(refreshDiags) - if diags.HasErrors() { - return diags - } - - instanceRefreshState = s - - if instanceRefreshState != nil { - // When refreshing we start by merging the stored dependencies and - // the configured dependencies. The configured dependencies will be - // stored to state once the changes are applied. If the plan - // results in no changes, we will re-write these dependencies - // below. 
- instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies) - } - - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - if diags.HasErrors() { - return diags - } - } - - // Plan the instance, unless we're in the refresh-only mode - if !n.skipPlanChanges { - - // add this instance to n.forceReplace if replacement is triggered by - // another change - repData := instances.RepetitionData{} - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - repData.CountIndex = k.Value() - case addrs.StringKey: - repData.EachKey = k.Value() - repData.EachValue = cty.DynamicVal - } - - diags = diags.Append(n.replaceTriggered(ctx, repData)) - if diags.HasErrors() { - return diags - } - - change, instancePlanState, repeatData, planDiags := n.plan( - ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, - ) - diags = diags.Append(planDiags) - if diags.HasErrors() { - return diags - } - - // FIXME: here we udpate the change to reflect the reason for - // replacement, but we still overload forceReplace to get the correct - // change planned. - if len(n.replaceTriggeredBy) > 0 { - change.ActionReason = plans.ResourceInstanceReplaceByTriggers - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } - - // FIXME: it is currently important that we write resource changes to - // the plan (n.writeChange) before we write the corresponding state - // (n.writeResourceInstanceState). - // - // This is because the planned resource state will normally have the - // status of states.ObjectPlanned, which causes later logic to refer to - // the contents of the plan to retrieve the resource data. Because - // there is no shared lock between these two data structures, reversing - // the order of these writes will cause a brief window of inconsistency - // which can lead to a failed safety check. 
- // - // Future work should adjust these APIs such that it is impossible to - // update these two data structures incorrectly through any objects - // reachable via the terraform.EvalContext API. - diags = diags.Append(n.writeChange(ctx, change, "")) - - diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) - if diags.HasErrors() { - return diags - } - - // If this plan resulted in a NoOp, then apply won't have a chance to make - // any changes to the stored dependencies. Since this is a NoOp we know - // that the stored dependencies will have no effect during apply, and we can - // write them out now. - if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { - // the refresh state will be the final state for this resource, so - // finalize the dependencies here if they need to be updated. - instanceRefreshState.Dependencies = n.Dependencies - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) - if diags.HasErrors() { - return diags - } - } - - // Post-conditions might block completion. We intentionally do this - // _after_ writing the state/diff because we want to check against - // the result of the operation, and to fail on future operations - // until the user makes the condition succeed. - // (Note that some preconditions will end up being skipped during - // planning, because their conditions depend on values not yet known.) - checkDiags := evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, n.ResourceInstanceAddr(), repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - } else { - // In refresh-only mode we need to evaluate the for-each expression in - // order to supply the value to the pre- and post-condition check - // blocks. 
This has the unfortunate edge case of a refresh-only plan - // executing with a for-each map which has the same keys but different - // values, which could result in a post-condition check relying on that - // value being inaccurate. Unless we decide to store the value of the - // for-each expression in state, this is unavoidable. - forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) - - checkDiags := evalCheckRules( - addrs.ResourcePrecondition, - n.Config.Preconditions, - ctx, addr, repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - - // Even if we don't plan changes, we do still need to at least update - // the working state to reflect the refresh result. If not, then e.g. - // any output values refering to this will not react to the drift. - // (Even if we didn't actually refresh above, this will still save - // the result of any schema upgrading we did in readResourceInstanceState.) - diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState)) - if diags.HasErrors() { - return diags - } - - // Here we also evaluate post-conditions after updating the working - // state, because we want to check against the result of the refresh. - // Unlike in normal planning mode, these checks are still evaluated - // even if pre-conditions generated diagnostics, because we have no - // planned changes to block. - checkDiags = evalCheckRules( - addrs.ResourcePostcondition, - n.Config.Postconditions, - ctx, addr, repeatData, - checkRuleSeverity, - ) - diags = diags.Append(checkDiags) - } - - return diags -} - -// replaceTriggered checks if this instance needs to be replace due to a change -// in a replace_triggered_by reference. 
If replacement is required, the -// instance address is added to forceReplace -func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repData instances.RepetitionData) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for _, expr := range n.Config.TriggersReplacement { - ref, replace, evalDiags := ctx.EvaluateReplaceTriggeredBy(expr, repData) - diags = diags.Append(evalDiags) - if diags.HasErrors() { - continue - } - - if replace { - // FIXME: forceReplace accomplishes the same goal, however we may - // want to communicate more information about which resource - // triggered the replacement in the plan. - // Rather than further complicating the plan method with more - // options, we can refactor both of these features later. - n.forceReplace = append(n.forceReplace, n.Addr) - log.Printf("[DEBUG] ReplaceTriggeredBy forcing replacement of %s due to change in %s", n.Addr, ref.DisplayString()) - - n.replaceTriggeredBy = append(n.replaceTriggeredBy, ref) - break - } - } - - return diags -} - -// mergeDeps returns the union of 2 sets of dependencies -func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource { - switch { - case len(a) == 0: - return b - case len(b) == 0: - return a - } - - set := make(map[string]addrs.ConfigResource) - - for _, dep := range a { - set[dep.String()] = dep - } - - for _, dep := range b { - set[dep.String()] = dep - } - - newDeps := make([]addrs.ConfigResource, 0, len(set)) - for _, dep := range set { - newDeps = append(newDeps, dep) - } - - return newDeps -} - -func depsEqual(a, b []addrs.ConfigResource) bool { - if len(a) != len(b) { - return false - } - - // Because we need to sort the deps to compare equality, make shallow - // copies to prevent concurrently modifying the array values on - // dependencies shared between expanded instances. 
- copyA, copyB := make([]addrs.ConfigResource, len(a)), make([]addrs.ConfigResource, len(b)) - copy(copyA, a) - copy(copyB, b) - a, b = copyA, copyB - - less := func(s []addrs.ConfigResource) func(i, j int) bool { - return func(i, j int) bool { - return s[i].String() < s[j].String() - } - } - - sort.Slice(a, less(a)) - sort.Slice(b, less(b)) - - for i := range a { - if !a[i].Equal(b[i]) { - return false - } - } - return true -} diff --git a/internal/terraform/node_resource_plan_orphan.go b/internal/terraform/node_resource_plan_orphan.go deleted file mode 100644 index c48e55b74fb2..000000000000 --- a/internal/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,285 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodePlannableResourceInstanceOrphan struct { - *NodeAbstractResourceInstance - - // skipRefresh indicates that we should skip refreshing individual instances - skipRefresh bool - - // skipPlanChanges indicates we should skip trying to plan change actions - // for any instances. 
- skipPlanChanges bool -} - -var ( - _ GraphNodeModuleInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeConfigResource = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeExecutable = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeProviderConsumer = (*NodePlannableResourceInstanceOrphan)(nil) -) - -func (n *NodePlannableResourceInstanceOrphan) Name() string { - return n.ResourceInstanceAddr().String() + " (orphan)" -} - -// GraphNodeExecutable -func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.managedResourceExecute(ctx) - case addrs.DataResourceMode: - return n.dataResourceExecute(ctx) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstanceOrphan) ProvidedBy() (addr addrs.ProviderConfig, exact bool) { - if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { - // indicate that this node does not require a configured provider - return nil, true - } - return n.NodeAbstractResourceInstance.ProvidedBy() -} - -func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(ctx EvalContext) tfdiags.Diagnostics { - // A data source that is no longer in the config is removed from the state - log.Printf("[TRACE] NodePlannableResourceInstanceOrphan: removing state object for %s", n.Addr) - - // we need to update both the refresh state to 
refresh the current data - // source, and the working state for plan-time evaluations. - refreshState := ctx.RefreshState() - refreshState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) - - workingState := ctx.State() - workingState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) - return nil -} - -func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { - addr := n.ResourceInstanceAddr() - - oldState, readDiags := n.readResourceInstanceState(ctx, addr) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return diags - } - - // Note any upgrades that readResourceInstanceState might've done in the - // prevRunState, so that it'll conform to current schema. - diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, prevRunState)) - if diags.HasErrors() { - return diags - } - // Also the refreshState, because that should still reflect schema upgrades - // even if not refreshing. - diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, refreshState)) - if diags.HasErrors() { - return diags - } - - if !n.skipRefresh { - // Refresh this instance even though it is going to be destroyed, in - // order to catch missing resources. If this is a normal plan, - // providers expect a Read request to remove missing resources from the - // plan before apply, and may not handle a missing resource during - // Delete correctly. If this is a simple refresh, Terraform is - // expected to remove the missing resource from the state entirely - refreshedState, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState) - diags = diags.Append(refreshDiags) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.writeResourceInstanceState(ctx, refreshedState, refreshState)) - if diags.HasErrors() { - return diags - } - - // If we refreshed then our subsequent planning should be in terms of - // the new object, not the original object. 
- oldState = refreshedState - } - - // If we're skipping planning, all we need to do is write the state. If the - // refresh indicates the instance no longer exists, there is also nothing - // to plan because there is no longer any state and it doesn't exist in the - // config. - if n.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { - return diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState)) - } - - var change *plans.ResourceInstanceChange - change, destroyPlanDiags := n.planDestroy(ctx, oldState, "") - diags = diags.Append(destroyPlanDiags) - if diags.HasErrors() { - return diags - } - - diags = diags.Append(n.checkPreventDestroy(change)) - if diags.HasErrors() { - return diags - } - - // We might be able to offer an approximate reason for why we are - // planning to delete this object. (This is best-effort; we might - // sometimes not have a reason.) - change.ActionReason = n.deleteActionReason(ctx) - - diags = diags.Append(n.writeChange(ctx, change, "")) - if diags.HasErrors() { - return diags - } - - return diags.Append(n.writeResourceInstanceState(ctx, nil, workingState)) -} - -func (n *NodePlannableResourceInstanceOrphan) deleteActionReason(ctx EvalContext) plans.ResourceInstanceChangeActionReason { - cfg := n.Config - if cfg == nil { - if !n.Addr.Equal(n.prevRunAddr(ctx)) { - // This means the resource was moved - see also - // ResourceInstanceChange.Moved() which calculates - // this the same way. - return plans.ResourceInstanceDeleteBecauseNoMoveTarget - } - - return plans.ResourceInstanceDeleteBecauseNoResourceConfig - } - - // If this is a resource instance inside a module instance that's no - // longer declared then we will have a config (because config isn't - // instance-specific) but the expander will know that our resource - // address's module path refers to an undeclared module instance. 
- if expander := ctx.InstanceExpander(); expander != nil { // (sometimes nil in MockEvalContext in tests) - validModuleAddr := expander.GetDeepestExistingModuleInstance(n.Addr.Module) - if len(validModuleAddr) != len(n.Addr.Module) { - // If we get here then at least one step in the resource's module - // path is to a module instance that doesn't exist at all, and - // so a missing module instance is the delete reason regardless - // of whether there might _also_ be a change to the resource - // configuration inside the module. (Conceptually the configurations - // inside the non-existing module instance don't exist at all, - // but they end up existing just as an artifact of the - // implementation detail that we detect module instance orphans - // only dynamically.) - return plans.ResourceInstanceDeleteBecauseNoModule - } - } - - switch n.Addr.Resource.Key.(type) { - case nil: // no instance key at all - if cfg.Count != nil || cfg.ForEach != nil { - return plans.ResourceInstanceDeleteBecauseWrongRepetition - } - case addrs.IntKey: - if cfg.Count == nil { - // This resource isn't using "count" at all, then - return plans.ResourceInstanceDeleteBecauseWrongRepetition - } - - expander := ctx.InstanceExpander() - if expander == nil { - break // only for tests that produce an incomplete MockEvalContext - } - insts := expander.ExpandResource(n.Addr.ContainingResource()) - - declared := false - for _, inst := range insts { - if n.Addr.Equal(inst) { - declared = true - } - } - if !declared { - // This instance key is outside of the configured range - return plans.ResourceInstanceDeleteBecauseCountIndex - } - case addrs.StringKey: - if cfg.ForEach == nil { - // This resource isn't using "for_each" at all, then - return plans.ResourceInstanceDeleteBecauseWrongRepetition - } - - expander := ctx.InstanceExpander() - if expander == nil { - break // only for tests that produce an incomplete MockEvalContext - } - insts := expander.ExpandResource(n.Addr.ContainingResource()) - - 
declared := false - for _, inst := range insts { - if n.Addr.Equal(inst) { - declared = true - } - } - if !declared { - // This instance key is outside of the configured range - return plans.ResourceInstanceDeleteBecauseEachKey - } - } - - // If we get here then the instance key type matches the configured - // repetition mode, and so we need to consider whether the key itself - // is within the range of the repetition construct. - if expander := ctx.InstanceExpander(); expander != nil { // (sometimes nil in MockEvalContext in tests) - // First we'll check whether our containing module instance still - // exists, so we can talk about that differently in the reason. - declared := false - for _, inst := range expander.ExpandModule(n.Addr.Module.Module()) { - if n.Addr.Module.Equal(inst) { - declared = true - break - } - } - if !declared { - return plans.ResourceInstanceDeleteBecauseNoModule - } - - // Now we've proven that we're in a still-existing module instance, - // we'll see if our instance key matches something actually declared. - declared = false - for _, inst := range expander.ExpandResource(n.Addr.ContainingResource()) { - if n.Addr.Equal(inst) { - declared = true - break - } - } - if !declared { - // Because we already checked that the key _type_ was correct - // above, we can assume that any mismatch here is a range error, - // and thus we just need to decide which of the two range - // errors we're going to return. - switch n.Addr.Resource.Key.(type) { - case addrs.IntKey: - return plans.ResourceInstanceDeleteBecauseCountIndex - case addrs.StringKey: - return plans.ResourceInstanceDeleteBecauseEachKey - } - } - } - - // If we didn't find any specific reason to report, we'll report "no reason" - // as a fallback, which means the UI should just state it'll be deleted - // without any explicit reasoning. 
- return plans.ResourceInstanceChangeNoReason -} diff --git a/internal/terraform/node_resource_plan_orphan_test.go b/internal/terraform/node_resource_plan_orphan_test.go deleted file mode 100644 index 0a29decc5639..000000000000 --- a/internal/terraform/node_resource_plan_orphan_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeResourcePlanOrphanExecute(t *testing.T) { - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "test_string": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - p := simpleMockProvider() - p.ConfigureProvider(providers.ConfigureProviderRequest{}) - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - RefreshStateState: state.DeepCopy().SyncWrapper(), - PrevRunStateState: state.DeepCopy().SyncWrapper(), - InstanceExpanderExpander: instances.NewExpander(), - ProviderProvider: p, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_object": simpleTestSchema(), - }, - }, - ChangesChanges: plans.NewChanges().SyncWrapper(), - } - - node := NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: 
addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - Addr: mustResourceInstanceAddr("test_object.foo"), - }, - } - diags := node.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if !state.Empty() { - t.Fatalf("expected empty state, got %s", state.String()) - } -} - -func TestNodeResourcePlanOrphanExecute_alreadyDeleted(t *testing.T) { - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( - addr.Resource, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "test_string": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - refreshState := state.DeepCopy() - prevRunState := state.DeepCopy() - changes := plans.NewChanges() - - p := simpleMockProvider() - p.ConfigureProvider(providers.ConfigureProviderRequest{}) - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_string"].Block.ImpliedType()), - } - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - RefreshStateState: refreshState.SyncWrapper(), - PrevRunStateState: prevRunState.SyncWrapper(), - InstanceExpanderExpander: instances.NewExpander(), - ProviderProvider: p, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_object": simpleTestSchema(), - }, - }, - ChangesChanges: changes.SyncWrapper(), - } - - node := NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - 
Addr: mustResourceInstanceAddr("test_object.foo"), - }, - } - diags := node.Execute(ctx, walkPlan) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - if !state.Empty() { - t.Fatalf("expected empty state, got %s", state.String()) - } - - if got := prevRunState.ResourceInstance(addr); got == nil { - t.Errorf("no entry for %s in the prev run state; should still be present", addr) - } - if got := refreshState.ResourceInstance(addr); got != nil { - t.Errorf("refresh state has entry for %s; should've been removed", addr) - } - if got := changes.ResourceInstance(addr); got != nil { - t.Errorf("there should be no change for the %s instance, got %s", addr, got.Action) - } -} - -// This test describes a situation which should not be possible, as this node -// should never work on deposed instances. However, a bug elsewhere resulted in -// this code path being exercised and triggered a panic. As a result, the -// assertions at the end of the test are minimal, as the behaviour (aside from -// not panicking) is unspecified. 
-func TestNodeResourcePlanOrphanExecute_deposed(t *testing.T) { - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetResourceInstanceDeposed( - addr.Resource, - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "test_string": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - refreshState := state.DeepCopy() - prevRunState := state.DeepCopy() - changes := plans.NewChanges() - - p := simpleMockProvider() - p.ConfigureProvider(providers.ConfigureProviderRequest{}) - p.ReadResourceResponse = &providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_string"].Block.ImpliedType()), - } - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - RefreshStateState: refreshState.SyncWrapper(), - PrevRunStateState: prevRunState.SyncWrapper(), - InstanceExpanderExpander: instances.NewExpander(), - ProviderProvider: p, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_object": simpleTestSchema(), - }, - }, - ChangesChanges: changes.SyncWrapper(), - } - - node := NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - }, - Addr: mustResourceInstanceAddr("test_object.foo"), - }, - } - diags := node.Execute(ctx, walkPlan) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } -} diff --git a/internal/terraform/node_resource_validate.go b/internal/terraform/node_resource_validate.go deleted file mode 100644 index 
a70bcdc4bc85..000000000000 --- a/internal/terraform/node_resource_validate.go +++ /dev/null @@ -1,592 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/didyoumean" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeValidatableResource represents a resource that is used for validation -// only. -type NodeValidatableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeValidatableResource)(nil) - _ GraphNodeExecutable = (*NodeValidatableResource)(nil) - _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) - _ GraphNodeReferencer = (*NodeValidatableResource)(nil) - _ GraphNodeConfigResource = (*NodeValidatableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeValidatableResource)(nil) -) - -func (n *NodeValidatableResource) Path() addrs.ModuleInstance { - // There is no expansion during validation, so we evaluate everything as - // single module instances. 
- return n.Addr.Module.UnkeyedInstanceShim() -} - -// GraphNodeEvalable -func (n *NodeValidatableResource) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - diags = diags.Append(n.validateResource(ctx)) - - diags = diags.Append(n.validateCheckRules(ctx, n.Config)) - - if managed := n.Config.Managed; managed != nil { - // Validate all the provisioners - for _, p := range managed.Provisioners { - if p.Connection == nil { - p.Connection = n.Config.Managed.Connection - } else if n.Config.Managed.Connection != nil { - p.Connection.Config = configs.MergeBodies(n.Config.Managed.Connection.Config, p.Connection.Config) - } - - // Validate Provisioner Config - diags = diags.Append(n.validateProvisioner(ctx, p)) - if diags.HasErrors() { - return diags - } - } - } - return diags -} - -// validateProvisioner validates the configuration of a provisioner belonging to -// a resource. The provisioner config is expected to contain the merged -// connection configurations. -func (n *NodeValidatableResource) validateProvisioner(ctx EvalContext, p *configs.Provisioner) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - provisioner, err := ctx.Provisioner(p.Type) - if err != nil { - diags = diags.Append(err) - return diags - } - - if provisioner == nil { - return diags.Append(fmt.Errorf("provisioner %s not initialized", p.Type)) - } - provisionerSchema, err := ctx.ProvisionerSchema(p.Type) - if err != nil { - return diags.Append(fmt.Errorf("failed to read schema for provisioner %s: %s", p.Type, err)) - } - if provisionerSchema == nil { - return diags.Append(fmt.Errorf("provisioner %s has no schema", p.Type)) - } - - // Validate the provisioner's own config first - configVal, _, configDiags := n.evaluateBlock(ctx, p.Config, provisionerSchema) - diags = diags.Append(configDiags) - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return diags.Append(fmt.Errorf("EvaluateBlock returned nil value")) - } - 
- // Use unmarked value for validate request - unmarkedConfigVal, _ := configVal.UnmarkDeep() - req := provisioners.ValidateProvisionerConfigRequest{ - Config: unmarkedConfigVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - - if p.Connection != nil { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - _, _, connDiags := n.evaluateBlock(ctx, p.Connection.Config, connectionBlockSupersetSchema) - diags = diags.Append(connDiags) - } - return diags -} - -func (n *NodeValidatableResource) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData, selfAddr := n.stubRepetitionData(n.Config.Count != nil, n.Config.ForEach != nil) - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. 
- - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.Number, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - // For type=ssh only (enforced in ssh communicator) - "target_platform": { - Type: cty.String, - Optional: true, - }, - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "proxy_scheme": { - Type: cty.String, - Optional: true, - }, - "proxy_host": { - Type: cty.String, - Optional: true, - }, - "proxy_port": { - Type: cty.Number, - Optional: true, - }, - "proxy_user_name": { - Type: cty.String, - Optional: true, - }, - "proxy_user_password": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -func (n *NodeValidatableResource) 
validateResource(ctx EvalContext) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) - diags = diags.Append(err) - if diags.HasErrors() { - return diags - } - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("validateResource has nil schema for %s", n.Addr)) - return diags - } - - keyData := EvalDataForNoInstanceKey - - switch { - case n.Config.Count != nil: - // If the config block has count, we'll evaluate with an unknown - // number as count.index so we can still type check even though - // we won't expand count until the plan phase. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // Basic type-checking of the count argument. More complete validation - // of this will happen when we DynamicExpand during the plan walk. - countDiags := validateCount(ctx, n.Config.Count) - diags = diags.Append(countDiags) - - case n.Config.ForEach != nil: - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.UnknownVal(cty.DynamicPseudoType), - } - - // Evaluate the for_each expression here so we can expose the diagnostics - forEachDiags := validateForEach(ctx, n.Config.ForEach) - diags = diags.Append(forEachDiags) - } - - diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) - - // Validate the provider_meta block for the provider this resource - // belongs to, if there is one. - // - // Note: this will return an error for every resource a provider - // uses in a module, if the provider_meta for that module is - // incorrect. The only way to solve this that we've found is to - // insert a new ProviderMeta graph node in the graph, and make all - // that provider's resources in the module depend on the node. 
That's - // an awful heavy hammer to swing for this feature, which should be - // used only in limited cases with heavy coordination with the - // Terraform team, so we're going to defer that solution for a future - // enhancement to this functionality. - /* - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.ProviderConfig.Type]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", cfg.ProviderConfigAddr()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - _, _, metaDiags := ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(metaDiags) - } - } - } - */ - // BUG(paddy): we're not validating provider_meta blocks on EvalValidate right now - // because the ProviderAddr for the resource isn't available on the EvalValidate - // struct. - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. - switch n.Config.Mode { - case addrs.ManagedResourceMode: - schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) - if schema == nil { - var suggestion string - if dSchema, _ := providerSchema.SchemaForResourceType(addrs.DataResourceMode, n.Config.Type); dSchema != nil { - suggestion = fmt.Sprintf("\n\nDid you intend to use the data source %q? 
If so, declare this using a \"data\" block instead of a \"resource\" block.", n.Config.Type) - } else if len(providerSchema.ResourceTypes) > 0 { - suggestions := make([]string, 0, len(providerSchema.ResourceTypes)) - for name := range providerSchema.ResourceTypes { - suggestions = append(suggestions, name) - } - if suggestion = didyoumean.NameSuggestion(n.Config.Type, suggestions); suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.%s", n.Provider().ForDisplay(), n.Config.Type, suggestion), - Subject: &n.Config.TypeRange, - }) - return diags - } - - configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return diags - } - - if n.Config.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range n.Config.Managed.IgnoreChanges { - // validate the ignore_changes traversals apply. - moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - - // ignore_changes cannot be used for Computed attributes, - // unless they are also Optional. - // If the traversal was valid, convert it to a cty.Path and - // use that to check whether the Attribute is Computed and - // non-Optional. - if !diags.HasErrors() { - path := traversalToPath(traversal) - - attrSchema := schema.AttributeByPath(path) - - if attrSchema != nil && !attrSchema.Optional && attrSchema.Computed { - // ignore_changes uses absolute traversal syntax in config despite - // using relative traversals, so we strip the leading "." added by - // FormatCtyPath for a better error message. 
- attrDisplayPath := strings.TrimPrefix(tfdiags.FormatCtyPath(path), ".") - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Redundant ignore_changes element", - Detail: fmt.Sprintf("Adding an attribute name to ignore_changes tells Terraform to ignore future changes to the argument in configuration after the object has been created, retaining the value originally configured.\n\nThe attribute %s is decided by the provider alone and therefore there can be no configured value to compare with. Including this attribute in ignore_changes has no effect. Remove the attribute from ignore_changes to quiet this warning.", attrDisplayPath), - Subject: &n.Config.TypeRange, - }) - } - } - } - } - - // Use unmarked value for validate request - unmarkedConfigVal, _ := configVal.UnmarkDeep() - req := providers.ValidateResourceConfigRequest{ - TypeName: n.Config.Type, - Config: unmarkedConfigVal, - } - - resp := provider.ValidateResourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String())) - - case addrs.DataResourceMode: - schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) - if schema == nil { - var suggestion string - if dSchema, _ := providerSchema.SchemaForResourceType(addrs.ManagedResourceMode, n.Config.Type); dSchema != nil { - suggestion = fmt.Sprintf("\n\nDid you intend to use the managed resource type %q? 
If so, declare this using a \"resource\" block instead of a \"data\" block.", n.Config.Type) - } else if len(providerSchema.DataSources) > 0 { - suggestions := make([]string, 0, len(providerSchema.DataSources)) - for name := range providerSchema.DataSources { - suggestions = append(suggestions, name) - } - if suggestion = didyoumean.NameSuggestion(n.Config.Type, suggestions); suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.%s", n.Provider().ForDisplay(), n.Config.Type, suggestion), - Subject: &n.Config.TypeRange, - }) - return diags - } - - configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return diags - } - - // Use unmarked value for validate request - unmarkedConfigVal, _ := configVal.UnmarkDeep() - req := providers.ValidateDataResourceConfigRequest{ - TypeName: n.Config.Type, - Config: unmarkedConfigVal, - } - - resp := provider.ValidateDataResourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String())) - } - - return diags -} - -func (n *NodeValidatableResource) evaluateExpr(ctx EvalContext, expr hcl.Expression, wantTy cty.Type, self addrs.Referenceable, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - refs, refDiags := lang.ReferencesInExpr(expr) - diags = diags.Append(refDiags) - - scope := ctx.EvaluationScope(self, keyData) - - hclCtx, moreDiags := scope.EvalContext(refs) - diags = diags.Append(moreDiags) - - result, hclDiags := expr.Value(hclCtx) - diags = diags.Append(hclDiags) - - return result, diags -} - -func (n *NodeValidatableResource) stubRepetitionData(hasCount, hasForEach bool) (instances.RepetitionData, addrs.Referenceable) { - keyData := 
EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr().Resource.Instance(addrs.NoKey) - - if n.Config.Count != nil { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. - selfAddr = n.ResourceAddr().Resource.Instance(addrs.IntKey(0)) - } else if n.Config.ForEach != nil { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. 
- selfAddr = n.ResourceAddr().Resource.Instance(addrs.StringKey("")) - } - - return keyData, selfAddr -} - -func (n *NodeValidatableResource) validateCheckRules(ctx EvalContext, config *configs.Resource) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - keyData, selfAddr := n.stubRepetitionData(n.Config.Count != nil, n.Config.ForEach != nil) - - for _, cr := range config.Preconditions { - _, conditionDiags := n.evaluateExpr(ctx, cr.Condition, cty.Bool, nil, keyData) - diags = diags.Append(conditionDiags) - - _, errorMessageDiags := n.evaluateExpr(ctx, cr.ErrorMessage, cty.Bool, nil, keyData) - diags = diags.Append(errorMessageDiags) - } - - for _, cr := range config.Postconditions { - _, conditionDiags := n.evaluateExpr(ctx, cr.Condition, cty.Bool, selfAddr, keyData) - diags = diags.Append(conditionDiags) - - _, errorMessageDiags := n.evaluateExpr(ctx, cr.ErrorMessage, cty.Bool, selfAddr, keyData) - diags = diags.Append(errorMessageDiags) - } - - return diags -} - -func validateCount(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { - val, countDiags := evaluateCountExpressionValue(expr, ctx) - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk - if !val.IsKnown() { - return diags - } - - if countDiags.HasErrors() { - diags = diags.Append(countDiags) - } - - return diags -} - -func validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { - val, forEachDiags := evaluateForEachExpressionValue(expr, ctx, true) - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk - if !val.IsKnown() { - return diags - } - - if forEachDiags.HasErrors() { - diags = diags.Append(forEachDiags) - } - - return diags -} - -func validateDependsOn(ctx EvalContext, dependsOn []hcl.Traversal) (diags tfdiags.Diagnostics) { - for _, traversal := range dependsOn { - ref, refDiags := 
addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if !refDiags.HasErrors() && len(ref.Remaining) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid depends_on reference", - Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", - Subject: ref.Remaining.SourceRange().Ptr(), - }) - } - - // The ref must also refer to something that exists. To test that, - // we'll just eval it and count on the fact that our evaluator will - // detect references to non-existent objects. - if !diags.HasErrors() { - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - if scope != nil { // sometimes nil in tests, due to incomplete mocks - _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) - diags = diags.Append(refDiags) - } - } - } - return diags -} diff --git a/internal/terraform/node_resource_validate_test.go b/internal/terraform/node_resource_validate_test.go deleted file mode 100644 index b5b2af74cd29..000000000000 --- a/internal/terraform/node_resource_validate_test.go +++ /dev/null @@ -1,635 +0,0 @@ -package terraform - -import ( - "errors" - "strings" - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -func TestNodeValidatableResource_ValidateProvisioner_valid(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - mp := &MockProvisioner{} - ps := &configschema.Block{} - ctx.ProvisionerSchemaSchema = ps - ctx.ProvisionerProvisioner = mp - - pc := &configs.Provisioner{ - Type: "baz", - 
Config: hcl.EmptyBody(), - Connection: &configs.Connection{ - Config: configs.SynthBody("", map[string]cty.Value{ - "host": cty.StringVal("localhost"), - "type": cty.StringVal("ssh"), - "port": cty.NumberIntVal(10022), - }), - }, - } - - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_foo", - Name: "bar", - Config: configs.SynthBody("", map[string]cty.Value{}), - } - - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - }, - } - - diags := node.validateProvisioner(ctx, pc) - if diags.HasErrors() { - t.Fatalf("node.Eval failed: %s", diags.Err()) - } - if !mp.ValidateProvisionerConfigCalled { - t.Fatalf("p.ValidateProvisionerConfig not called") - } -} - -func TestNodeValidatableResource_ValidateProvisioner__warning(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - mp := &MockProvisioner{} - ps := &configschema.Block{} - ctx.ProvisionerSchemaSchema = ps - ctx.ProvisionerProvisioner = mp - - pc := &configs.Provisioner{ - Type: "baz", - Config: hcl.EmptyBody(), - } - - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_foo", - Name: "bar", - Config: configs.SynthBody("", map[string]cty.Value{}), - Managed: &configs.ManagedResource{}, - } - - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - }, - } - - { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.SimpleWarning("foo is deprecated")) - mp.ValidateProvisionerConfigResponse = provisioners.ValidateProvisionerConfigResponse{ - Diagnostics: diags, - } - } - - diags := node.validateProvisioner(ctx, pc) - if len(diags) != 1 { - t.Fatalf("wrong number of diagnostics in %s; want one warning", diags.ErrWithWarnings()) - } - - if got, want := diags[0].Description().Summary, mp.ValidateProvisionerConfigResponse.Diagnostics[0].Description().Summary; got != want { 
- t.Fatalf("wrong warning %q; want %q", got, want) - } -} - -func TestNodeValidatableResource_ValidateProvisioner__connectionInvalid(t *testing.T) { - ctx := &MockEvalContext{} - ctx.installSimpleEval() - mp := &MockProvisioner{} - ps := &configschema.Block{} - ctx.ProvisionerSchemaSchema = ps - ctx.ProvisionerProvisioner = mp - - pc := &configs.Provisioner{ - Type: "baz", - Config: hcl.EmptyBody(), - Connection: &configs.Connection{ - Config: configs.SynthBody("", map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "bananananananana": cty.StringVal("foo"), - "bazaz": cty.StringVal("bar"), - }), - }, - } - - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_foo", - Name: "bar", - Config: configs.SynthBody("", map[string]cty.Value{}), - Managed: &configs.ManagedResource{}, - } - - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - }, - } - - diags := node.validateProvisioner(ctx, pc) - if !diags.HasErrors() { - t.Fatalf("node.Eval succeeded; want error") - } - if len(diags) != 3 { - t.Fatalf("wrong number of diagnostics; want two errors\n\n%s", diags.Err()) - } - - errStr := diags.Err().Error() - if !(strings.Contains(errStr, "bananananananana") && strings.Contains(errStr, "bazaz")) { - t.Fatalf("wrong errors %q; want something about each of our invalid connInfo keys", errStr) - } -} - -func TestNodeValidatableResource_ValidateResource_managedResource(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - if got, want := req.TypeName, "test_object"; got != want { - t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) - } - if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { - t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) - } - if got, want := 
req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { - t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) - } - return providers.ValidateResourceConfigResponse{} - } - - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("bar"), - "test_number": cty.NumberIntVal(2).Mark(marks.Sensitive), - }), - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - err := node.validateResource(ctx) - if err != nil { - t.Fatalf("err: %s", err) - } - - if !mp.ValidateResourceConfigCalled { - t.Fatal("Expected ValidateResourceConfig to be called, but it was not!") - } -} - -func TestNodeValidatableResource_ValidateResource_managedResourceCount(t *testing.T) { - // Setup - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - if got, want := req.TypeName, "test_object"; got != want { - t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) - } - if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { - t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) - } - return providers.ValidateResourceConfigResponse{} - } - - p := providers.Interface(mp) - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - tests := []struct { - name string - count hcl.Expression - }{ - { - "simple count", - 
hcltest.MockExprLiteral(cty.NumberIntVal(2)), - }, - { - "marked count value", - hcltest.MockExprLiteral(cty.NumberIntVal(3).Mark("marked")), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Count: test.count, - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("bar"), - }), - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - if !mp.ValidateResourceConfigCalled { - t.Fatal("Expected ValidateResourceConfig to be called, but it was not!") - } - }) - } -} - -func TestNodeValidatableResource_ValidateResource_dataSource(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateDataResourceConfigFn = func(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { - if got, want := req.TypeName, "test_object"; got != want { - t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) - } - if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { - t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) - } - if got, want := req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { - t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) - } - return providers.ValidateDataResourceConfigResponse{} - } - - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("bar"), - "test_number": cty.NumberIntVal(2).Mark(marks.Sensitive), 
- }), - } - - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } - - if !mp.ValidateDataResourceConfigCalled { - t.Fatal("Expected ValidateDataSourceConfig to be called, but it was not!") - } -} - -func TestNodeValidatableResource_ValidateResource_valid(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - return providers.ValidateResourceConfigResponse{} - } - - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_object.foo"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("err: %s", diags.Err()) - } -} - -func TestNodeValidatableResource_ValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.SimpleWarning("warn")) - diags = diags.Append(errors.New("err")) - return 
providers.ValidateResourceConfigResponse{ - Diagnostics: diags, - } - } - - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if !diags.HasErrors() { - t.Fatal("unexpected success; want error") - } - - bySeverity := map[tfdiags.Severity]tfdiags.Diagnostics{} - for _, diag := range diags { - bySeverity[diag.Severity()] = append(bySeverity[diag.Severity()], diag) - } - if len(bySeverity[tfdiags.Warning]) != 1 || bySeverity[tfdiags.Warning][0].Description().Summary != "warn" { - t.Errorf("Expected 1 warning 'warn', got: %s", bySeverity[tfdiags.Warning].ErrWithWarnings()) - } - if len(bySeverity[tfdiags.Error]) != 1 || bySeverity[tfdiags.Error][0].Description().Summary != "err" { - t.Errorf("Expected 1 error 'err', got: %s", bySeverity[tfdiags.Error].Err()) - } -} - -func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - return providers.ValidateResourceConfigResponse{} - } - - // We'll check a _valid_ config first, to make sure we're not failing - // for some other reason, and then make it invalid. 
- p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - DependsOn: []hcl.Traversal{ - // Depending on path.module is pointless, since it is immediately - // available, but we allow all of the referencable addrs here - // for consistency: referencing them is harmless, and avoids the - // need for us to document a different subset of addresses that - // are valid in depends_on. - // For the sake of this test, it's a valid address we can use that - // doesn't require something else to exist in the configuration. - { - hcl.TraverseRoot{ - Name: "path", - }, - hcl.TraverseAttr{ - Name: "module", - }, - }, - }, - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) - } - - // Now we'll make it invalid by adding additional traversal steps at - // the end of what we're referencing. This is intended to catch the - // situation where the user tries to depend on e.g. a specific resource - // attribute, rather than the whole resource, like aws_instance.foo.id. 
- rc.DependsOn = append(rc.DependsOn, hcl.Traversal{ - hcl.TraverseRoot{ - Name: "path", - }, - hcl.TraverseAttr{ - Name: "module", - }, - hcl.TraverseAttr{ - Name: "extra", - }, - }) - - diags = node.validateResource(ctx) - if !diags.HasErrors() { - t.Fatal("no error for invalid depends_on") - } - if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) - } - - // Test for handling an unknown root without attribute, like a - // typo that omits the dot inbetween "path.module". - rc.DependsOn = append(rc.DependsOn, hcl.Traversal{ - hcl.TraverseRoot{ - Name: "pathmodule", - }, - }) - - diags = node.validateResource(ctx) - if !diags.HasErrors() { - t.Fatal("no error for invalid depends_on") - } - if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) - } -} - -func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesNonexistent(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - return providers.ValidateResourceConfigResponse{} - } - - // We'll check a _valid_ config first, to make sure we're not failing - // for some other reason, and then make it invalid. 
- p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - Managed: &configs.ManagedResource{ - IgnoreChanges: []hcl.Traversal{ - { - hcl.TraverseAttr{ - Name: "test_string", - }, - }, - }, - }, - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) - } - - // Now we'll make it invalid by attempting to ignore a nonexistent - // attribute. - rc.Managed.IgnoreChanges = append(rc.Managed.IgnoreChanges, hcl.Traversal{ - hcl.TraverseAttr{ - Name: "nonexistent", - }, - }) - - diags = node.validateResource(ctx) - if !diags.HasErrors() { - t.Fatal("no error for invalid ignore_changes") - } - if got, want := diags.Err().Error(), "Unsupported attribute: This object has no argument, nested block, or exported attribute named \"nonexistent\""; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) - } -} - -func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesComputed(t *testing.T) { - // construct a schema with a computed attribute - ms := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "test_string": { - Type: cty.String, - Optional: true, - }, - "computed_string": { - Type: cty.String, - Computed: true, - Optional: false, - }, - }, - } - - mp := &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: ms}, - ResourceTypes: 
map[string]providers.Schema{ - "test_object": providers.Schema{Block: ms}, - }, - }, - } - - mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { - return providers.ValidateResourceConfigResponse{} - } - - // We'll check a _valid_ config first, to make sure we're not failing - // for some other reason, and then make it invalid. - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - Managed: &configs.ManagedResource{ - IgnoreChanges: []hcl.Traversal{ - { - hcl.TraverseAttr{ - Name: "test_string", - }, - }, - }, - }, - } - node := NodeValidatableResource{ - NodeAbstractResource: &NodeAbstractResource{ - Addr: mustConfigResourceAddr("test_foo.bar"), - Config: rc, - ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - }, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - ctx.ProviderSchemaSchema = mp.ProviderSchema() - ctx.ProviderProvider = p - - diags := node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) - } - - // Now we'll make it invalid by attempting to ignore a computed - // attribute. - rc.Managed.IgnoreChanges = append(rc.Managed.IgnoreChanges, hcl.Traversal{ - hcl.TraverseAttr{ - Name: "computed_string", - }, - }) - - diags = node.validateResource(ctx) - if diags.HasErrors() { - t.Fatalf("got unexpected error: %s", diags.ErrWithWarnings()) - } - if got, want := diags.ErrWithWarnings().Error(), `Redundant ignore_changes element: Adding an attribute name to ignore_changes tells Terraform to ignore future changes to the argument in configuration after the object has been created, retaining the value originally configured. 
- -The attribute computed_string is decided by the provider alone and therefore there can be no configured value to compare with. Including this attribute in ignore_changes has no effect. Remove the attribute from ignore_changes to quiet this warning.`; !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) - } -} diff --git a/internal/terraform/node_root_variable.go b/internal/terraform/node_root_variable.go deleted file mode 100644 index 33f439d7cd9a..000000000000 --- a/internal/terraform/node_root_variable.go +++ /dev/null @@ -1,115 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeRootVariable represents a root variable input. -type NodeRootVariable struct { - Addr addrs.InputVariable - Config *configs.Variable - - // RawValue is the value for the variable set from outside Terraform - // Core, such as on the command line, or from an environment variable, - // or similar. This is the raw value that was provided, not yet - // converted or validated, and can be nil for a variable that isn't - // set at all. 
- RawValue *InputValue -} - -var ( - _ GraphNodeModuleInstance = (*NodeRootVariable)(nil) - _ GraphNodeReferenceable = (*NodeRootVariable)(nil) -) - -func (n *NodeRootVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeRootVariable) Path() addrs.ModuleInstance { - return addrs.RootModuleInstance -} - -func (n *NodeRootVariable) ModulePath() addrs.Module { - return addrs.RootModule -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// GraphNodeExecutable -func (n *NodeRootVariable) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { - // Root module variables are special in that they are provided directly - // by the caller (usually, the CLI layer) and so we don't really need to - // evaluate them in the usual sense, but we do need to process the raw - // values given by the caller to match what the module is expecting, and - // make sure the values are valid. - var diags tfdiags.Diagnostics - - addr := addrs.RootModuleInstance.InputVariable(n.Addr.Name) - log.Printf("[TRACE] NodeRootVariable: evaluating %s", addr) - - if n.Config == nil { - // Because we build NodeRootVariable from configuration in the normal - // case it's strange to get here, but we tolerate it to allow for - // tests that might not populate the inputs fully. - return nil - } - - givenVal := n.RawValue - if givenVal == nil { - // We'll use cty.NilVal to represent the variable not being set at - // all, which for historical reasons is unfortunately different than - // explicitly setting it to null in some cases. In normal code we - // should never get here because all variables should have raw - // values, but we can get here in some historical tests that call - // in directly and don't necessarily obey the rules. 
- givenVal = &InputValue{ - Value: cty.NilVal, - SourceType: ValueFromUnknown, - } - } - - finalVal, moreDiags := prepareFinalInputVariableValue( - addr, - givenVal, - n.Config, - ) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - // No point in proceeding to validations then, because they'll - // probably fail trying to work with a value of the wrong type. - return diags - } - - ctx.SetRootModuleArgument(addr.Variable, finalVal) - - moreDiags = evalVariableValidations( - addrs.RootModuleInstance.InputVariable(n.Addr.Name), - n.Config, - nil, // not set for root module variables - ctx, - ) - diags = diags.Append(moreDiags) - return diags -} - -// dag.GraphNodeDotter impl. -func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/internal/terraform/node_root_variable_test.go b/internal/terraform/node_root_variable_test.go deleted file mode 100644 index 537cecce9f7d..000000000000 --- a/internal/terraform/node_root_variable_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/lang" -) - -func TestNodeRootVariableExecute(t *testing.T) { - t.Run("type conversion", func(t *testing.T) { - ctx := new(MockEvalContext) - - n := &NodeRootVariable{ - Addr: addrs.InputVariable{Name: "foo"}, - Config: &configs.Variable{ - Name: "foo", - Type: cty.String, - ConstraintType: cty.String, - }, - RawValue: &InputValue{ - Value: cty.True, - SourceType: ValueFromUnknown, - }, - } - - diags := n.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - if !ctx.SetRootModuleArgumentCalled { - 
t.Fatalf("ctx.SetRootModuleArgument wasn't called") - } - if got, want := ctx.SetRootModuleArgumentAddr.String(), "var.foo"; got != want { - t.Errorf("wrong address for ctx.SetRootModuleArgument\ngot: %s\nwant: %s", got, want) - } - if got, want := ctx.SetRootModuleArgumentValue, cty.StringVal("true"); !want.RawEquals(got) { - // NOTE: The given value was cty.Bool but the type constraint was - // cty.String, so it was NodeRootVariable's responsibility to convert - // as part of preparing the "final value". - t.Errorf("wrong value for ctx.SetRootModuleArgument\ngot: %#v\nwant: %#v", got, want) - } - }) - t.Run("validation", func(t *testing.T) { - ctx := new(MockEvalContext) - - // The variable validation function gets called with Terraform's - // built-in functions available, so we need a minimal scope just for - // it to get the functions from. - ctx.EvaluationScopeScope = &lang.Scope{} - - // We need to reimplement a _little_ bit of EvalContextBuiltin logic - // here to get a similar effect with EvalContextMock just to get the - // value to flow through here in a realistic way that'll make this test - // useful. 
- var finalVal cty.Value - ctx.SetRootModuleArgumentFunc = func(addr addrs.InputVariable, v cty.Value) { - if addr.Name == "foo" { - t.Logf("set %s to %#v", addr.String(), v) - finalVal = v - } - } - ctx.GetVariableValueFunc = func(addr addrs.AbsInputVariableInstance) cty.Value { - if addr.String() != "var.foo" { - return cty.NilVal - } - t.Logf("reading final val for %s (%#v)", addr.String(), finalVal) - return finalVal - } - - n := &NodeRootVariable{ - Addr: addrs.InputVariable{Name: "foo"}, - Config: &configs.Variable{ - Name: "foo", - Type: cty.Number, - ConstraintType: cty.Number, - Validations: []*configs.CheckRule{ - { - Condition: fakeHCLExpressionFunc(func(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - // This returns true only if the given variable value - // is exactly cty.Number, which allows us to verify - // that we were given the value _after_ type - // conversion. - // This had previously not been handled correctly, - // as reported in: - // https://github.com/hashicorp/terraform/issues/29899 - vars := ctx.Variables["var"] - if vars == cty.NilVal || !vars.Type().IsObjectType() || !vars.Type().HasAttribute("foo") { - t.Logf("var.foo isn't available") - return cty.False, nil - } - val := vars.GetAttr("foo") - if val == cty.NilVal || val.Type() != cty.Number { - t.Logf("var.foo is %#v; want a number", val) - return cty.False, nil - } - return cty.True, nil - }), - ErrorMessage: hcltest.MockExprLiteral(cty.StringVal("Must be a number.")), - }, - }, - }, - RawValue: &InputValue{ - // Note: This is a string, but the variable's type constraint - // is number so it should be converted before use. 
- Value: cty.StringVal("5"), - SourceType: ValueFromUnknown, - }, - } - - diags := n.Execute(ctx, walkApply) - if diags.HasErrors() { - t.Fatalf("unexpected error: %s", diags.Err()) - } - - if !ctx.SetRootModuleArgumentCalled { - t.Fatalf("ctx.SetRootModuleArgument wasn't called") - } - if got, want := ctx.SetRootModuleArgumentAddr.String(), "var.foo"; got != want { - t.Errorf("wrong address for ctx.SetRootModuleArgument\ngot: %s\nwant: %s", got, want) - } - if got, want := ctx.SetRootModuleArgumentValue, cty.NumberIntVal(5); !want.RawEquals(got) { - // NOTE: The given value was cty.Bool but the type constraint was - // cty.String, so it was NodeRootVariable's responsibility to convert - // as part of preparing the "final value". - t.Errorf("wrong value for ctx.SetRootModuleArgument\ngot: %#v\nwant: %#v", got, want) - } - }) -} - -// fakeHCLExpressionFunc is a fake implementation of hcl.Expression that just -// directly produces a value with direct Go code. -// -// An expression of this type has no references and so it cannot access any -// variables from the EvalContext unless something else arranges for them -// to be guaranteed available. For example, custom variable validations just -// unconditionally have access to the variable they are validating regardless -// of references. 
-type fakeHCLExpressionFunc func(*hcl.EvalContext) (cty.Value, hcl.Diagnostics) - -var _ hcl.Expression = fakeHCLExpressionFunc(nil) - -func (f fakeHCLExpressionFunc) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return f(ctx) -} - -func (f fakeHCLExpressionFunc) Variables() []hcl.Traversal { - return nil -} - -func (f fakeHCLExpressionFunc) Range() hcl.Range { - return hcl.Range{ - Filename: "fake", - Start: hcl.InitialPos, - End: hcl.InitialPos, - } -} - -func (f fakeHCLExpressionFunc) StartRange() hcl.Range { - return f.Range() -} diff --git a/internal/terraform/node_value.go b/internal/terraform/node_value.go deleted file mode 100644 index 62a6e6ae8374..000000000000 --- a/internal/terraform/node_value.go +++ /dev/null @@ -1,10 +0,0 @@ -package terraform - -// graphNodeTemporaryValue is implemented by nodes that may represent temporary -// values, which are those not saved to the state file. This includes locals, -// variables, and non-root outputs. -// A boolean return value allows a node which may need to be saved to -// conditionally do so. -type graphNodeTemporaryValue interface { - temporaryValue() bool -} diff --git a/internal/terraform/phasestate_string.go b/internal/terraform/phasestate_string.go deleted file mode 100644 index 3c3b4f713af5..000000000000 --- a/internal/terraform/phasestate_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type phaseState"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[workingState-0] - _ = x[refreshState-1] - _ = x[prevRunState-2] -} - -const _phaseState_name = "workingStaterefreshStateprevRunState" - -var _phaseState_index = [...]uint8{0, 12, 24, 36} - -func (i phaseState) String() string { - if i < 0 || i >= phaseState(len(_phaseState_index)-1) { - return "phaseState(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _phaseState_name[_phaseState_index[i]:_phaseState_index[i+1]] -} diff --git a/internal/terraform/provider_mock.go b/internal/terraform/provider_mock.go deleted file mode 100644 index e10f8b98b75b..000000000000 --- a/internal/terraform/provider_mock.go +++ /dev/null @@ -1,539 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/zclconf/go-cty/cty/msgpack" - - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/providers" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetProviderSchemaCalled bool - GetProviderSchemaResponse *providers.GetProviderSchemaResponse - - ValidateProviderConfigCalled bool - ValidateProviderConfigResponse *providers.ValidateProviderConfigResponse - ValidateProviderConfigRequest providers.ValidateProviderConfigRequest - ValidateProviderConfigFn func(providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse - - ValidateResourceConfigCalled bool - ValidateResourceConfigTypeName string - ValidateResourceConfigResponse *providers.ValidateResourceConfigResponse - ValidateResourceConfigRequest providers.ValidateResourceConfigRequest - ValidateResourceConfigFn func(providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse - - ValidateDataResourceConfigCalled bool - ValidateDataResourceConfigTypeName string - ValidateDataResourceConfigResponse *providers.ValidateDataResourceConfigResponse - ValidateDataResourceConfigRequest providers.ValidateDataResourceConfigRequest - ValidateDataResourceConfigFn func(providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse *providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureProviderCalled bool - ConfigureProviderResponse *providers.ConfigureProviderResponse - ConfigureProviderRequest providers.ConfigureProviderRequest - ConfigureProviderFn func(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse *providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - 
PlanResourceChangeCalled bool - PlanResourceChangeResponse *providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse *providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse *providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - - ReadDataSourceCalled bool - ReadDataSourceResponse *providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error -} - -func (p *MockProvider) GetProviderSchema() providers.GetProviderSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetProviderSchemaCalled = true - return p.getProviderSchema() -} - -func (p *MockProvider) getProviderSchema() providers.GetProviderSchemaResponse { - // This version of getProviderSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. - if p.GetProviderSchemaResponse != nil { - return *p.GetProviderSchemaResponse - } - - return providers.GetProviderSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } -} - -// ProviderSchema is a helper to convert from the internal GetProviderSchemaResponse to -// a ProviderSchema. 
-func (p *MockProvider) ProviderSchema() *ProviderSchema { - resp := p.getProviderSchema() - - schema := &ProviderSchema{ - Provider: resp.Provider.Block, - ProviderMeta: resp.ProviderMeta.Block, - ResourceTypes: map[string]*configschema.Block{}, - DataSources: map[string]*configschema.Block{}, - ResourceTypeSchemaVersions: map[string]uint64{}, - } - - for resType, s := range resp.ResourceTypes { - schema.ResourceTypes[resType] = s.Block - schema.ResourceTypeSchemaVersions[resType] = uint64(s.Version) - } - - for dataSource, s := range resp.DataSources { - schema.DataSources[dataSource] = s.Block - } - - return schema -} - -func (p *MockProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { - p.Lock() - defer p.Unlock() - - p.ValidateProviderConfigCalled = true - p.ValidateProviderConfigRequest = r - if p.ValidateProviderConfigFn != nil { - return p.ValidateProviderConfigFn(r) - } - - if p.ValidateProviderConfigResponse != nil { - return *p.ValidateProviderConfigResponse - } - - resp.PreparedConfig = r.Config - return resp -} - -func (p *MockProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceConfigCalled = true - p.ValidateResourceConfigRequest = r - - // Marshall the value to replicate behavior by the GRPC protocol, - // and return any relevant errors - resourceSchema, ok := p.getProviderSchema().ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) - return resp - } - - _, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - if p.ValidateResourceConfigFn != nil { - return p.ValidateResourceConfigFn(r) - } - - if p.ValidateResourceConfigResponse != nil { - return 
*p.ValidateResourceConfigResponse - } - - return resp -} - -func (p *MockProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { - p.Lock() - defer p.Unlock() - - p.ValidateDataResourceConfigCalled = true - p.ValidateDataResourceConfigRequest = r - - // Marshall the value to replicate behavior by the GRPC protocol - dataSchema, ok := p.getProviderSchema().DataSources[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) - return resp - } - _, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - if p.ValidateDataResourceConfigFn != nil { - return p.ValidateDataResourceConfigFn(r) - } - - if p.ValidateDataResourceConfigResponse != nil { - return *p.ValidateDataResourceConfigResponse - } - - return resp -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - p.Lock() - defer p.Unlock() - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before UpgradeResourceState %q", r.TypeName)) - return resp - } - - schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) - return resp - } - - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - if p.UpgradeResourceStateResponse != nil { - return *p.UpgradeResourceStateResponse - } - - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - 
resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } - - return resp -} - -func (p *MockProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { - p.Lock() - defer p.Unlock() - - p.ConfigureProviderCalled = true - p.ConfigureProviderRequest = r - - if p.ConfigureProviderFn != nil { - return p.ConfigureProviderFn(r) - } - - if p.ConfigureProviderResponse != nil { - return *p.ConfigureProviderResponse - } - - return resp -} - -func (p *MockProvider) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provider itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - p.Lock() - defer p.Unlock() - - p.ReadResourceCalled = true - p.ReadResourceRequest = r - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ReadResource %q", r.TypeName)) - return resp - } - - if p.ReadResourceFn != nil { - return p.ReadResourceFn(r) - } - - if p.ReadResourceResponse != nil { - resp = *p.ReadResourceResponse - - // Make sure the NewState conforms to the schema. - // This isn't always the case for the existing tests. 
- schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) - return resp - } - - newState, err := schema.Block.CoerceValue(resp.NewState) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - resp.NewState = newState - return resp - } - - // otherwise just return the same state we received - resp.NewState = r.PriorState - resp.Private = r.Private - return resp -} - -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - p.Lock() - defer p.Unlock() - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before PlanResourceChange %q", r.TypeName)) - return resp - } - - p.PlanResourceChangeCalled = true - p.PlanResourceChangeRequest = r - - if p.PlanResourceChangeFn != nil { - return p.PlanResourceChangeFn(r) - } - - if p.PlanResourceChangeResponse != nil { - return *p.PlanResourceChangeResponse - } - - // this is a destroy plan, - if r.ProposedNewState.IsNull() { - resp.PlannedState = r.ProposedNewState - resp.PlannedPrivate = r.PriorPrivate - return resp - } - - schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) - return resp - } - - // The default plan behavior is to accept the proposed value, and mark all - // nil computed attributes as unknown. - val, err := cty.Transform(r.ProposedNewState, func(path cty.Path, v cty.Value) (cty.Value, error) { - // We're only concerned with known null values, which can be computed - // by the provider. 
- if !v.IsKnown() { - return v, nil - } - - attrSchema := schema.Block.AttributeByPath(path) - if attrSchema == nil { - // this is an intermediate path which does not represent an attribute - return v, nil - } - - // get the current configuration value, to detect when a - // computed+optional attributes has become unset - configVal, err := path.Apply(r.Config) - if err != nil { - return v, err - } - - switch { - case attrSchema.Computed && !attrSchema.Optional && v.IsNull(): - // this is the easy path, this value is not yet set, and _must_ be computed - return cty.UnknownVal(v.Type()), nil - - case attrSchema.Computed && attrSchema.Optional && !v.IsNull() && configVal.IsNull(): - // If an optional+computed value has gone from set to unset, it - // becomes computed. (this was not possible to do with legacy - // providers) - return cty.UnknownVal(v.Type()), nil - } - - return v, nil - }) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.PlannedPrivate = r.PriorPrivate - resp.PlannedState = val - - return resp -} - -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - p.Lock() - p.ApplyResourceChangeCalled = true - p.ApplyResourceChangeRequest = r - p.Unlock() - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ApplyResourceChange %q", r.TypeName)) - return resp - } - - if p.ApplyResourceChangeFn != nil { - return p.ApplyResourceChangeFn(r) - } - - if p.ApplyResourceChangeResponse != nil { - return *p.ApplyResourceChangeResponse - } - - // if the value is nil, we return that directly to correspond to a delete - if r.PlannedState.IsNull() { - resp.NewState = r.PlannedState - return resp - } - - // the default behavior will be to create the minimal valid apply value by - // setting unknowns (which correspond to computed attributes) to a zero - // value. 
- val, _ := cty.Transform(r.PlannedState, func(path cty.Path, v cty.Value) (cty.Value, error) { - if !v.IsKnown() { - ty := v.Type() - switch { - case ty == cty.String: - return cty.StringVal(""), nil - case ty == cty.Number: - return cty.NumberIntVal(0), nil - case ty == cty.Bool: - return cty.False, nil - case ty.IsMapType(): - return cty.MapValEmpty(ty.ElementType()), nil - case ty.IsListType(): - return cty.ListValEmpty(ty.ElementType()), nil - default: - return cty.NullVal(ty), nil - } - } - return v, nil - }) - - resp.NewState = val - resp.Private = r.PlannedPrivate - - return resp -} - -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - p.Lock() - defer p.Unlock() - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ImportResourceState %q", r.TypeName)) - return resp - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - if p.ImportResourceStateResponse != nil { - resp = *p.ImportResourceStateResponse - // fixup the cty value to match the schema - for i, res := range resp.ImportedResources { - schema, ok := p.getProviderSchema().ResourceTypes[res.TypeName] - if !ok { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", res.TypeName)) - return resp - } - - var err error - res.State, err = schema.Block.CoerceValue(res.State) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.ImportedResources[i] = res - } - } - - return resp -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - p.Lock() - defer p.Unlock() - - if !p.ConfigureProviderCalled { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ReadDataSource %q", r.TypeName)) - return resp - 
} - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - if p.ReadDataSourceResponse != nil { - resp = *p.ReadDataSourceResponse - } - - return resp -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/internal/terraform/provisioner_mock.go b/internal/terraform/provisioner_mock.go deleted file mode 100644 index fe76157a2daf..000000000000 --- a/internal/terraform/provisioner_mock.go +++ /dev/null @@ -1,104 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/internal/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be 
called from other -// methods on MockProvisioner that may already be holding the lock. -func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ProvisionResourceFn != nil { - fn := p.ProvisionResourceFn - return fn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. 
- - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/internal/terraform/provisioner_mock_test.go b/internal/terraform/provisioner_mock_test.go deleted file mode 100644 index a9d9d7bf3dcd..000000000000 --- a/internal/terraform/provisioner_mock_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/provisioners" -) - -// simpleMockProvisioner returns a MockProvisioner that is pre-configured -// with schema for its own config, with the same content as returned by -// function simpleTestSchema. -// -// For most reasonable uses the returned provisioner must be registered in a -// componentFactory under the name "test". Use simpleMockComponentFactory -// to obtain a pre-configured componentFactory containing the result of -// this function along with simpleMockProvider, both registered as "test". -// -// The returned provisioner has no other behaviors by default, but the caller -// may modify it in order to stub any other required functionality, or modify -// the default schema stored in the field GetSchemaReturn. Each new call to -// simpleTestProvisioner produces entirely new instances of all of the nested -// objects so that callers can mutate without affecting mock objects. 
-func simpleMockProvisioner() *MockProvisioner { - return &MockProvisioner{ - GetSchemaResponse: provisioners.GetSchemaResponse{ - Provisioner: simpleTestSchema(), - }, - } -} diff --git a/internal/terraform/reduce_plan.go b/internal/terraform/reduce_plan.go deleted file mode 100644 index 17a58eff5c13..000000000000 --- a/internal/terraform/reduce_plan.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -// reducePlan takes a planned resource instance change as might be produced by -// Plan or PlanDestroy and "simplifies" it to a single atomic action to be -// performed by a specific graph node. -// -// Callers must specify whether they are a destroy node or a regular apply node. -// If the result is NoOp then the given change requires no action for the -// specific graph node calling this and so evaluation of the that graph node -// should exit early and take no action. -// -// The returned object may either be identical to the input change or a new -// change object derived from the input. Because of the former case, the caller -// must not mutate the object returned in OutChange. 
-func reducePlan(addr addrs.ResourceInstance, in *plans.ResourceInstanceChange, destroy bool) *plans.ResourceInstanceChange { - out := in.Simplify(destroy) - if out.Action != in.Action { - if destroy { - log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for destroy node", addr, in.Action, out.Action) - } else { - log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for apply node", addr, in.Action, out.Action) - } - } - return out -} diff --git a/internal/terraform/reduce_plan_test.go b/internal/terraform/reduce_plan_test.go deleted file mode 100644 index f32101aaf582..000000000000 --- a/internal/terraform/reduce_plan_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -func TestProcessIgnoreChangesIndividual(t *testing.T) { - tests := map[string]struct { - Old, New cty.Value - Ignore []string - Want cty.Value - }{ - "string": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a value"), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("new a value"), - "b": cty.StringVal("new b value"), - }), - []string{"a"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a value"), - "b": cty.StringVal("new b value"), - }), - }, - "changed type": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a value"), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.NumberIntVal(1), - "b": cty.StringVal("new b value"), - }), - []string{"a"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a value"), - "b": cty.StringVal("new b value"), - }), - }, - "list": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ListVal([]cty.Value{ - cty.StringVal("a0 value"), - cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": 
cty.ListVal([]cty.Value{ - cty.StringVal("new a0 value"), - cty.StringVal("new a1 value"), - }), - "b": cty.StringVal("new b value"), - }), - []string{"a"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ListVal([]cty.Value{ - cty.StringVal("a0 value"), - cty.StringVal("a1 value"), - }), - "b": cty.StringVal("new b value"), - }), - }, - "list_index": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ListVal([]cty.Value{ - cty.StringVal("a0 value"), - cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ListVal([]cty.Value{ - cty.StringVal("new a0 value"), - cty.StringVal("new a1 value"), - }), - "b": cty.StringVal("new b value"), - }), - []string{"a[1]"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ListVal([]cty.Value{ - cty.StringVal("new a0 value"), - cty.StringVal("a1 value"), - }), - "b": cty.StringVal("new b value"), - }), - }, - "map": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("new a0 value"), - "a1": cty.UnknownVal(cty.String), - }), - "b": cty.StringVal("b value"), - }), - []string{`a`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "map_index": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("new a0 value"), - "a1": cty.StringVal("new a1 value"), - }), - "b": cty.StringVal("b value"), - }), - []string{`a["a1"]`}, - 
cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("new a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "map_index_no_config": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.NullVal(cty.Map(cty.String)), - "b": cty.StringVal("b value"), - }), - []string{`a["a1"]`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "map_index_unknown_value": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.UnknownVal(cty.String), - }), - "b": cty.StringVal("b value"), - }), - []string{`a["a1"]`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "map_index_multiple_keys": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - "a2": cty.StringVal("a2 value"), - "a3": cty.StringVal("a3 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.NullVal(cty.Map(cty.String)), - "b": cty.StringVal("new b value"), - }), - []string{`a["a1"]`, `a["a2"]`, `a["a3"]`, `b`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a1": cty.StringVal("a1 value"), - "a2": cty.StringVal("a2 value"), - "a3": 
cty.StringVal("a3 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "map_index_redundant": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - "a2": cty.StringVal("a2 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.NullVal(cty.Map(cty.String)), - "b": cty.StringVal("new b value"), - }), - []string{`a["a1"]`, `a`, `b`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - "a2": cty.StringVal("a2 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "missing_map_index": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapValEmpty(cty.String), - "b": cty.StringVal("b value"), - }), - []string{`a["a1"]`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a1": cty.StringVal("a1 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "missing_map_index_empty": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapValEmpty(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a0 value"), - }), - }), - []string{`a["a"]`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapValEmpty(cty.String), - }), - }, - "missing_map_index_to_object": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("aa0"), - "b": cty.StringVal("ab0"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("ba0"), - "b": cty.StringVal("bb0"), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapValEmpty( - 
cty.Object(map[string]cty.Type{ - "a": cty.String, - "b": cty.String, - }), - ), - }), - // we expect the config to be used here, as the ignore changes was - // `a["a"].b`, but the change was larger than that removing - // `a["a"]` entirely. - []string{`a["a"].b`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapValEmpty( - cty.Object(map[string]cty.Type{ - "a": cty.String, - "b": cty.String, - }), - ), - }), - }, - "missing_prior_map_index": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - "a1": cty.StringVal("new a1 value"), - }), - "b": cty.StringVal("b value"), - }), - []string{`a["a1"]`}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.MapVal(map[string]cty.Value{ - "a0": cty.StringVal("a0 value"), - }), - "b": cty.StringVal("b value"), - }), - }, - "object attribute": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("a.foo value"), - "bar": cty.StringVal("a.bar value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("new a.foo value"), - "bar": cty.StringVal("new a.bar value"), - }), - "b": cty.StringVal("new b value"), - }), - []string{"a.bar"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("new a.foo value"), - "bar": cty.StringVal("a.bar value"), - }), - "b": cty.StringVal("new b value"), - }), - }, - "unknown_object_attribute": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("a.foo value"), - "bar": cty.StringVal("a.bar value"), - }), - "b": cty.StringVal("b value"), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - 
"foo": cty.StringVal("new a.foo value"), - "bar": cty.UnknownVal(cty.String), - }), - "b": cty.StringVal("new b value"), - }), - []string{"a.bar"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("new a.foo value"), - "bar": cty.StringVal("a.bar value"), - }), - "b": cty.StringVal("new b value"), - }), - }, - "null_map": { - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("ok"), - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "s": cty.StringVal("ok"), - "map": cty.NullVal(cty.Map(cty.String)), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "a": cty.NullVal(cty.String), - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "s": cty.StringVal("ok"), - "map": cty.NullVal(cty.Map(cty.String)), - }), - }), - }), - []string{"a"}, - cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("ok"), - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "s": cty.StringVal("ok"), - "map": cty.NullVal(cty.Map(cty.String)), - }), - }), - }), - }, - "marked_map": { - cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("val"), - }).Mark("marked"), - }), - cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("new val"), - }).Mark("marked"), - }), - []string{`map["key"]`}, - cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "key": cty.StringVal("val"), - }).Mark("marked"), - }), - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - ignore := make([]hcl.Traversal, len(test.Ignore)) - for i, ignoreStr := range test.Ignore { - trav, diags := hclsyntax.ParseTraversalAbs([]byte(ignoreStr), "", hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - t.Fatalf("failed to parse %q: %s", ignoreStr, diags.Error()) - } - ignore[i] = trav - } - - ret, diags := processIgnoreChangesIndividual(test.Old, 
test.New, traversalsToPaths(ignore)) - if diags.HasErrors() { - t.Fatal(diags.Err()) - } - - if got, want := ret, test.Want; !want.RawEquals(got) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want) - } - }) - } -} diff --git a/internal/terraform/resource_provider_mock_test.go b/internal/terraform/resource_provider_mock_test.go deleted file mode 100644 index 6592b0a96011..000000000000 --- a/internal/terraform/resource_provider_mock_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/zclconf/go-cty/cty" -) - -// mockProviderWithConfigSchema is a test helper to concisely create a mock -// provider with the given schema for its own configuration. -func mockProviderWithConfigSchema(schema *configschema.Block) *MockProvider { - return &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: schema}, - }, - } -} - -// mockProviderWithResourceTypeSchema is a test helper to concisely create a mock -// provider with a schema containing a single resource type. -func mockProviderWithResourceTypeSchema(name string, schema *configschema.Block) *MockProvider { - return &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": { - Type: cty.String, - Optional: true, - }, - "list": { - Type: cty.List(cty.String), - Optional: true, - }, - "root": { - Type: cty.Map(cty.String), - Optional: true, - }, - }, - }, - }, - ResourceTypes: map[string]providers.Schema{ - name: providers.Schema{Block: schema}, - }, - }, - } -} - -// getProviderSchemaResponseFromProviderSchema is a test helper to convert a -// ProviderSchema to a GetProviderSchemaResponse for use when building a mock provider. 
-func getProviderSchemaResponseFromProviderSchema(providerSchema *ProviderSchema) *providers.GetProviderSchemaResponse { - resp := &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: providerSchema.Provider}, - ProviderMeta: providers.Schema{Block: providerSchema.ProviderMeta}, - ResourceTypes: map[string]providers.Schema{}, - DataSources: map[string]providers.Schema{}, - } - - for name, schema := range providerSchema.ResourceTypes { - resp.ResourceTypes[name] = providers.Schema{ - Block: schema, - Version: int64(providerSchema.ResourceTypeSchemaVersions[name]), - } - } - - for name, schema := range providerSchema.DataSources { - resp.DataSources[name] = providers.Schema{Block: schema} - } - - return resp -} - -// simpleMockProvider returns a MockProvider that is pre-configured -// with schema for its own config, for a resource type called "test_object" and -// for a data source also called "test_object". -// -// All three schemas have the same content as returned by function -// simpleTestSchema. -// -// For most reasonable uses the returned provider must be registered in a -// componentFactory under the name "test". Use simpleMockComponentFactory -// to obtain a pre-configured componentFactory containing the result of -// this function along with simpleMockProvisioner, both registered as "test". -// -// The returned provider has no other behaviors by default, but the caller may -// modify it in order to stub any other required functionality, or modify -// the default schema stored in the field GetSchemaReturn. Each new call to -// simpleTestProvider produces entirely new instances of all of the nested -// objects so that callers can mutate without affecting mock objects. 
-func simpleMockProvider() *MockProvider { - return &MockProvider{ - GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{Block: simpleTestSchema()}, - ResourceTypes: map[string]providers.Schema{ - "test_object": providers.Schema{Block: simpleTestSchema()}, - }, - DataSources: map[string]providers.Schema{ - "test_object": providers.Schema{Block: simpleTestSchema()}, - }, - }, - } -} diff --git a/internal/terraform/schemas.go b/internal/terraform/schemas.go deleted file mode 100644 index 24edeb85aaf9..000000000000 --- a/internal/terraform/schemas.go +++ /dev/null @@ -1,187 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// ProviderSchema is an alias for providers.Schemas, which is the new location -// for what we originally called terraform.ProviderSchema but which has -// moved out as part of ongoing refactoring to shrink down the main "terraform" -// package. -type ProviderSchema = providers.Schemas - -// Schemas is a container for various kinds of schema that Terraform needs -// during processing. -type Schemas struct { - Providers map[addrs.Provider]*providers.Schemas - Provisioners map[string]*configschema.Block -} - -// ProviderSchema returns the entire ProviderSchema object that was produced -// by the plugin for the given provider, or nil if no such schema is available. -// -// It's usually better to go use the more precise methods offered by type -// Schemas to handle this detail automatically. 
-func (ss *Schemas) ProviderSchema(provider addrs.Provider) *providers.Schemas { - if ss.Providers == nil { - return nil - } - return ss.Providers[provider] -} - -// ProviderConfig returns the schema for the provider configuration of the -// given provider type, or nil if no such schema is available. -func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { - ps := ss.ProviderSchema(provider) - if ps == nil { - return nil - } - return ps.Provider -} - -// ResourceTypeConfig returns the schema for the configuration of a given -// resource type belonging to a given provider type, or nil of no such -// schema is available. -// -// In many cases the provider type is inferrable from the resource type name, -// but this is not always true because users can override the provider for -// a resource using the "provider" meta-argument. Therefore it's important to -// always pass the correct provider name, even though it many cases it feels -// redundant. -func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { - ps := ss.ProviderSchema(provider) - if ps == nil || ps.ResourceTypes == nil { - return nil, 0 - } - return ps.SchemaForResourceType(resourceMode, resourceType) -} - -// ProvisionerConfig returns the schema for the configuration of a given -// provisioner, or nil of no such schema is available. -func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { - return ss.Provisioners[name] -} - -// loadSchemas searches the given configuration, state and plan (any of which -// may be nil) for constructs that have an associated schema, requests the -// necessary schemas from the given component factory (which must _not_ be nil), -// and returns a single object representing all of the necessary schemas. -// -// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing -// errors across multiple separate objects. 
Errors here will usually indicate -// either misbehavior on the part of one of the providers or of the provider -// protocol itself. When returned with errors, the returned schemas object is -// still valid but may be incomplete. -func loadSchemas(config *configs.Config, state *states.State, plugins *contextPlugins) (*Schemas, error) { - schemas := &Schemas{ - Providers: map[addrs.Provider]*providers.Schemas{}, - Provisioners: map[string]*configschema.Block{}, - } - var diags tfdiags.Diagnostics - - newDiags := loadProviderSchemas(schemas.Providers, config, state, plugins) - diags = diags.Append(newDiags) - newDiags = loadProvisionerSchemas(schemas.Provisioners, config, plugins) - diags = diags.Append(newDiags) - - return schemas, diags.Err() -} - -func loadProviderSchemas(schemas map[addrs.Provider]*providers.Schemas, config *configs.Config, state *states.State, plugins *contextPlugins) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(fqn addrs.Provider) { - name := fqn.String() - - if _, exists := schemas[fqn]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) - schema, err := plugins.ProviderSchema(fqn) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls, which would then repeat the same error message - // multiple times. 
- schemas[fqn] = &providers.Schemas{} - diags = diags.Append( - tfdiags.Sourceless( - tfdiags.Error, - "Failed to obtain provider schema", - fmt.Sprintf("Could not load the schema for provider %s: %s.", fqn, err), - ), - ) - return - } - - schemas[fqn] = schema - } - - if config != nil { - for _, fqn := range config.ProviderTypes() { - ensure(fqn) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeAddr := range needed { - ensure(typeAddr) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, plugins *contextPlugins) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - schema, err := plugins.ProvisionerSchema(name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls, which would then repeat the same error message - // multiple times. - schemas[name] = &configschema.Block{} - diags = diags.Append( - tfdiags.Sourceless( - tfdiags.Error, - "Failed to obtain provisioner schema", - fmt.Sprintf("Could not load the schema for provisioner %q: %s.", name, err), - ), - ) - return - } - - schemas[name] = schema - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. 
- for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, plugins) - diags = diags.Append(childDiags) - } - } - - return diags -} diff --git a/internal/terraform/schemas_test.go b/internal/terraform/schemas_test.go deleted file mode 100644 index 044b795a50a9..000000000000 --- a/internal/terraform/schemas_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" -) - -func simpleTestSchemas() *Schemas { - provider := simpleMockProvider() - provisioner := simpleMockProvisioner() - - return &Schemas{ - Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.ProviderSchema(), - }, - Provisioners: map[string]*configschema.Block{ - "test": provisioner.GetSchemaResponse.Provisioner, - }, - } -} - -// schemaOnlyProvidersForTesting is a testing helper that constructs a -// plugin library that contains a set of providers that only know how to -// return schema, and will exhibit undefined behavior if used for any other -// purpose. -// -// The intended use for this is in testing components that use schemas to -// drive other behavior, such as reference analysis during graph construction, -// but that don't actually need to interact with providers otherwise. 
-func schemaOnlyProvidersForTesting(schemas map[addrs.Provider]*ProviderSchema) *contextPlugins { - factories := make(map[addrs.Provider]providers.Factory, len(schemas)) - - for providerAddr, schema := range schemas { - - resp := &providers.GetProviderSchemaResponse{ - Provider: providers.Schema{ - Block: schema.Provider, - }, - ResourceTypes: make(map[string]providers.Schema), - DataSources: make(map[string]providers.Schema), - } - for t, tSchema := range schema.ResourceTypes { - resp.ResourceTypes[t] = providers.Schema{ - Block: tSchema, - Version: int64(schema.ResourceTypeSchemaVersions[t]), - } - } - for t, tSchema := range schema.DataSources { - resp.DataSources[t] = providers.Schema{ - Block: tSchema, - } - } - - provider := &MockProvider{ - GetProviderSchemaResponse: resp, - } - - factories[providerAddr] = func() (providers.Interface, error) { - return provider, nil - } - } - - return newContextPlugins(factories, nil) -} diff --git a/internal/terraform/terraform_test.go b/internal/terraform/terraform_test.go deleted file mode 100644 index 419245958332..000000000000 --- a/internal/terraform/terraform_test.go +++ /dev/null @@ -1,1083 +0,0 @@ -package terraform - -import ( - "context" - "flag" - "io" - "os" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/states" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -// This is the directory where our test fixtures are. 
-const fixtureDir = "./testdata" - -func TestMain(m *testing.M) { - flag.Parse() - - // We have fmt.Stringer implementations on lots of objects that hide - // details that we very often want to see in tests, so we just disable - // spew's use of String methods globally on the assumption that spew - // usage implies an intent to see the raw values and ignore any - // abstractions. - spew.Config.DisableMethods = true - - os.Exit(m.Run()) -} - -func testModule(t *testing.T, name string) *configs.Config { - t.Helper() - c, _ := testModuleWithSnapshot(t, name) - return c -} - -func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, *configload.Snapshot) { - t.Helper() - - dir := filepath.Join(fixtureDir, name) - // FIXME: We're not dealing with the cleanup function here because - // this testModule function is used all over and so we don't want to - // change its interface at this late stage. - loader, _ := configload.NewLoaderForTests(t) - - // We need to be able to exercise experimental features in our integration tests. - loader.AllowLanguageExperiments(true) - - // Test modules usually do not refer to remote sources, and for local - // sources only this ultimately just records all of the module paths - // in a JSON file so that we can load them below. - inst := initwd.NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) - _, instDiags := inst.InstallModules(context.Background(), dir, true, initwd.ModuleInstallHooksImpl{}) - if instDiags.HasErrors() { - t.Fatal(instDiags.Err()) - } - - // Since module installer has modified the module manifest on disk, we need - // to refresh the cache of it in the loader. 
- if err := loader.RefreshModules(); err != nil { - t.Fatalf("failed to refresh modules after installation: %s", err) - } - - config, snap, diags := loader.LoadConfigWithSnapshot(dir) - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - return config, snap -} - -// testModuleInline takes a map of path -> config strings and yields a config -// structure with those files loaded from disk -func testModuleInline(t *testing.T, sources map[string]string) *configs.Config { - t.Helper() - - cfgPath := t.TempDir() - - for path, configStr := range sources { - dir := filepath.Dir(path) - if dir != "." { - err := os.MkdirAll(filepath.Join(cfgPath, dir), os.FileMode(0777)) - if err != nil { - t.Fatalf("Error creating subdir: %s", err) - } - } - // Write the configuration - cfgF, err := os.Create(filepath.Join(cfgPath, path)) - if err != nil { - t.Fatalf("Error creating temporary file for config: %s", err) - } - - _, err = io.Copy(cfgF, strings.NewReader(configStr)) - cfgF.Close() - if err != nil { - t.Fatalf("Error creating temporary file for config: %s", err) - } - } - - loader, cleanup := configload.NewLoaderForTests(t) - defer cleanup() - - // We need to be able to exercise experimental features in our integration tests. - loader.AllowLanguageExperiments(true) - - // Test modules usually do not refer to remote sources, and for local - // sources only this ultimately just records all of the module paths - // in a JSON file so that we can load them below. - inst := initwd.NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) - _, instDiags := inst.InstallModules(context.Background(), cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if instDiags.HasErrors() { - t.Fatal(instDiags.Err()) - } - - // Since module installer has modified the module manifest on disk, we need - // to refresh the cache of it in the loader. 
- if err := loader.RefreshModules(); err != nil { - t.Fatalf("failed to refresh modules after installation: %s", err) - } - - config, diags := loader.LoadConfig(cfgPath) - if diags.HasErrors() { - t.Fatal(diags.Error()) - } - - return config -} - -// testSetResourceInstanceCurrent is a helper function for tests that sets a Current, -// Ready resource instance for the given module. -func testSetResourceInstanceCurrent(module *states.Module, resource, attrsJson, provider string) { - module.SetResourceInstanceCurrent( - mustResourceInstanceAddr(resource).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(attrsJson), - }, - mustProviderConfig(provider), - ) -} - -// testSetResourceInstanceTainted is a helper function for tests that sets a Current, -// Tainted resource instance for the given module. -func testSetResourceInstanceTainted(module *states.Module, resource, attrsJson, provider string) { - module.SetResourceInstanceCurrent( - mustResourceInstanceAddr(resource).Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(attrsJson), - }, - mustProviderConfig(provider), - ) -} - -func testProviderFuncFixed(rp providers.Interface) providers.Factory { - return func() (providers.Interface, error) { - if p, ok := rp.(*MockProvider); ok { - // make sure none of the methods were "called" on this new instance - p.GetProviderSchemaCalled = false - p.ValidateProviderConfigCalled = false - p.ValidateResourceConfigCalled = false - p.ValidateDataResourceConfigCalled = false - p.UpgradeResourceStateCalled = false - p.ConfigureProviderCalled = false - p.StopCalled = false - p.ReadResourceCalled = false - p.PlanResourceChangeCalled = false - p.ApplyResourceChangeCalled = false - p.ImportResourceStateCalled = false - p.ReadDataSourceCalled = false - p.CloseCalled = false - } - - return rp, nil - } -} - -func testProvisionerFuncFixed(rp *MockProvisioner) provisioners.Factory { - return func() 
(provisioners.Interface, error) { - // make sure this provisioner has has not been closed - rp.CloseCalled = false - return rp, nil - } -} - -func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance { - addr, diags := addrs.ParseAbsResourceInstanceStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} - -func mustConfigResourceAddr(s string) addrs.ConfigResource { - addr, diags := addrs.ParseAbsResourceStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr.Config() -} - -func mustAbsResourceAddr(s string) addrs.AbsResource { - addr, diags := addrs.ParseAbsResourceStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return addr -} - -func mustProviderConfig(s string) addrs.AbsProviderConfig { - p, diags := addrs.ParseAbsProviderConfigStr(s) - if diags.HasErrors() { - panic(diags.Err()) - } - return p -} - -// HookRecordApplyOrder is a test hook that records the order of applies -// by recording the PreApply event. -type HookRecordApplyOrder struct { - NilHook - - Active bool - - IDs []string - States []cty.Value - Diffs []*plans.Change - - l sync.Mutex -} - -func (h *HookRecordApplyOrder) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - if plannedNewState.RawEquals(priorState) { - return HookActionContinue, nil - } - - if h.Active { - h.l.Lock() - defer h.l.Unlock() - - h.IDs = append(h.IDs, addr.String()) - h.Diffs = append(h.Diffs, &plans.Change{ - Action: action, - Before: priorState, - After: plannedNewState, - }) - h.States = append(h.States, priorState) - } - - return HookActionContinue, nil -} - -// Below are all the constant strings that are the expected output for -// various tests. 
- -const testTerraformInputProviderOnlyStr = ` -aws_instance.foo: - ID = - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = us-west-2 - type = -` - -const testTerraformApplyStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyDataBasicStr = ` -data.null_data_source.testing: - ID = yo - provider = provider["registry.terraform.io/hashicorp/null"] -` - -const testTerraformApplyRefCountStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = 3 - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyProviderAliasStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"].bar - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyProviderAliasConfigStr = ` -another_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/another"].two - type = another_instance -another_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/another"] - type = another_instance -` - -const testTerraformApplyEmptyModuleStr = ` - -Outputs: - -end = XXXX -` - -const testTerraformApplyDependsCreateBeforeStr = ` -aws_instance.lb: - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"] - instance = 
foo - type = aws_instance - - Dependencies: - aws_instance.web -aws_instance.web: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = ami-new - type = aws_instance -` - -const testTerraformApplyCreateBeforeStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = xyz - type = aws_instance -` - -const testTerraformApplyCreateBeforeUpdateStr = ` -aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = baz - type = aws_instance -` - -const testTerraformApplyCancelStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - value = 2 -` - -const testTerraformApplyComputeStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = computed_value - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - compute = value - compute_value = 1 - num = 2 - type = aws_instance - value = computed_value -` - -const testTerraformApplyCountDecStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo.0: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo.1: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -` - -const testTerraformApplyCountDecToOneStr = ` -aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -` - -const testTerraformApplyCountDecToOneCorruptedStr = ` -aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -` - -const testTerraformApplyCountDecToOneCorruptedPlanStr = ` -DIFF: - 
-DESTROY: aws_instance.foo[0] - id: "baz" => "" - type: "aws_instance" => "" - - - -STATE: - -aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo.0: - ID = baz - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyCountVariableStr = ` -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -` - -const testTerraformApplyCountVariableRefStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = 2 - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` -const testTerraformApplyForEachVariableStr = ` -aws_instance.foo["b15c6d616d6143248c575900dff57325eb1de498"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo["c3de47d34b0a9f13918dd705c141d579dd6555fd"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.foo["e30a7edcc42a846684f2a4eea5f3cd261d33c46d"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - type = aws_instance -aws_instance.one["a"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.one["b"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.two["a"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - 
Dependencies: - aws_instance.one -aws_instance.two["b"]: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - Dependencies: - aws_instance.one` -const testTerraformApplyMinimalStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyModuleStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - -module.child: - aws_instance.baz: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -` - -const testTerraformApplyModuleBoolStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = true - type = aws_instance -` - -const testTerraformApplyModuleDestroyOrderStr = ` - -` - -const testTerraformApplyMultiProviderStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -do_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/do"] - num = 2 - type = do_instance -` - -const testTerraformApplyModuleOnlyProviderStr = ` - -module.child: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - test_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/test"] - type = test_instance -` - -const testTerraformApplyModuleProviderAliasStr = ` - -module.child: - aws_instance.foo: - ID = foo - provider = module.child.provider["registry.terraform.io/hashicorp/aws"].eu - type = aws_instance -` - -const testTerraformApplyModuleVarRefExistingStr = ` 
-aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance - -module.child: - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - value = bar - - Dependencies: - aws_instance.foo -` - -const testTerraformApplyOutputOrphanStr = ` - -Outputs: - -foo = bar -` - -const testTerraformApplyOutputOrphanModuleStr = ` - -` - -const testTerraformApplyProvisionerStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - compute = value - compute_value = 1 - num = 2 - type = aws_instance - value = computed_value -` - -const testTerraformApplyProvisionerModuleStr = ` - -module.child: - aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyProvisionerFailStr = ` -aws_instance.bar: (tainted) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyProvisionerFailCreateStr = ` -aws_instance.bar: (tainted) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyProvisionerFailCreateNoIdStr = ` - -` - -const testTerraformApplyProvisionerFailCreateBeforeDestroyStr = ` -aws_instance.bar: (tainted) (1 deposed) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = xyz - type = aws_instance - Deposed ID 1 = bar -` - -const testTerraformApplyProvisionerResourceRefStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const 
testTerraformApplyProvisionerSelfRefStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -` - -const testTerraformApplyProvisionerMultiSelfRefStr = ` -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 0 - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 1 - type = aws_instance -aws_instance.foo.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 2 - type = aws_instance -` - -const testTerraformApplyProvisionerMultiSelfRefSingleStr = ` -aws_instance.foo.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 0 - type = aws_instance -aws_instance.foo.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 1 - type = aws_instance -aws_instance.foo.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = number 2 - type = aws_instance -` - -const testTerraformApplyProvisionerDiffStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -` - -const testTerraformApplyProvisionerSensitiveStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance -` - -const testTerraformApplyDestroyStr = ` - -` - -const testTerraformApplyErrorStr = ` -aws_instance.bar: (tainted) - ID = - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = 2 - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - value = 2 -` - -const testTerraformApplyErrorCreateBeforeDestroyStr = ` -aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = abc - type = aws_instance -` - 
-const testTerraformApplyErrorDestroyCreateBeforeDestroyStr = ` -aws_instance.bar: (1 deposed) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - require_new = xyz - type = aws_instance - Deposed ID 1 = bar -` - -const testTerraformApplyErrorPartialStr = ` -aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - type = aws_instance - value = 2 -` - -const testTerraformApplyResourceDependsOnModuleStr = ` -aws_instance.a: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = parent - type = aws_instance - - Dependencies: - module.child.aws_instance.child - -module.child: - aws_instance.child: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = child - type = aws_instance -` - -const testTerraformApplyResourceDependsOnModuleDeepStr = ` -aws_instance.a: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = parent - type = aws_instance - - Dependencies: - module.child.module.grandchild.aws_instance.c - -module.child.grandchild: - aws_instance.c: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = grandchild - type = aws_instance -` - -const testTerraformApplyResourceDependsOnModuleInModuleStr = ` - -module.child: - aws_instance.b: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = child - type = aws_instance - - Dependencies: - module.child.module.grandchild.aws_instance.c -module.child.grandchild: - aws_instance.c: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - ami = grandchild - type = aws_instance -` - -const testTerraformApplyTaintStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyTaintDepStr 
= ` -aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - num = 2 - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyTaintDepRequireNewStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo - require_new = yes - type = aws_instance - - Dependencies: - aws_instance.foo -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyOutputStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - -Outputs: - -foo_num = 2 -` - -const testTerraformApplyOutputAddStr = ` -aws_instance.test.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo0 - type = aws_instance -aws_instance.test.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = foo1 - type = aws_instance - -Outputs: - -firstOutput = foo0 -secondOutput = foo1 -` - -const testTerraformApplyOutputListStr = ` -aws_instance.bar.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - -Outputs: - -foo_num = [bar,bar,bar] -` - -const testTerraformApplyOutputMultiStr = ` -aws_instance.bar.0: 
- ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - -Outputs: - -foo_num = bar,bar,bar -` - -const testTerraformApplyOutputMultiIndexStr = ` -aws_instance.bar.0: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.1: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.bar.2: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = bar - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance - -Outputs: - -foo_num = bar -` - -const testTerraformApplyUnknownAttrStr = ` -aws_instance.foo: (tainted) - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - num = 2 - type = aws_instance -` - -const testTerraformApplyVarsStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = override - baz = override - foo = us-east-1 -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = baz - list.# = 2 - list.0 = Hello - list.1 = World - map.Baz = Foo - map.Foo = Bar - map.Hello = World - num = 2 -` - -const testTerraformApplyVarsEnvStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - list.# = 2 - list.0 = Hello - list.1 = World - map.Baz = Foo - map.Foo = Bar - map.Hello = World - string = baz - type = aws_instance -` - -const 
testTerraformRefreshDataRefDataStr = ` -data.null_data_source.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/null"] - bar = yes -data.null_data_source.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/null"] - foo = yes -` diff --git a/internal/terraform/testdata/apply-blank/main.tf b/internal/terraform/testdata/apply-blank/main.tf deleted file mode 100644 index 0081db1861a6..000000000000 --- a/internal/terraform/testdata/apply-blank/main.tf +++ /dev/null @@ -1 +0,0 @@ -// Nothing! diff --git a/internal/terraform/testdata/apply-cancel-block/main.tf b/internal/terraform/testdata/apply-cancel-block/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/apply-cancel-block/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-cancel-provisioner/main.tf b/internal/terraform/testdata/apply-cancel-provisioner/main.tf deleted file mode 100644 index dadabd882c01..000000000000 --- a/internal/terraform/testdata/apply-cancel-provisioner/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - - provisioner "shell" { - foo = "bar" - } -} diff --git a/internal/terraform/testdata/apply-cancel/main.tf b/internal/terraform/testdata/apply-cancel/main.tf deleted file mode 100644 index 7c4af5f71a48..000000000000 --- a/internal/terraform/testdata/apply-cancel/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - value = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.value}" -} diff --git a/internal/terraform/testdata/apply-cbd-count/main.tf b/internal/terraform/testdata/apply-cbd-count/main.tf deleted file mode 100644 index 058d3382c533..000000000000 --- a/internal/terraform/testdata/apply-cbd-count/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "bar" { - count = 2 - foo = "bar" - - lifecycle { - create_before_destroy = true - } -} 
diff --git a/internal/terraform/testdata/apply-cbd-cycle/main.tf b/internal/terraform/testdata/apply-cbd-cycle/main.tf deleted file mode 100644 index 5ac53107ebee..000000000000 --- a/internal/terraform/testdata/apply-cbd-cycle/main.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "test_instance" "a" { - foo = test_instance.b.id - require_new = "changed" - - lifecycle { - create_before_destroy = true - } -} - -resource "test_instance" "b" { - foo = test_instance.c.id - require_new = "changed" -} - - -resource "test_instance" "c" { - require_new = "changed" -} - diff --git a/internal/terraform/testdata/apply-cbd-depends-non-cbd/main.tf b/internal/terraform/testdata/apply-cbd-depends-non-cbd/main.tf deleted file mode 100644 index 6ba1b983fb85..000000000000 --- a/internal/terraform/testdata/apply-cbd-depends-non-cbd/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - require_new = "yes" -} - -resource "aws_instance" "bar" { - require_new = "yes" - value = "${aws_instance.foo.id}" - - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-cbd-deposed-only/main.tf b/internal/terraform/testdata/apply-cbd-deposed-only/main.tf deleted file mode 100644 index 0d2e2d3f92bf..000000000000 --- a/internal/terraform/testdata/apply-cbd-deposed-only/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "bar" { - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-compute/main.tf b/internal/terraform/testdata/apply-compute/main.tf deleted file mode 100644 index e785294ab44e..000000000000 --- a/internal/terraform/testdata/apply-compute/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "value" { - default = "" -} - -resource "aws_instance" "foo" { - num = "2" - compute = "value" - compute_value = "${var.value}" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.value}" -} diff --git a/internal/terraform/testdata/apply-count-dec-one/main.tf 
b/internal/terraform/testdata/apply-count-dec-one/main.tf deleted file mode 100644 index 3b0fd9428595..000000000000 --- a/internal/terraform/testdata/apply-count-dec-one/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - foo = "foo" -} diff --git a/internal/terraform/testdata/apply-count-dec/main.tf b/internal/terraform/testdata/apply-count-dec/main.tf deleted file mode 100644 index f18748c3b5cc..000000000000 --- a/internal/terraform/testdata/apply-count-dec/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - foo = "foo" - count = 2 -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-count-tainted/main.tf b/internal/terraform/testdata/apply-count-tainted/main.tf deleted file mode 100644 index ba35b034377a..000000000000 --- a/internal/terraform/testdata/apply-count-tainted/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - foo = "foo" -} diff --git a/internal/terraform/testdata/apply-count-variable-ref/main.tf b/internal/terraform/testdata/apply-count-variable-ref/main.tf deleted file mode 100644 index 8e9e4526612a..000000000000 --- a/internal/terraform/testdata/apply-count-variable-ref/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "foo" { - default = "2" -} - -resource "aws_instance" "foo" { - count = "${var.foo}" -} - -resource "aws_instance" "bar" { - foo = length(aws_instance.foo) -} diff --git a/internal/terraform/testdata/apply-count-variable/main.tf b/internal/terraform/testdata/apply-count-variable/main.tf deleted file mode 100644 index 6f322f2187f0..000000000000 --- a/internal/terraform/testdata/apply-count-variable/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "foo" { - default = "2" -} - -resource "aws_instance" "foo" { - foo = "foo" - count = "${var.foo}" -} diff --git a/internal/terraform/testdata/apply-data-basic/main.tf b/internal/terraform/testdata/apply-data-basic/main.tf deleted file mode 100644 index 
0c3bd8817ec8..000000000000 --- a/internal/terraform/testdata/apply-data-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -data "null_data_source" "testing" {} diff --git a/internal/terraform/testdata/apply-data-sensitive/main.tf b/internal/terraform/testdata/apply-data-sensitive/main.tf deleted file mode 100644 index c248a7c3316a..000000000000 --- a/internal/terraform/testdata/apply-data-sensitive/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "foo" { - sensitive = true - default = "foo" -} - -data "null_data_source" "testing" { - foo = var.foo -} diff --git a/internal/terraform/testdata/apply-depends-create-before/main.tf b/internal/terraform/testdata/apply-depends-create-before/main.tf deleted file mode 100644 index 63478d893d9c..000000000000 --- a/internal/terraform/testdata/apply-depends-create-before/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_instance" "web" { - require_new = "ami-new" - lifecycle { - create_before_destroy = true - } -} - -resource "aws_instance" "lb" { - instance = aws_instance.web.id -} diff --git a/internal/terraform/testdata/apply-destroy-cbd/main.tf b/internal/terraform/testdata/apply-destroy-cbd/main.tf deleted file mode 100644 index 3c7a46f7c170..000000000000 --- a/internal/terraform/testdata/apply-destroy-cbd/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { } -resource "aws_instance" "bar" { - depends_on = ["aws_instance.foo"] - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-destroy-computed/child/main.tf b/internal/terraform/testdata/apply-destroy-computed/child/main.tf deleted file mode 100644 index 5cd1f02b666c..000000000000 --- a/internal/terraform/testdata/apply-destroy-computed/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_instance" "bar" { - value = "${var.value}" -} diff --git a/internal/terraform/testdata/apply-destroy-computed/main.tf b/internal/terraform/testdata/apply-destroy-computed/main.tf deleted file mode 
100644 index 768c9680d801..000000000000 --- a/internal/terraform/testdata/apply-destroy-computed/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "foo" {} - -module "child" { - source = "./child" - value = "${aws_instance.foo.output}" -} diff --git a/internal/terraform/testdata/apply-destroy-cross-providers/child/main.tf b/internal/terraform/testdata/apply-destroy-cross-providers/child/main.tf deleted file mode 100644 index 048b26dec80a..000000000000 --- a/internal/terraform/testdata/apply-destroy-cross-providers/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_vpc" "bar" { - value = "${var.value}" -} diff --git a/internal/terraform/testdata/apply-destroy-cross-providers/main.tf b/internal/terraform/testdata/apply-destroy-cross-providers/main.tf deleted file mode 100644 index 1ff123a73b59..000000000000 --- a/internal/terraform/testdata/apply-destroy-cross-providers/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "shared" { -} - -module "child" { - source = "./child" - value = "${aws_instance.shared.id}" -} diff --git a/internal/terraform/testdata/apply-destroy-data-cycle/main.tf b/internal/terraform/testdata/apply-destroy-data-cycle/main.tf deleted file mode 100644 index 591af82004a4..000000000000 --- a/internal/terraform/testdata/apply-destroy-data-cycle/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -locals { - l = data.null_data_source.d.id -} - -data "null_data_source" "d" { -} - -resource "null_resource" "a" { - count = local.l == "NONE" ? 
1 : 0 -} - -provider "test" { - foo = data.null_data_source.d.id -} diff --git a/internal/terraform/testdata/apply-destroy-data-resource/main.tf b/internal/terraform/testdata/apply-destroy-data-resource/main.tf deleted file mode 100644 index 0d941a707746..000000000000 --- a/internal/terraform/testdata/apply-destroy-data-resource/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "null_data_source" "testing" { - foo = "yes" -} diff --git a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/main.tf b/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/main.tf deleted file mode 100644 index 3694951f572f..000000000000 --- a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "subchild" { - source = "./subchild" -} diff --git a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf b/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf deleted file mode 100644 index d31b87e0c640..000000000000 --- a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -/* -module "subsubchild" { - source = "./subsubchild" -} -*/ diff --git a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf b/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf deleted file mode 100644 index 6ff716a4d4c1..000000000000 --- a/internal/terraform/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/apply-destroy-deeply-nested-module/main.tf b/internal/terraform/testdata/apply-destroy-deeply-nested-module/main.tf deleted file mode 100644 index 1f95749fa7ea..000000000000 --- a/internal/terraform/testdata/apply-destroy-deeply-nested-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ 
-module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-destroy-depends-on/main.tf b/internal/terraform/testdata/apply-destroy-depends-on/main.tf deleted file mode 100644 index 3c3ee656f5b9..000000000000 --- a/internal/terraform/testdata/apply-destroy-depends-on/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { - depends_on = ["aws_instance.bar"] -} - -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf deleted file mode 100644 index 6a4f91d5e903..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "mod_count_child2" { } - -resource "aws_instance" "foo" { - count = "${var.mod_count_child2}" -} diff --git a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf deleted file mode 100644 index 28b526795806..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "mod_count_child" { } - -module "child2" { - source = "./child2" - mod_count_child2 = "${var.mod_count_child}" -} - -resource "aws_instance" "foo" { } diff --git a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/main.tf deleted file mode 100644 index 58600cdb94a0..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-and-count-nested/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "mod_count_root" { - type = string - default = "3" -} - -module "child" { - source = "./child" - mod_count_child = var.mod_count_root -} diff --git 
a/internal/terraform/testdata/apply-destroy-mod-var-and-count/child/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-and-count/child/main.tf deleted file mode 100644 index 67dac02a2754..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-and-count/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "mod_count" { } - -resource "aws_instance" "foo" { - count = "${var.mod_count}" -} diff --git a/internal/terraform/testdata/apply-destroy-mod-var-and-count/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-and-count/main.tf deleted file mode 100644 index 918b40d06711..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-and-count/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - mod_count = "3" -} diff --git a/internal/terraform/testdata/apply-destroy-mod-var-provider-config/child/child.tf b/internal/terraform/testdata/apply-destroy-mod-var-provider-config/child/child.tf deleted file mode 100644 index 6544cf6cb45f..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-provider-config/child/child.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "input" {} - -provider "aws" { - region = "us-east-${var.input}" -} - -resource "aws_instance" "foo" { } diff --git a/internal/terraform/testdata/apply-destroy-mod-var-provider-config/main.tf b/internal/terraform/testdata/apply-destroy-mod-var-provider-config/main.tf deleted file mode 100644 index 1e2dfb3521df..000000000000 --- a/internal/terraform/testdata/apply-destroy-mod-var-provider-config/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - input = "1" -} diff --git a/internal/terraform/testdata/apply-destroy-module-resource-prefix/child/main.tf b/internal/terraform/testdata/apply-destroy-module-resource-prefix/child/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/apply-destroy-module-resource-prefix/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource 
"aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-destroy-module-resource-prefix/main.tf b/internal/terraform/testdata/apply-destroy-module-resource-prefix/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-destroy-module-resource-prefix/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-destroy-module-with-attrs/child/main.tf b/internal/terraform/testdata/apply-destroy-module-with-attrs/child/main.tf deleted file mode 100644 index 55fa601707ff..000000000000 --- a/internal/terraform/testdata/apply-destroy-module-with-attrs/child/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "vpc_id" {} - -resource "aws_instance" "child" { - vpc_id = var.vpc_id -} - -output "modout" { - value = aws_instance.child.id -} diff --git a/internal/terraform/testdata/apply-destroy-module-with-attrs/main.tf b/internal/terraform/testdata/apply-destroy-module-with-attrs/main.tf deleted file mode 100644 index 9b2d46db7414..000000000000 --- a/internal/terraform/testdata/apply-destroy-module-with-attrs/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_instance" "vpc" { } - -module "child" { - source = "./child" - vpc_id = aws_instance.vpc.id -} - -output "out" { - value = module.child.modout -} diff --git a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf b/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf deleted file mode 100644 index b5db44ee33e6..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable bottom_param {} - -resource "null_resource" "bottom" { - value = "${var.bottom_param}" -} diff --git a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf 
b/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf deleted file mode 100644 index 76652ee443df..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable param {} - -module "bottom" { - source = "./bottom" - bottom_param = "${var.param}" -} - -resource "null_resource" "middle" { - value = "${var.param}" -} diff --git a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/top.tf b/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/top.tf deleted file mode 100644 index 1b631f4d5c08..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module-with-attrs/top.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "middle" { - source = "./middle" - param = "foo" -} diff --git a/internal/terraform/testdata/apply-destroy-nested-module/child/main.tf b/internal/terraform/testdata/apply-destroy-nested-module/child/main.tf deleted file mode 100644 index 852bce8b9f39..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "subchild" { - source = "./subchild" -} diff --git a/internal/terraform/testdata/apply-destroy-nested-module/child/subchild/main.tf b/internal/terraform/testdata/apply-destroy-nested-module/child/subchild/main.tf deleted file mode 100644 index 6ff716a4d4c1..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module/child/subchild/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/apply-destroy-nested-module/main.tf b/internal/terraform/testdata/apply-destroy-nested-module/main.tf deleted file mode 100644 index 8a5a1b2e5be7..000000000000 --- a/internal/terraform/testdata/apply-destroy-nested-module/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -/* -module "child" { - source = "./child" -} -*/ diff --git a/internal/terraform/testdata/apply-destroy-outputs/main.tf 
b/internal/terraform/testdata/apply-destroy-outputs/main.tf deleted file mode 100644 index 8a0384798eaf..000000000000 --- a/internal/terraform/testdata/apply-destroy-outputs/main.tf +++ /dev/null @@ -1,34 +0,0 @@ -data "test_data_source" "bar" { - for_each = { - a = "b" - } - foo = "zing" -} - -data "test_data_source" "foo" { - for_each = data.test_data_source.bar - foo = "ok" -} - -locals { - l = [ - { - name = data.test_data_source.foo["a"].id - val = "null" - }, - ] - - m = { for v in local.l : - v.name => v - } -} - -resource "test_instance" "bar" { - for_each = local.m - foo = format("%s", each.value.name) - dep = each.value.val -} - -output "out" { - value = test_instance.bar -} diff --git a/internal/terraform/testdata/apply-destroy-provisioner/main.tf b/internal/terraform/testdata/apply-destroy-provisioner/main.tf deleted file mode 100644 index 51b29c72a082..000000000000 --- a/internal/terraform/testdata/apply-destroy-provisioner/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/apply-destroy-tainted/main.tf b/internal/terraform/testdata/apply-destroy-tainted/main.tf deleted file mode 100644 index 48f4f13783e0..000000000000 --- a/internal/terraform/testdata/apply-destroy-tainted/main.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "test_instance" "a" { - foo = "a" -} - -resource "test_instance" "b" { - foo = "b" - lifecycle { - create_before_destroy = true - } -} - -resource "test_instance" "c" { - foo = "c" - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-destroy-targeted-count/main.tf b/internal/terraform/testdata/apply-destroy-targeted-count/main.tf deleted file mode 100644 index 680d30ffaa36..000000000000 --- a/internal/terraform/testdata/apply-destroy-targeted-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} - -resource "aws_instance" "bar" { - foo = 
["${aws_instance.foo.*.id}"] -} diff --git a/internal/terraform/testdata/apply-destroy-with-locals/main.tf b/internal/terraform/testdata/apply-destroy-with-locals/main.tf deleted file mode 100644 index 1ab75187155e..000000000000 --- a/internal/terraform/testdata/apply-destroy-with-locals/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -locals { - name = "test-${aws_instance.foo.id}" -} -resource "aws_instance" "foo" {} - -output "name" { - value = "${local.name}" -} diff --git a/internal/terraform/testdata/apply-destroy/main.tf b/internal/terraform/testdata/apply-destroy/main.tf deleted file mode 100644 index 1b6cdae67b0e..000000000000 --- a/internal/terraform/testdata/apply-destroy/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/apply-empty-module/child/main.tf b/internal/terraform/testdata/apply-empty-module/child/main.tf deleted file mode 100644 index 6db38ea162c5..000000000000 --- a/internal/terraform/testdata/apply-empty-module/child/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -output "aws_route53_zone_id" { - value = "XXXX" -} - -output "aws_access_key" { - value = "YYYYY" -} - -output "aws_secret_key" { - value = "ZZZZ" -} diff --git a/internal/terraform/testdata/apply-empty-module/main.tf b/internal/terraform/testdata/apply-empty-module/main.tf deleted file mode 100644 index 50ce84f0bc3f..000000000000 --- a/internal/terraform/testdata/apply-empty-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -output "end" { - value = "${module.child.aws_route53_zone_id}" -} diff --git a/internal/terraform/testdata/apply-error-create-before/main.tf b/internal/terraform/testdata/apply-error-create-before/main.tf deleted file mode 100644 index c7c2776eb773..000000000000 --- a/internal/terraform/testdata/apply-error-create-before/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "bar" { - 
require_new = "xyz" - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-error/main.tf b/internal/terraform/testdata/apply-error/main.tf deleted file mode 100644 index 7c4af5f71a48..000000000000 --- a/internal/terraform/testdata/apply-error/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - value = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.value}" -} diff --git a/internal/terraform/testdata/apply-escape/main.tf b/internal/terraform/testdata/apply-escape/main.tf deleted file mode 100644 index bca2c9b7e27c..000000000000 --- a/internal/terraform/testdata/apply-escape/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "bar" { - foo = "${"\"bar\""}" -} diff --git a/internal/terraform/testdata/apply-good-create-before-update/main.tf b/internal/terraform/testdata/apply-good-create-before-update/main.tf deleted file mode 100644 index d0a2fc937668..000000000000 --- a/internal/terraform/testdata/apply-good-create-before-update/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "bar" { - foo = "baz" - - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-good-create-before/main.tf b/internal/terraform/testdata/apply-good-create-before/main.tf deleted file mode 100644 index c7c2776eb773..000000000000 --- a/internal/terraform/testdata/apply-good-create-before/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "bar" { - require_new = "xyz" - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-good/main.tf b/internal/terraform/testdata/apply-good/main.tf deleted file mode 100644 index 5c22c19d109e..000000000000 --- a/internal/terraform/testdata/apply-good/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = 2 -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-idattr/main.tf 
b/internal/terraform/testdata/apply-idattr/main.tf deleted file mode 100644 index 1c49f3975554..000000000000 --- a/internal/terraform/testdata/apply-idattr/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = 42 -} diff --git a/internal/terraform/testdata/apply-ignore-changes-all/main.tf b/internal/terraform/testdata/apply-ignore-changes-all/main.tf deleted file mode 100644 index a89889a09be3..000000000000 --- a/internal/terraform/testdata/apply-ignore-changes-all/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - required_field = "set" - - lifecycle { - ignore_changes = all - } -} diff --git a/internal/terraform/testdata/apply-ignore-changes-create/main.tf b/internal/terraform/testdata/apply-ignore-changes-create/main.tf deleted file mode 100644 index d470660ec1cc..000000000000 --- a/internal/terraform/testdata/apply-ignore-changes-create/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - required_field = "set" - - lifecycle { - ignore_changes = ["required_field"] - } -} diff --git a/internal/terraform/testdata/apply-ignore-changes-dep/main.tf b/internal/terraform/testdata/apply-ignore-changes-dep/main.tf deleted file mode 100644 index 097d48942839..000000000000 --- a/internal/terraform/testdata/apply-ignore-changes-dep/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - ami = "ami-bcd456" - lifecycle { - ignore_changes = ["ami"] - } -} - -resource "aws_eip" "foo" { - count = 2 - instance = "${aws_instance.foo.*.id[count.index]}" -} diff --git a/internal/terraform/testdata/apply-inconsistent-with-plan/main.tf b/internal/terraform/testdata/apply-inconsistent-with-plan/main.tf deleted file mode 100644 index 9284072dc9c1..000000000000 --- a/internal/terraform/testdata/apply-inconsistent-with-plan/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "test" "foo" { -} diff --git a/internal/terraform/testdata/apply-interpolated-count/main.tf 
b/internal/terraform/testdata/apply-interpolated-count/main.tf deleted file mode 100644 index 527a0b84205c..000000000000 --- a/internal/terraform/testdata/apply-interpolated-count/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "instance_count" { - default = 1 -} - -resource "aws_instance" "test" { - count = "${var.instance_count}" -} - -resource "aws_instance" "dependent" { - count = "${length(aws_instance.test)}" -} diff --git a/internal/terraform/testdata/apply-invalid-index/main.tf b/internal/terraform/testdata/apply-invalid-index/main.tf deleted file mode 100644 index 8ea02d77384e..000000000000 --- a/internal/terraform/testdata/apply-invalid-index/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_instance" "a" { - count = 0 -} - -resource "test_instance" "b" { - value = test_instance.a[0].value -} diff --git a/internal/terraform/testdata/apply-issue19908/issue19908.tf b/internal/terraform/testdata/apply-issue19908/issue19908.tf deleted file mode 100644 index 0c802fb653fa..000000000000 --- a/internal/terraform/testdata/apply-issue19908/issue19908.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test" "foo" { - baz = "updated" -} diff --git a/internal/terraform/testdata/apply-local-val/child/child.tf b/internal/terraform/testdata/apply-local-val/child/child.tf deleted file mode 100644 index f7febc42f656..000000000000 --- a/internal/terraform/testdata/apply-local-val/child/child.tf +++ /dev/null @@ -1,4 +0,0 @@ - -output "result" { - value = "hello" -} diff --git a/internal/terraform/testdata/apply-local-val/main.tf b/internal/terraform/testdata/apply-local-val/main.tf deleted file mode 100644 index 51ca2dedcf3a..000000000000 --- a/internal/terraform/testdata/apply-local-val/main.tf +++ /dev/null @@ -1,10 +0,0 @@ - -module "child" { - source = "./child" -} - -locals { - result_1 = "${module.child.result}" - result_2 = "${local.result_1}" - result_3 = "${local.result_2} world" -} diff --git a/internal/terraform/testdata/apply-local-val/outputs.tf 
b/internal/terraform/testdata/apply-local-val/outputs.tf deleted file mode 100644 index f0078c190b39..000000000000 --- a/internal/terraform/testdata/apply-local-val/outputs.tf +++ /dev/null @@ -1,9 +0,0 @@ -# These are in a separate file to make sure config merging is working properly - -output "result_1" { - value = "${local.result_1}" -} - -output "result_3" { - value = "${local.result_3}" -} diff --git a/internal/terraform/testdata/apply-map-var-through-module/amodule/main.tf b/internal/terraform/testdata/apply-map-var-through-module/amodule/main.tf deleted file mode 100644 index a5284966ed08..000000000000 --- a/internal/terraform/testdata/apply-map-var-through-module/amodule/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "amis" { - type = map(string) -} - -resource "null_resource" "noop" {} - -output "amis_out" { - value = var.amis -} diff --git a/internal/terraform/testdata/apply-map-var-through-module/main.tf b/internal/terraform/testdata/apply-map-var-through-module/main.tf deleted file mode 100644 index 4cec4a678b0d..000000000000 --- a/internal/terraform/testdata/apply-map-var-through-module/main.tf +++ /dev/null @@ -1,19 +0,0 @@ -variable "amis_in" { - type = map(string) - default = { - "us-west-1" = "ami-123456" - "us-west-2" = "ami-456789" - "eu-west-1" = "ami-789012" - "eu-west-2" = "ami-989484" - } -} - -module "test" { - source = "./amodule" - - amis = var.amis_in -} - -output "amis_from_module" { - value = module.test.amis_out -} diff --git a/internal/terraform/testdata/apply-minimal/main.tf b/internal/terraform/testdata/apply-minimal/main.tf deleted file mode 100644 index 88002d078a1b..000000000000 --- a/internal/terraform/testdata/apply-minimal/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { -} - -resource "aws_instance" "bar" { -} diff --git a/internal/terraform/testdata/apply-module-bool/child/main.tf b/internal/terraform/testdata/apply-module-bool/child/main.tf deleted file mode 100644 index d2a38434c296..000000000000 --- 
a/internal/terraform/testdata/apply-module-bool/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "leader" { - default = false -} - -output "leader" { - value = "${var.leader}" -} diff --git a/internal/terraform/testdata/apply-module-bool/main.tf b/internal/terraform/testdata/apply-module-bool/main.tf deleted file mode 100644 index 1d40cd4f4ae1..000000000000 --- a/internal/terraform/testdata/apply-module-bool/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "child" { - source = "./child" - leader = true -} - -resource "aws_instance" "bar" { - foo = "${module.child.leader}" -} diff --git a/internal/terraform/testdata/apply-module-depends-on/main.tf b/internal/terraform/testdata/apply-module-depends-on/main.tf deleted file mode 100644 index 9f7102d531cf..000000000000 --- a/internal/terraform/testdata/apply-module-depends-on/main.tf +++ /dev/null @@ -1,32 +0,0 @@ -module "moda" { - source = "./moda" - depends_on = [test_instance.a, module.modb] -} - -resource "test_instance" "a" { - depends_on = [module.modb] - num = 4 - foo = test_instance.aa.id -} - -resource "test_instance" "aa" { - num = 3 - foo = module.modb.out -} - -module "modb" { - source = "./modb" - depends_on = [test_instance.b] -} - -resource "test_instance" "b" { - num = 1 -} - -output "moda_data" { - value = module.moda.out -} - -output "modb_resource" { - value = module.modb.out -} diff --git a/internal/terraform/testdata/apply-module-depends-on/moda/main.tf b/internal/terraform/testdata/apply-module-depends-on/moda/main.tf deleted file mode 100644 index e60d300bae2c..000000000000 --- a/internal/terraform/testdata/apply-module-depends-on/moda/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -resource "test_instance" "a" { - num = 5 -} - -data "test_data_source" "a" { - foo = "a" -} - -output "out" { - value = data.test_data_source.a.id -} diff --git a/internal/terraform/testdata/apply-module-depends-on/modb/main.tf b/internal/terraform/testdata/apply-module-depends-on/modb/main.tf deleted file mode 100644 index 
961c5d560bd7..000000000000 --- a/internal/terraform/testdata/apply-module-depends-on/modb/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -resource "test_instance" "b" { - num = 2 -} - -data "test_data_source" "b" { - foo = "b" -} - -output "out" { - value = test_instance.b.id -} diff --git a/internal/terraform/testdata/apply-module-destroy-order/child/main.tf b/internal/terraform/testdata/apply-module-destroy-order/child/main.tf deleted file mode 100644 index 0b2a8bc07dd1..000000000000 --- a/internal/terraform/testdata/apply-module-destroy-order/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "a" { - id = "a" -} - -output "a_output" { - value = "${aws_instance.a.id}" -} diff --git a/internal/terraform/testdata/apply-module-destroy-order/main.tf b/internal/terraform/testdata/apply-module-destroy-order/main.tf deleted file mode 100644 index 2c47edadff9a..000000000000 --- a/internal/terraform/testdata/apply-module-destroy-order/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "b" { - id = "b" - blah = "${module.child.a_output}" -} diff --git a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf b/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/main.tf b/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/main.tf deleted file mode 100644 index b422300ec984..000000000000 --- a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "grandchild" { - source = "./grandchild" -} diff --git 
a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/main.tf b/internal/terraform/testdata/apply-module-grandchild-provider-inherit/main.tf deleted file mode 100644 index 25d0993d1e40..000000000000 --- a/internal/terraform/testdata/apply-module-grandchild-provider-inherit/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - value = "foo" -} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-module-only-provider/child/main.tf b/internal/terraform/testdata/apply-module-only-provider/child/main.tf deleted file mode 100644 index e15099c171b3..000000000000 --- a/internal/terraform/testdata/apply-module-only-provider/child/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "aws_instance" "foo" {} -resource "test_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-module-only-provider/main.tf b/internal/terraform/testdata/apply-module-only-provider/main.tf deleted file mode 100644 index 2276b5f36ca2..000000000000 --- a/internal/terraform/testdata/apply-module-only-provider/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" {} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-module-orphan-provider-inherit/main.tf b/internal/terraform/testdata/apply-module-orphan-provider-inherit/main.tf deleted file mode 100644 index e334ff2c77b8..000000000000 --- a/internal/terraform/testdata/apply-module-orphan-provider-inherit/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "aws" { - value = "foo" -} diff --git a/internal/terraform/testdata/apply-module-provider-alias/child/main.tf b/internal/terraform/testdata/apply-module-provider-alias/child/main.tf deleted file mode 100644 index ee923f255ae8..000000000000 --- a/internal/terraform/testdata/apply-module-provider-alias/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "eu" -} - -resource "aws_instance" "foo" { - provider = "aws.eu" -} diff --git 
a/internal/terraform/testdata/apply-module-provider-alias/main.tf b/internal/terraform/testdata/apply-module-provider-alias/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-module-provider-alias/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-module-provider-close-nested/child/main.tf b/internal/terraform/testdata/apply-module-provider-close-nested/child/main.tf deleted file mode 100644 index 852bce8b9f39..000000000000 --- a/internal/terraform/testdata/apply-module-provider-close-nested/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "subchild" { - source = "./subchild" -} diff --git a/internal/terraform/testdata/apply-module-provider-close-nested/child/subchild/main.tf b/internal/terraform/testdata/apply-module-provider-close-nested/child/subchild/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/apply-module-provider-close-nested/child/subchild/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-module-provider-close-nested/main.tf b/internal/terraform/testdata/apply-module-provider-close-nested/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-module-provider-close-nested/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-module-provider-inherit-alias-orphan/main.tf b/internal/terraform/testdata/apply-module-provider-inherit-alias-orphan/main.tf deleted file mode 100644 index 4332b9adb723..000000000000 --- a/internal/terraform/testdata/apply-module-provider-inherit-alias-orphan/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" { -} - -provider "aws" { - alias = "eu" -} diff --git a/internal/terraform/testdata/apply-module-provider-inherit-alias/child/main.tf 
b/internal/terraform/testdata/apply-module-provider-inherit-alias/child/main.tf deleted file mode 100644 index 2db7c4ee88b6..000000000000 --- a/internal/terraform/testdata/apply-module-provider-inherit-alias/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "eu" -} - -resource "aws_instance" "foo" { - provider = "aws.eu" -} diff --git a/internal/terraform/testdata/apply-module-provider-inherit-alias/main.tf b/internal/terraform/testdata/apply-module-provider-inherit-alias/main.tf deleted file mode 100644 index a018d1468f13..000000000000 --- a/internal/terraform/testdata/apply-module-provider-inherit-alias/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -provider "aws" { - root = 1 -} - -provider "aws" { - value = "eu" - alias = "eu" -} - -module "child" { - source = "./child" - providers = { - "aws.eu" = "aws.eu" - } -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle-cbd/main.tf b/internal/terraform/testdata/apply-module-replace-cycle-cbd/main.tf deleted file mode 100644 index 6393231d6858..000000000000 --- a/internal/terraform/testdata/apply-module-replace-cycle-cbd/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "a" { - source = "./mod1" -} - -module "b" { - source = "./mod2" - ids = module.a.ids -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod1/main.tf b/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod1/main.tf deleted file mode 100644 index 2ade442bfd3f..000000000000 --- a/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod1/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_instance" "a" { - require_new = "new" - lifecycle { - create_before_destroy = true - } -} - -output "ids" { - value = [aws_instance.a.id] -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod2/main.tf b/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod2/main.tf deleted file mode 100644 index 83fb1dcd467b..000000000000 --- 
a/internal/terraform/testdata/apply-module-replace-cycle-cbd/mod2/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "b" { - count = length(var.ids) - require_new = var.ids[count.index] -} - -variable "ids" { - type = list(string) -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle/main.tf b/internal/terraform/testdata/apply-module-replace-cycle/main.tf deleted file mode 100644 index 6393231d6858..000000000000 --- a/internal/terraform/testdata/apply-module-replace-cycle/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "a" { - source = "./mod1" -} - -module "b" { - source = "./mod2" - ids = module.a.ids -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle/mod1/main.tf b/internal/terraform/testdata/apply-module-replace-cycle/mod1/main.tf deleted file mode 100644 index 3dd26cb8e7e8..000000000000 --- a/internal/terraform/testdata/apply-module-replace-cycle/mod1/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "a" { - require_new = "new" -} - -output "ids" { - value = [aws_instance.a.id] -} diff --git a/internal/terraform/testdata/apply-module-replace-cycle/mod2/main.tf b/internal/terraform/testdata/apply-module-replace-cycle/mod2/main.tf deleted file mode 100644 index 83fb1dcd467b..000000000000 --- a/internal/terraform/testdata/apply-module-replace-cycle/mod2/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "b" { - count = length(var.ids) - require_new = var.ids[count.index] -} - -variable "ids" { - type = list(string) -} diff --git a/internal/terraform/testdata/apply-module-var-resource-count/child/main.tf b/internal/terraform/testdata/apply-module-var-resource-count/child/main.tf deleted file mode 100644 index 1a19910e8f34..000000000000 --- a/internal/terraform/testdata/apply-module-var-resource-count/child/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "num" { -} - -resource "aws_instance" "foo" { - count = "${var.num}" -} diff --git 
a/internal/terraform/testdata/apply-module-var-resource-count/main.tf b/internal/terraform/testdata/apply-module-var-resource-count/main.tf deleted file mode 100644 index 6f7d20c48bf7..000000000000 --- a/internal/terraform/testdata/apply-module-var-resource-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "num" { -} - -module "child" { - source = "./child" - num = "${var.num}" -} diff --git a/internal/terraform/testdata/apply-module/child/main.tf b/internal/terraform/testdata/apply-module/child/main.tf deleted file mode 100644 index f279d9b80bff..000000000000 --- a/internal/terraform/testdata/apply-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "baz" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-module/main.tf b/internal/terraform/testdata/apply-module/main.tf deleted file mode 100644 index f9119a109eb4..000000000000 --- a/internal/terraform/testdata/apply-module/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-multi-depose-create-before-destroy/main.tf b/internal/terraform/testdata/apply-multi-depose-create-before-destroy/main.tf deleted file mode 100644 index e5a723b3a495..000000000000 --- a/internal/terraform/testdata/apply-multi-depose-create-before-destroy/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "require_new" { - type = string -} - -resource "aws_instance" "web" { - // require_new is a special attribute recognized by testDiffFn that forces - // a new resource on every apply - require_new = var.require_new - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-multi-provider-destroy-child/child/main.tf b/internal/terraform/testdata/apply-multi-provider-destroy-child/child/main.tf deleted file mode 100644 index ae1bc8ee4c25..000000000000 --- 
a/internal/terraform/testdata/apply-multi-provider-destroy-child/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-multi-provider-destroy-child/main.tf b/internal/terraform/testdata/apply-multi-provider-destroy-child/main.tf deleted file mode 100644 index 9b799979b139..000000000000 --- a/internal/terraform/testdata/apply-multi-provider-destroy-child/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "vault_instance" "foo" {} - -provider "aws" { - value = "${vault_instance.foo.id}" -} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-multi-provider-destroy/main.tf b/internal/terraform/testdata/apply-multi-provider-destroy/main.tf deleted file mode 100644 index dd3041bb5d4b..000000000000 --- a/internal/terraform/testdata/apply-multi-provider-destroy/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "vault_instance" "foo" {} - -provider "aws" { - addr = "${vault_instance.foo.id}" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-multi-provider/main.tf b/internal/terraform/testdata/apply-multi-provider/main.tf deleted file mode 100644 index 4ee94a3bfe6d..000000000000 --- a/internal/terraform/testdata/apply-multi-provider/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "do_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-multi-ref/main.tf b/internal/terraform/testdata/apply-multi-ref/main.tf deleted file mode 100644 index 2a6a67152179..000000000000 --- a/internal/terraform/testdata/apply-multi-ref/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "create" { - bar = "abc" -} - -resource "aws_instance" "other" { - var = "${aws_instance.create.id}" - foo = "${aws_instance.create.bar}" -} diff --git a/internal/terraform/testdata/apply-multi-var-comprehensive/child/child.tf 
b/internal/terraform/testdata/apply-multi-var-comprehensive/child/child.tf deleted file mode 100644 index 8fe7df7c232e..000000000000 --- a/internal/terraform/testdata/apply-multi-var-comprehensive/child/child.tf +++ /dev/null @@ -1,29 +0,0 @@ -variable "num" { -} - -variable "source_ids" { - type = list(string) -} - -variable "source_names" { - type = list(string) -} - -resource "test_thing" "multi_count_var" { - count = var.num - - key = "child.multi_count_var.${count.index}" - - # Can pluck a single item out of a multi-var - source_id = var.source_ids[count.index] -} - -resource "test_thing" "whole_splat" { - key = "child.whole_splat" - - # Can "splat" the ids directly into an attribute of type list. - source_ids = var.source_ids - source_names = var.source_names - source_ids_wrapped = ["${var.source_ids}"] - source_names_wrapped = ["${var.source_names}"] -} diff --git a/internal/terraform/testdata/apply-multi-var-comprehensive/root.tf b/internal/terraform/testdata/apply-multi-var-comprehensive/root.tf deleted file mode 100644 index 64ada6be6f22..000000000000 --- a/internal/terraform/testdata/apply-multi-var-comprehensive/root.tf +++ /dev/null @@ -1,74 +0,0 @@ -variable "num" { -} - -resource "test_thing" "source" { - count = var.num - - key = "source.${count.index}" - - # The diffFunc in the test exports "name" here too, which we can use - # to test values that are known during plan. 
-} - -resource "test_thing" "multi_count_var" { - count = var.num - - key = "multi_count_var.${count.index}" - - # Can pluck a single item out of a multi-var - source_id = test_thing.source.*.id[count.index] - source_name = test_thing.source.*.name[count.index] -} - -resource "test_thing" "multi_count_derived" { - # Can use the source to get the count - count = length(test_thing.source) - - key = "multi_count_derived.${count.index}" - - source_id = test_thing.source.*.id[count.index] - source_name = test_thing.source.*.name[count.index] -} - -resource "test_thing" "whole_splat" { - key = "whole_splat" - - # Can "splat" the ids directly into an attribute of type list. - source_ids = test_thing.source.*.id - source_names = test_thing.source.*.name - - # Accessing through a function should work. - source_ids_from_func = split(" ", join(" ", test_thing.source.*.id)) - source_names_from_func = split(" ", join(" ", test_thing.source.*.name)) - - # A common pattern of selecting with a default. - first_source_id = element(concat(test_thing.source.*.id, ["default"]), 0) - first_source_name = element(concat(test_thing.source.*.name, ["default"]), 0) - - # Prior to v0.12 we were handling lists containing list interpolations as - # a special case, flattening the result, for compatibility with behavior - # prior to v0.10. This deprecated handling is now removed, and so these - # each produce a list of lists. We're still using the interpolation syntax - # here, rather than the splat expression directly, to properly mimic how - # this would've looked prior to v0.12 to be explicit about what the new - # behavior is for this old syntax. 
- source_ids_wrapped = ["${test_thing.source.*.id}"] - source_names_wrapped = ["${test_thing.source.*.name}"] - -} - -module "child" { - source = "./child" - - num = var.num - source_ids = test_thing.source.*.id - source_names = test_thing.source.*.name -} - -output "source_ids" { - value = test_thing.source.*.id -} - -output "source_names" { - value = test_thing.source.*.name -} diff --git a/internal/terraform/testdata/apply-multi-var-count-dec/main.tf b/internal/terraform/testdata/apply-multi-var-count-dec/main.tf deleted file mode 100644 index 40476512fa09..000000000000 --- a/internal/terraform/testdata/apply-multi-var-count-dec/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "num" {} - -resource "aws_instance" "foo" { - count = "${var.num}" - value = "foo" -} - -resource "aws_instance" "bar" { - ami = "special" - - value = "${join(",", aws_instance.foo.*.id)}" -} diff --git a/internal/terraform/testdata/apply-multi-var-missing-state/child/child.tf b/internal/terraform/testdata/apply-multi-var-missing-state/child/child.tf deleted file mode 100644 index b5df05d0e247..000000000000 --- a/internal/terraform/testdata/apply-multi-var-missing-state/child/child.tf +++ /dev/null @@ -1,15 +0,0 @@ - -# This resource gets visited first on the apply walk, but since it DynamicExpands -# to an empty subgraph it ends up being a no-op, leaving the module state -# uninitialized. -resource "test_thing" "a" { - count = 0 -} - -# This resource is visited second. During its eval walk we try to build the -# array for the null_resource.a.*.id interpolation, which involves iterating -# over all of the resource in the state. This should succeed even though the -# module state will be nil when evaluating the variable. 
-resource "test_thing" "b" { - a_ids = "${join(" ", test_thing.a.*.id)}" -} diff --git a/internal/terraform/testdata/apply-multi-var-missing-state/root.tf b/internal/terraform/testdata/apply-multi-var-missing-state/root.tf deleted file mode 100644 index 25a0a1f9b49e..000000000000 --- a/internal/terraform/testdata/apply-multi-var-missing-state/root.tf +++ /dev/null @@ -1,7 +0,0 @@ -// We test this in a child module, since the root module state exists -// very early on, even before any resources are created in it, but that is not -// true for child modules. - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-multi-var-order-interp/main.tf b/internal/terraform/testdata/apply-multi-var-order-interp/main.tf deleted file mode 100644 index 6cc2e29d9add..000000000000 --- a/internal/terraform/testdata/apply-multi-var-order-interp/main.tf +++ /dev/null @@ -1,17 +0,0 @@ -variable "num" { - default = 15 -} - -resource "aws_instance" "bar" { - count = "${var.num}" - foo = "index-${count.index}" -} - -resource "aws_instance" "baz" { - count = "${var.num}" - foo = "baz-${element(aws_instance.bar.*.foo, count.index)}" -} - -output "should-be-11" { - value = "${element(aws_instance.baz.*.foo, 11)}" -} diff --git a/internal/terraform/testdata/apply-multi-var-order/main.tf b/internal/terraform/testdata/apply-multi-var-order/main.tf deleted file mode 100644 index 7ffefb6f349b..000000000000 --- a/internal/terraform/testdata/apply-multi-var-order/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "num" { - default = 15 -} - -resource "aws_instance" "bar" { - count = "${var.num}" - foo = "index-${count.index}" -} - -output "should-be-11" { - value = "${element(aws_instance.bar.*.foo, 11)}" -} diff --git a/internal/terraform/testdata/apply-multi-var/main.tf b/internal/terraform/testdata/apply-multi-var/main.tf deleted file mode 100644 index c7ed45c6a816..000000000000 --- a/internal/terraform/testdata/apply-multi-var/main.tf +++ /dev/null @@ -1,10 +0,0 
@@ -variable "num" {} - -resource "aws_instance" "bar" { - count = "${var.num}" - foo = "bar${count.index}" -} - -output "output" { - value = "${join(",", aws_instance.bar.*.foo)}" -} diff --git a/internal/terraform/testdata/apply-nullable-variables/main.tf b/internal/terraform/testdata/apply-nullable-variables/main.tf deleted file mode 100644 index ed4b6c7f26f2..000000000000 --- a/internal/terraform/testdata/apply-nullable-variables/main.tf +++ /dev/null @@ -1,28 +0,0 @@ -module "mod" { - source = "./mod" - nullable_null_default = null - nullable_non_null_default = null - nullable_no_default = null - non_nullable_default = null - non_nullable_no_default = "ok" -} - -output "nullable_null_default" { - value = module.mod.nullable_null_default -} - -output "nullable_non_null_default" { - value = module.mod.nullable_non_null_default -} - -output "nullable_no_default" { - value = module.mod.nullable_no_default -} - -output "non_nullable_default" { - value = module.mod.non_nullable_default -} - -output "non_nullable_no_default" { - value = module.mod.non_nullable_no_default -} diff --git a/internal/terraform/testdata/apply-nullable-variables/mod/main.tf b/internal/terraform/testdata/apply-nullable-variables/mod/main.tf deleted file mode 100644 index fcac3ba37260..000000000000 --- a/internal/terraform/testdata/apply-nullable-variables/mod/main.tf +++ /dev/null @@ -1,59 +0,0 @@ -// optional, and this can take null as an input -variable "nullable_null_default" { - // This is implied now as the default, and probably should be implied even - // when nullable=false is the default, so we're leaving this unset for the test. - // nullable = true - - default = null -} - -// assigning null can still override the default. -variable "nullable_non_null_default" { - nullable = true - default = "ok" -} - -// required, and assigning null is valid. 
-variable "nullable_no_default" { - nullable = true -} - - -// this combination is invalid -//variable "non_nullable_null_default" { -// nullable = false -// default = null -//} - - -// assigning null will take the default -variable "non_nullable_default" { - nullable = false - default = "ok" -} - -// required, but null is not a valid value -variable "non_nullable_no_default" { - nullable = false -} - -output "nullable_null_default" { - value = var.nullable_null_default -} - -output "nullable_non_null_default" { - value = var.nullable_non_null_default -} - -output "nullable_no_default" { - value = var.nullable_no_default -} - -output "non_nullable_default" { - value = var.non_nullable_default -} - -output "non_nullable_no_default" { - value = var.non_nullable_no_default -} - diff --git a/internal/terraform/testdata/apply-orphan-resource/main.tf b/internal/terraform/testdata/apply-orphan-resource/main.tf deleted file mode 100644 index 3e093ac83f50..000000000000 --- a/internal/terraform/testdata/apply-orphan-resource/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_thing" "zero" { - count = 0 -} - -resource "test_thing" "one" { - count = 1 -} diff --git a/internal/terraform/testdata/apply-output-add-after/main.tf b/internal/terraform/testdata/apply-output-add-after/main.tf deleted file mode 100644 index 1c10eaafc571..000000000000 --- a/internal/terraform/testdata/apply-output-add-after/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" {} - -resource "aws_instance" "test" { - foo = "${format("foo%d", count.index)}" - count = 2 -} diff --git a/internal/terraform/testdata/apply-output-add-after/outputs.tf.json b/internal/terraform/testdata/apply-output-add-after/outputs.tf.json deleted file mode 100644 index 32e96b0ee07c..000000000000 --- a/internal/terraform/testdata/apply-output-add-after/outputs.tf.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "output": { - "firstOutput": { - "value": "${aws_instance.test.0.foo}" - }, - "secondOutput": { - "value": 
"${aws_instance.test.1.foo}" - } - } -} diff --git a/internal/terraform/testdata/apply-output-add-before/main.tf b/internal/terraform/testdata/apply-output-add-before/main.tf deleted file mode 100644 index 1c10eaafc571..000000000000 --- a/internal/terraform/testdata/apply-output-add-before/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" {} - -resource "aws_instance" "test" { - foo = "${format("foo%d", count.index)}" - count = 2 -} diff --git a/internal/terraform/testdata/apply-output-add-before/outputs.tf.json b/internal/terraform/testdata/apply-output-add-before/outputs.tf.json deleted file mode 100644 index 238668ef3d17..000000000000 --- a/internal/terraform/testdata/apply-output-add-before/outputs.tf.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "output": { - "firstOutput": { - "value": "${aws_instance.test.0.foo}" - } - } -} diff --git a/internal/terraform/testdata/apply-output-list/main.tf b/internal/terraform/testdata/apply-output-list/main.tf deleted file mode 100644 index 11b8107dffd4..000000000000 --- a/internal/terraform/testdata/apply-output-list/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" - count = 3 -} - -output "foo_num" { - value = ["${join(",", aws_instance.bar.*.foo)}"] -} diff --git a/internal/terraform/testdata/apply-output-multi-index/main.tf b/internal/terraform/testdata/apply-output-multi-index/main.tf deleted file mode 100644 index c7ede94d5a83..000000000000 --- a/internal/terraform/testdata/apply-output-multi-index/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" - count = 3 -} - -output "foo_num" { - value = "${aws_instance.bar.0.foo}" -} diff --git a/internal/terraform/testdata/apply-output-multi/main.tf b/internal/terraform/testdata/apply-output-multi/main.tf deleted file mode 100644 index a70e334b16be..000000000000 --- 
a/internal/terraform/testdata/apply-output-multi/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" - count = 3 -} - -output "foo_num" { - value = "${join(",", aws_instance.bar.*.foo)}" -} diff --git a/internal/terraform/testdata/apply-output-orphan-module/child/main.tf b/internal/terraform/testdata/apply-output-orphan-module/child/main.tf deleted file mode 100644 index ae32f8aa13b3..000000000000 --- a/internal/terraform/testdata/apply-output-orphan-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "foo" { - value = "bar" -} diff --git a/internal/terraform/testdata/apply-output-orphan-module/main.tf b/internal/terraform/testdata/apply-output-orphan-module/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-output-orphan-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-output-orphan/main.tf b/internal/terraform/testdata/apply-output-orphan/main.tf deleted file mode 100644 index ae32f8aa13b3..000000000000 --- a/internal/terraform/testdata/apply-output-orphan/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "foo" { - value = "bar" -} diff --git a/internal/terraform/testdata/apply-output/main.tf b/internal/terraform/testdata/apply-output/main.tf deleted file mode 100644 index 1f91a40f150e..000000000000 --- a/internal/terraform/testdata/apply-output/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" -} - -output "foo_num" { - value = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/apply-plan-connection-refs/main.tf b/internal/terraform/testdata/apply-plan-connection-refs/main.tf deleted file mode 100644 index d20191f33b13..000000000000 --- a/internal/terraform/testdata/apply-plan-connection-refs/main.tf +++ /dev/null @@ -1,18 +0,0 @@ 
-variable "msg" { - default = "ok" -} - -resource "test_instance" "a" { - foo = "a" -} - - -resource "test_instance" "b" { - foo = "b" - provisioner "shell" { - command = "echo ${var.msg}" - } - connection { - host = test_instance.a.id - } -} diff --git a/internal/terraform/testdata/apply-provider-alias-configure/main.tf b/internal/terraform/testdata/apply-provider-alias-configure/main.tf deleted file mode 100644 index 4487e4573ab3..000000000000 --- a/internal/terraform/testdata/apply-provider-alias-configure/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -provider "another" { - foo = "bar" -} - -provider "another" { - alias = "two" - foo = "bar" -} - -resource "another_instance" "foo" {} - -resource "another_instance" "bar" { - provider = "another.two" -} diff --git a/internal/terraform/testdata/apply-provider-alias/main.tf b/internal/terraform/testdata/apply-provider-alias/main.tf deleted file mode 100644 index 19fd985abf2c..000000000000 --- a/internal/terraform/testdata/apply-provider-alias/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -provider "aws" { - alias = "bar" -} - -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" - provider = "aws.bar" -} diff --git a/internal/terraform/testdata/apply-provider-computed/main.tf b/internal/terraform/testdata/apply-provider-computed/main.tf deleted file mode 100644 index 81acf7cfaa9d..000000000000 --- a/internal/terraform/testdata/apply-provider-computed/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" { - value = test_instance.foo.id -} - -resource "aws_instance" "bar" {} - -resource "test_instance" "foo" { - value = "yes" -} diff --git a/internal/terraform/testdata/apply-provider-configure-disabled/child/main.tf b/internal/terraform/testdata/apply-provider-configure-disabled/child/main.tf deleted file mode 100644 index c421bf743c30..000000000000 --- a/internal/terraform/testdata/apply-provider-configure-disabled/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - value = 
"foo" -} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-provider-configure-disabled/main.tf b/internal/terraform/testdata/apply-provider-configure-disabled/main.tf deleted file mode 100644 index dbfc52745d69..000000000000 --- a/internal/terraform/testdata/apply-provider-configure-disabled/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - foo = "bar" -} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-provider-warning/main.tf b/internal/terraform/testdata/apply-provider-warning/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/apply-provider-warning/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-provisioner-compute/main.tf b/internal/terraform/testdata/apply-provisioner-compute/main.tf deleted file mode 100644 index 598296501d00..000000000000 --- a/internal/terraform/testdata/apply-provisioner-compute/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "value" {} - -resource "aws_instance" "foo" { - num = "2" - compute = "value" - compute_value = "${var.value}" -} - -resource "aws_instance" "bar" { - provisioner "shell" { - command = "${aws_instance.foo.value}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-destroy-continue/main.tf b/internal/terraform/testdata/apply-provisioner-destroy-continue/main.tf deleted file mode 100644 index 0be0d331e51b..000000000000 --- a/internal/terraform/testdata/apply-provisioner-destroy-continue/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" - - provisioner "shell" { - command = "one" - when = "destroy" - on_failure = "continue" - } - - provisioner "shell" { - command = "two" - when = "destroy" - on_failure = "continue" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-destroy-fail/main.tf b/internal/terraform/testdata/apply-provisioner-destroy-fail/main.tf 
deleted file mode 100644 index 14ad1258293d..000000000000 --- a/internal/terraform/testdata/apply-provisioner-destroy-fail/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" - - provisioner "shell" { - command = "one" - when = "destroy" - on_failure = "continue" - } - - provisioner "shell" { - command = "two" - when = "destroy" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-destroy/main.tf b/internal/terraform/testdata/apply-provisioner-destroy/main.tf deleted file mode 100644 index 8804f6495245..000000000000 --- a/internal/terraform/testdata/apply-provisioner-destroy/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -resource "aws_instance" "foo" { - for_each = var.input - foo = "bar" - - provisioner "shell" { - command = "create ${each.key} ${each.value}" - } - - provisioner "shell" { - when = "destroy" - command = "destroy ${each.key} ${self.foo}" - } -} - -variable "input" { - type = map(string) - default = {} -} diff --git a/internal/terraform/testdata/apply-provisioner-diff/main.tf b/internal/terraform/testdata/apply-provisioner-diff/main.tf deleted file mode 100644 index ac4f38e97a9c..000000000000 --- a/internal/terraform/testdata/apply-provisioner-diff/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "bar" { - foo = "bar" - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/apply-provisioner-explicit-self-ref/main.tf b/internal/terraform/testdata/apply-provisioner-explicit-self-ref/main.tf deleted file mode 100644 index 7ceca47db81c..000000000000 --- a/internal/terraform/testdata/apply-provisioner-explicit-self-ref/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" - - provisioner "shell" { - command = "${aws_instance.foo.foo}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-fail-continue/main.tf b/internal/terraform/testdata/apply-provisioner-fail-continue/main.tf deleted file mode 100644 index 39587984e66c..000000000000 --- 
a/internal/terraform/testdata/apply-provisioner-fail-continue/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" - - provisioner "shell" { - on_failure = "continue" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-fail-create-before/main.tf b/internal/terraform/testdata/apply-provisioner-fail-create-before/main.tf deleted file mode 100644 index 00d32cbc24f8..000000000000 --- a/internal/terraform/testdata/apply-provisioner-fail-create-before/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "bar" { - require_new = "xyz" - provisioner "shell" {} - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/apply-provisioner-fail-create/main.tf b/internal/terraform/testdata/apply-provisioner-fail-create/main.tf deleted file mode 100644 index c1dcd222c0b1..000000000000 --- a/internal/terraform/testdata/apply-provisioner-fail-create/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "bar" { - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/apply-provisioner-fail/main.tf b/internal/terraform/testdata/apply-provisioner-fail/main.tf deleted file mode 100644 index 4aacf4b5b16e..000000000000 --- a/internal/terraform/testdata/apply-provisioner-fail/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/apply-provisioner-for-each-self/main.tf b/internal/terraform/testdata/apply-provisioner-for-each-self/main.tf deleted file mode 100644 index f3e1d58df260..000000000000 --- a/internal/terraform/testdata/apply-provisioner-for-each-self/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - for_each = toset(["a", "b", "c"]) - foo = "number ${each.value}" - - provisioner "shell" { - command = "${self.foo}" - } -} diff --git 
a/internal/terraform/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf b/internal/terraform/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf deleted file mode 100644 index 337129e61b08..000000000000 --- a/internal/terraform/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf +++ /dev/null @@ -1,17 +0,0 @@ -variable "num" { - default = 3 -} - -resource "aws_instance" "a" { - count = var.num -} - -resource "aws_instance" "b" { - provisioner "local-exec" { - # Since we're in a provisioner block here, this expression is - # resolved during the apply walk and so the resource count must - # be known during that walk, even though apply walk doesn't - # do DynamicExpand. - command = "echo ${length(aws_instance.a)}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-module/child/main.tf b/internal/terraform/testdata/apply-provisioner-module/child/main.tf deleted file mode 100644 index 85b58ff94dc1..000000000000 --- a/internal/terraform/testdata/apply-provisioner-module/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "bar" { - provisioner "shell" { - foo = "bar" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-module/main.tf b/internal/terraform/testdata/apply-provisioner-module/main.tf deleted file mode 100644 index 1f95749fa7ea..000000000000 --- a/internal/terraform/testdata/apply-provisioner-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-provisioner-multi-self-ref-single/main.tf b/internal/terraform/testdata/apply-provisioner-multi-self-ref-single/main.tf deleted file mode 100644 index d6c995115ea9..000000000000 --- a/internal/terraform/testdata/apply-provisioner-multi-self-ref-single/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 - foo = "number ${count.index}" - - provisioner "shell" { - command = aws_instance.foo[0].foo - order = count.index - 
} -} diff --git a/internal/terraform/testdata/apply-provisioner-multi-self-ref/main.tf b/internal/terraform/testdata/apply-provisioner-multi-self-ref/main.tf deleted file mode 100644 index 72a1e7920076..000000000000 --- a/internal/terraform/testdata/apply-provisioner-multi-self-ref/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 - foo = "number ${count.index}" - - provisioner "shell" { - command = "${self.foo}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-resource-ref/main.tf b/internal/terraform/testdata/apply-provisioner-resource-ref/main.tf deleted file mode 100644 index 25da37781cc4..000000000000 --- a/internal/terraform/testdata/apply-provisioner-resource-ref/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "bar" { - num = "2" - - provisioner "shell" { - command = "${aws_instance.bar.num}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-self-ref/main.tf b/internal/terraform/testdata/apply-provisioner-self-ref/main.tf deleted file mode 100644 index 5f401f7c07f7..000000000000 --- a/internal/terraform/testdata/apply-provisioner-self-ref/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" - - provisioner "shell" { - command = "${self.foo}" - } -} diff --git a/internal/terraform/testdata/apply-provisioner-sensitive/main.tf b/internal/terraform/testdata/apply-provisioner-sensitive/main.tf deleted file mode 100644 index 99ec4a290b78..000000000000 --- a/internal/terraform/testdata/apply-provisioner-sensitive/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -variable "password" { - type = string - sensitive = true -} - -resource "aws_instance" "foo" { - connection { - host = "localhost" - type = "telnet" - user = "superuser" - port = 2222 - password = var.password - } - - provisioner "shell" { - command = "echo ${var.password} > secrets" - } -} diff --git a/internal/terraform/testdata/apply-ref-count/main.tf 
b/internal/terraform/testdata/apply-ref-count/main.tf deleted file mode 100644 index 1ce2ffe21f5d..000000000000 --- a/internal/terraform/testdata/apply-ref-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} - -resource "aws_instance" "bar" { - foo = length(aws_instance.foo) -} diff --git a/internal/terraform/testdata/apply-ref-existing/child/main.tf b/internal/terraform/testdata/apply-ref-existing/child/main.tf deleted file mode 100644 index cd1e56eec90e..000000000000 --- a/internal/terraform/testdata/apply-ref-existing/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "var" {} - -resource "aws_instance" "foo" { - value = "${var.var}" -} diff --git a/internal/terraform/testdata/apply-ref-existing/main.tf b/internal/terraform/testdata/apply-ref-existing/main.tf deleted file mode 100644 index a05056c52e54..000000000000 --- a/internal/terraform/testdata/apply-ref-existing/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" -} - -module "child" { - source = "./child" - - var = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/apply-resource-count-one-list/main.tf b/internal/terraform/testdata/apply-resource-count-one-list/main.tf deleted file mode 100644 index 0aeb75b1afa9..000000000000 --- a/internal/terraform/testdata/apply-resource-count-one-list/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "null_resource" "foo" { - count = 1 -} - -output "test" { - value = "${sort(null_resource.foo.*.id)}" -} diff --git a/internal/terraform/testdata/apply-resource-count-zero-list/main.tf b/internal/terraform/testdata/apply-resource-count-zero-list/main.tf deleted file mode 100644 index 6d9b4d55d286..000000000000 --- a/internal/terraform/testdata/apply-resource-count-zero-list/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "null_resource" "foo" { - count = 0 -} - -output "test" { - value = "${sort(null_resource.foo.*.id)}" -} diff --git 
a/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/child/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/child/main.tf deleted file mode 100644 index 77203263df45..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "c" { - ami = "grandchild" -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/main.tf deleted file mode 100644 index 6cbe350a7958..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-deep/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "grandchild" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-deep/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-deep/main.tf deleted file mode 100644 index 1a7862b0a3f0..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-deep/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "a" { - ami = "parent" - - depends_on = ["module.child"] -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-empty/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-empty/main.tf deleted file mode 100644 index f2316bd73ada..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-empty/main.tf +++ /dev/null @@ -1 +0,0 @@ -# Empty! 
diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf deleted file mode 100644 index 77203263df45..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "c" { - ami = "grandchild" -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/main.tf deleted file mode 100644 index a816cae90e5b..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/child/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "grandchild" { - source = "./child" -} - -resource "aws_instance" "b" { - ami = "child" - depends_on = ["module.grandchild"] -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module-in-module/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module-in-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module/child/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module/child/main.tf deleted file mode 100644 index 949d8e1b5e67..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "child" { - ami = "child" -} diff --git a/internal/terraform/testdata/apply-resource-depends-on-module/main.tf b/internal/terraform/testdata/apply-resource-depends-on-module/main.tf deleted file mode 100644 index 1a7862b0a3f0..000000000000 --- a/internal/terraform/testdata/apply-resource-depends-on-module/main.tf 
+++ /dev/null @@ -1,9 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "a" { - ami = "parent" - - depends_on = ["module.child"] -} diff --git a/internal/terraform/testdata/apply-resource-scale-in/main.tf b/internal/terraform/testdata/apply-resource-scale-in/main.tf deleted file mode 100644 index 8cb38473e163..000000000000 --- a/internal/terraform/testdata/apply-resource-scale-in/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "instance_count" {} - -resource "aws_instance" "one" { - count = var.instance_count -} - -locals { - one_id = element(concat(aws_instance.one.*.id, [""]), 0) -} - -resource "aws_instance" "two" { - value = local.one_id -} diff --git a/internal/terraform/testdata/apply-taint-dep-requires-new/main.tf b/internal/terraform/testdata/apply-taint-dep-requires-new/main.tf deleted file mode 100644 index f964fe46e9de..000000000000 --- a/internal/terraform/testdata/apply-taint-dep-requires-new/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.id}" - require_new = "yes" -} diff --git a/internal/terraform/testdata/apply-taint-dep/main.tf b/internal/terraform/testdata/apply-taint-dep/main.tf deleted file mode 100644 index 164db2d18ae2..000000000000 --- a/internal/terraform/testdata/apply-taint-dep/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - num = "2" - foo = "${aws_instance.foo.id}" -} diff --git a/internal/terraform/testdata/apply-taint/main.tf b/internal/terraform/testdata/apply-taint/main.tf deleted file mode 100644 index 801ddbaf9b36..000000000000 --- a/internal/terraform/testdata/apply-taint/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "bar" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-tainted-targets/main.tf b/internal/terraform/testdata/apply-tainted-targets/main.tf deleted file mode 100644 index 
8f6b317d5bd2..000000000000 --- a/internal/terraform/testdata/apply-tainted-targets/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "ifailedprovisioners" { } - -resource "aws_instance" "iambeingadded" { } diff --git a/internal/terraform/testdata/apply-targeted-count/main.tf b/internal/terraform/testdata/apply-targeted-count/main.tf deleted file mode 100644 index cd861898f203..000000000000 --- a/internal/terraform/testdata/apply-targeted-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} - -resource "aws_instance" "bar" { - count = 3 -} diff --git a/internal/terraform/testdata/apply-targeted-module-dep/child/main.tf b/internal/terraform/testdata/apply-targeted-module-dep/child/main.tf deleted file mode 100644 index 90a7c407b949..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-dep/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "mod" { } - -output "output" { - value = "${aws_instance.mod.id}" -} diff --git a/internal/terraform/testdata/apply-targeted-module-dep/main.tf b/internal/terraform/testdata/apply-targeted-module-dep/main.tf deleted file mode 100644 index 754219c3e3fc..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-dep/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - foo = "${module.child.output}" -} diff --git a/internal/terraform/testdata/apply-targeted-module-recursive/child/main.tf b/internal/terraform/testdata/apply-targeted-module-recursive/child/main.tf deleted file mode 100644 index 852bce8b9f39..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-recursive/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "subchild" { - source = "./subchild" -} diff --git a/internal/terraform/testdata/apply-targeted-module-recursive/child/subchild/main.tf b/internal/terraform/testdata/apply-targeted-module-recursive/child/subchild/main.tf deleted file mode 100644 
index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-recursive/child/subchild/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-targeted-module-recursive/main.tf b/internal/terraform/testdata/apply-targeted-module-recursive/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-recursive/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/apply-targeted-module-resource/child/main.tf b/internal/terraform/testdata/apply-targeted-module-resource/child/main.tf deleted file mode 100644 index 7872c90fcf5a..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-resource/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-targeted-module-resource/main.tf b/internal/terraform/testdata/apply-targeted-module-resource/main.tf deleted file mode 100644 index 88bf07f6995c..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-resource/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf b/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf deleted file mode 100644 index cffe3829e792..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf +++ /dev/null @@ -1,17 +0,0 @@ -variable "instance_id" { -} - -output "instance_id" { - # The instance here isn't targeted, so this output shouldn't get updated. 
- # But it already has an existing value in state (specified within the - # test code) so we expect this to remain unchanged afterwards. - value = "${aws_instance.foo.id}" -} - -output "given_instance_id" { - value = "${var.instance_id}" -} - -resource "aws_instance" "foo" { - foo = "${var.instance_id}" -} diff --git a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf b/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf deleted file mode 100644 index d8aa6cf3535a..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { -} - -output "instance_id" { - # Even though we're targeting just the resource above, this should still - # be populated because outputs are implicitly targeted when their - # dependencies are - value = "${aws_instance.foo.id}" -} diff --git a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/main.tf b/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/main.tf deleted file mode 100644 index 11700723769f..000000000000 --- a/internal/terraform/testdata/apply-targeted-module-unrelated-outputs/main.tf +++ /dev/null @@ -1,37 +0,0 @@ -resource "aws_instance" "foo" {} - -module "child1" { - source = "./child1" - instance_id = "${aws_instance.foo.id}" -} - -module "child2" { - source = "./child2" -} - -output "child1_id" { - value = "${module.child1.instance_id}" -} - -output "child1_given_id" { - value = "${module.child1.given_instance_id}" -} - -output "child2_id" { - # This should get updated even though we're targeting specifically - # module.child2, because outputs are implicitly targeted when their - # dependencies are. 
- value = "${module.child2.instance_id}" -} - -output "all_ids" { - # Here we are intentionally referencing values covering three different scenarios: - # - not targeted and not already in state - # - not targeted and already in state - # - targeted - # This is important because this output must appear in the graph after - # target filtering in case the targeted node changes its value, but we must - # therefore silently ignore the failure that results from trying to - # interpolate the un-targeted, not-in-state node. - value = "${aws_instance.foo.id} ${module.child1.instance_id} ${module.child2.instance_id}" -} diff --git a/internal/terraform/testdata/apply-targeted-module/child/main.tf b/internal/terraform/testdata/apply-targeted-module/child/main.tf deleted file mode 100644 index 7872c90fcf5a..000000000000 --- a/internal/terraform/testdata/apply-targeted-module/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-targeted-module/main.tf b/internal/terraform/testdata/apply-targeted-module/main.tf deleted file mode 100644 index 938ce3a56069..000000000000 --- a/internal/terraform/testdata/apply-targeted-module/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - foo = "bar" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-targeted-resource-orphan-module/child/main.tf b/internal/terraform/testdata/apply-targeted-resource-orphan-module/child/main.tf deleted file mode 100644 index 6ff716a4d4c1..000000000000 --- a/internal/terraform/testdata/apply-targeted-resource-orphan-module/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/apply-targeted-resource-orphan-module/main.tf b/internal/terraform/testdata/apply-targeted-resource-orphan-module/main.tf deleted 
file mode 100644 index 0c15c4bb2e12..000000000000 --- a/internal/terraform/testdata/apply-targeted-resource-orphan-module/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -//module "child" { -// source = "./child" -//} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/apply-targeted/main.tf b/internal/terraform/testdata/apply-targeted/main.tf deleted file mode 100644 index b07fc97f4d46..000000000000 --- a/internal/terraform/testdata/apply-targeted/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/apply-terraform-workspace/main.tf b/internal/terraform/testdata/apply-terraform-workspace/main.tf deleted file mode 100644 index cc50f578fac4..000000000000 --- a/internal/terraform/testdata/apply-terraform-workspace/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "output" { - value = "${terraform.workspace}" -} diff --git a/internal/terraform/testdata/apply-unknown-interpolate/child/main.tf b/internal/terraform/testdata/apply-unknown-interpolate/child/main.tf deleted file mode 100644 index 1caedabc4586..000000000000 --- a/internal/terraform/testdata/apply-unknown-interpolate/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_instance" "bar" { - foo = "${var.value}" -} diff --git a/internal/terraform/testdata/apply-unknown-interpolate/main.tf b/internal/terraform/testdata/apply-unknown-interpolate/main.tf deleted file mode 100644 index 1ee7dd6cbc4b..000000000000 --- a/internal/terraform/testdata/apply-unknown-interpolate/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "foo" {} - -module "child" { - source = "./child" - value = "${aws_instance.foo.nope}" -} diff --git a/internal/terraform/testdata/apply-unknown/main.tf b/internal/terraform/testdata/apply-unknown/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/apply-unknown/main.tf +++ 
/dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/apply-unstable/main.tf b/internal/terraform/testdata/apply-unstable/main.tf deleted file mode 100644 index 32754bb46640..000000000000 --- a/internal/terraform/testdata/apply-unstable/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_resource" "foo" { - random = "${uuid()}" -} diff --git a/internal/terraform/testdata/apply-vars-env/main.tf b/internal/terraform/testdata/apply-vars-env/main.tf deleted file mode 100644 index 1b62ad633826..000000000000 --- a/internal/terraform/testdata/apply-vars-env/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "string" { - default = "foo" - type = string -} - -variable "list" { - default = [] - type = list(string) -} - -variable "map" { - default = {} - type = map(string) -} - -resource "aws_instance" "bar" { - string = var.string - list = var.list - map = var.map -} diff --git a/internal/terraform/testdata/apply-vars/main.tf b/internal/terraform/testdata/apply-vars/main.tf deleted file mode 100644 index dc413c0be4cc..000000000000 --- a/internal/terraform/testdata/apply-vars/main.tf +++ /dev/null @@ -1,33 +0,0 @@ -variable "amis" { - default = { - us-east-1 = "foo" - us-west-2 = "foo" - } -} - -variable "test_list" { - type = list(string) -} - -variable "test_map" { - type = map(string) -} - -variable "bar" { - default = "baz" -} - -variable "foo" {} - -resource "aws_instance" "foo" { - num = "2" - bar = var.bar - list = var.test_list - map = var.test_map -} - -resource "aws_instance" "bar" { - foo = var.foo - bar = var.amis[var.foo] - baz = var.amis["us-east-1"] -} diff --git a/internal/terraform/testdata/context-required-version-module/child/main.tf b/internal/terraform/testdata/context-required-version-module/child/main.tf deleted file mode 100644 index 3b52ffab9112..000000000000 --- a/internal/terraform/testdata/context-required-version-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -terraform { - 
required_version = ">= 0.5.0" -} diff --git a/internal/terraform/testdata/context-required-version-module/main.tf b/internal/terraform/testdata/context-required-version-module/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/context-required-version-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/context-required-version/main.tf b/internal/terraform/testdata/context-required-version/main.tf deleted file mode 100644 index 75db792903e4..000000000000 --- a/internal/terraform/testdata/context-required-version/main.tf +++ /dev/null @@ -1 +0,0 @@ -terraform {} diff --git a/internal/terraform/testdata/data-source-read-with-plan-error/main.tf b/internal/terraform/testdata/data-source-read-with-plan-error/main.tf deleted file mode 100644 index 2559406f7ab5..000000000000 --- a/internal/terraform/testdata/data-source-read-with-plan-error/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { -} - -// this will be postponed until apply -data "aws_data_source" "foo" { - foo = aws_instance.foo.id -} - -// this will cause an error in the final plan -resource "test_instance" "bar" { - foo = "error" -} diff --git a/internal/terraform/testdata/destroy-module-with-provider/main.tf b/internal/terraform/testdata/destroy-module-with-provider/main.tf deleted file mode 100644 index 3b183ecac498..000000000000 --- a/internal/terraform/testdata/destroy-module-with-provider/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -// this is the provider that should actually be used by orphaned resources -provider "aws" { - alias = "bar" -} - -module "mod" { - source = "./mod" - providers = { - aws.foo = "aws.bar" - } -} diff --git a/internal/terraform/testdata/destroy-module-with-provider/mod/main.tf b/internal/terraform/testdata/destroy-module-with-provider/mod/main.tf deleted file mode 100644 index 3e360ee46048..000000000000 --- 
a/internal/terraform/testdata/destroy-module-with-provider/mod/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" { - alias = "foo" -} - -// removed module configuration referencing aws.foo, which was passed in by the -// root module diff --git a/internal/terraform/testdata/destroy-targeted/child/main.tf b/internal/terraform/testdata/destroy-targeted/child/main.tf deleted file mode 100644 index 47ef076b12de..000000000000 --- a/internal/terraform/testdata/destroy-targeted/child/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "in" { -} - -resource "aws_instance" "b" { - foo = var.in -} - -output "out" { - value = var.in -} diff --git a/internal/terraform/testdata/destroy-targeted/main.tf b/internal/terraform/testdata/destroy-targeted/main.tf deleted file mode 100644 index 70048b50c017..000000000000 --- a/internal/terraform/testdata/destroy-targeted/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "a" { - foo = "bar" -} - -module "child" { - source = "./child" - in = aws_instance.a.id -} - -output "out" { - value = aws_instance.a.id -} diff --git a/internal/terraform/testdata/empty/main.tf b/internal/terraform/testdata/empty/main.tf deleted file mode 100644 index 8974d9ed2542..000000000000 --- a/internal/terraform/testdata/empty/main.tf +++ /dev/null @@ -1 +0,0 @@ -# Empty, use this for any test that requires a module but no config. 
diff --git a/internal/terraform/testdata/eval-context-basic/child/main.tf b/internal/terraform/testdata/eval-context-basic/child/main.tf deleted file mode 100644 index e24069df759f..000000000000 --- a/internal/terraform/testdata/eval-context-basic/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "list" { -} - - -output "result" { - value = length(var.list) -} diff --git a/internal/terraform/testdata/eval-context-basic/main.tf b/internal/terraform/testdata/eval-context-basic/main.tf deleted file mode 100644 index 2dc96ad86351..000000000000 --- a/internal/terraform/testdata/eval-context-basic/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -variable "number" { - default = 3 -} - -variable "string" { - default = "Hello, World" -} - -variable "map" { - type = map(string) - default = { - "foo" = "bar", - "baz" = "bat", - } -} - -locals { - result = length(var.list) -} - -variable "list" { - type = list(string) - default = ["red", "orange", "yellow", "green", "blue", "purple"] -} - -resource "test_resource" "example" { - for_each = var.map - name = each.key - tag = each.value -} - -module "child" { - source = "./child" - list = var.list -} - -output "result" { - value = module.child.result -} diff --git a/internal/terraform/testdata/graph-basic/main.tf b/internal/terraform/testdata/graph-basic/main.tf deleted file mode 100644 index a40802cc98eb..000000000000 --- a/internal/terraform/testdata/graph-basic/main.tf +++ /dev/null @@ -1,24 +0,0 @@ -variable "foo" { - default = "bar" - description = "bar" -} - -provider "aws" { - foo = "${openstack_floating_ip.random.value}" -} - -resource "openstack_floating_ip" "random" {} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - ami = "${var.foo}" - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] -} - -resource "aws_load_balancer" "weblb" { - members = "${aws_instance.web.id_list}" -} diff --git a/internal/terraform/testdata/graph-builder-apply-basic/child/main.tf 
b/internal/terraform/testdata/graph-builder-apply-basic/child/main.tf deleted file mode 100644 index 79be97bf1618..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-basic/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_object" "create" { - provisioner "test" {} -} - -resource "test_object" "other" { - test_string = "${test_object.create.test_string}" -} diff --git a/internal/terraform/testdata/graph-builder-apply-basic/main.tf b/internal/terraform/testdata/graph-builder-apply-basic/main.tf deleted file mode 100644 index b42bd439e407..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-basic/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child" { - source = "./child" -} - -resource "test_object" "create" {} - -resource "test_object" "other" { - test_string = "${test_object.create.test_string}" -} diff --git a/internal/terraform/testdata/graph-builder-apply-count/main.tf b/internal/terraform/testdata/graph-builder-apply-count/main.tf deleted file mode 100644 index dee4eb41259a..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_object" "A" { - count = 1 -} - -resource "test_object" "B" { - test_list = test_object.A.*.test_string -} diff --git a/internal/terraform/testdata/graph-builder-apply-dep-cbd/main.tf b/internal/terraform/testdata/graph-builder-apply-dep-cbd/main.tf deleted file mode 100644 index df6f2908cf3a..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-dep-cbd/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "test_object" "A" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - test_list = test_object.A.*.test_string -} diff --git a/internal/terraform/testdata/graph-builder-apply-double-cbd/main.tf b/internal/terraform/testdata/graph-builder-apply-double-cbd/main.tf deleted file mode 100644 index cb1f73422670..000000000000 --- 
a/internal/terraform/testdata/graph-builder-apply-double-cbd/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -resource "test_object" "A" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - test_list = test_object.A.*.test_string - - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/graph-builder-apply-module-destroy/A/main.tf b/internal/terraform/testdata/graph-builder-apply-module-destroy/A/main.tf deleted file mode 100644 index 2c427f5c3b2a..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-module-destroy/A/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "input" {} - -resource "test_object" "foo" { - test_string = var.input -} - -output "output" { - value = test_object.foo.id -} diff --git a/internal/terraform/testdata/graph-builder-apply-module-destroy/main.tf b/internal/terraform/testdata/graph-builder-apply-module-destroy/main.tf deleted file mode 100644 index 3c566646d137..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-module-destroy/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "input" { - default = "value" -} - -module "A" { - source = "./A" - input = var.input -} - -module "B" { - source = "./A" - input = module.A.output -} diff --git a/internal/terraform/testdata/graph-builder-apply-orphan-update/main.tf b/internal/terraform/testdata/graph-builder-apply-orphan-update/main.tf deleted file mode 100644 index 22e7ae0f1a19..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-orphan-update/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_object" "b" { - test_string = "changed" -} diff --git a/internal/terraform/testdata/graph-builder-apply-provisioner/main.tf b/internal/terraform/testdata/graph-builder-apply-provisioner/main.tf deleted file mode 100644 index 1ea5d2122ee2..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-provisioner/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_object" "foo" { - provisioner 
"test" {} -} diff --git a/internal/terraform/testdata/graph-builder-apply-target-module/child1/main.tf b/internal/terraform/testdata/graph-builder-apply-target-module/child1/main.tf deleted file mode 100644 index 7ac75f5edb9b..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-target-module/child1/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "instance_id" {} - -output "instance_id" { - value = "${var.instance_id}" -} - -resource "test_object" "foo" { - triggers = { - instance_id = "${var.instance_id}" - } -} diff --git a/internal/terraform/testdata/graph-builder-apply-target-module/child2/main.tf b/internal/terraform/testdata/graph-builder-apply-target-module/child2/main.tf deleted file mode 100644 index 0afe7efac644..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-target-module/child2/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "test_object" "foo" {} diff --git a/internal/terraform/testdata/graph-builder-apply-target-module/main.tf b/internal/terraform/testdata/graph-builder-apply-target-module/main.tf deleted file mode 100644 index 994d8fca17dc..000000000000 --- a/internal/terraform/testdata/graph-builder-apply-target-module/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "test_object" "foo" {} - -module "child1" { - source = "./child1" - instance_id = "${test_object.foo.id}" -} - -module "child2" { - source = "./child2" -} diff --git a/internal/terraform/testdata/graph-builder-orphan-alias/main.tf b/internal/terraform/testdata/graph-builder-orphan-alias/main.tf deleted file mode 100644 index 039881847c51..000000000000 --- a/internal/terraform/testdata/graph-builder-orphan-alias/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "test" { - alias = "foo" -} diff --git a/internal/terraform/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf b/internal/terraform/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf deleted file mode 100644 index d154cc264218..000000000000 --- 
a/internal/terraform/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "test_thing" "a" { -} - -resource "test_thing" "b" { - nested { - foo = test_thing.a.id - } -} diff --git a/internal/terraform/testdata/graph-builder-plan-basic/main.tf b/internal/terraform/testdata/graph-builder-plan-basic/main.tf deleted file mode 100644 index df74468a1906..000000000000 --- a/internal/terraform/testdata/graph-builder-plan-basic/main.tf +++ /dev/null @@ -1,33 +0,0 @@ -variable "foo" { - default = "bar" - description = "bar" -} - -provider "aws" { - test_string = "${openstack_floating_ip.random.test_string}" -} - -resource "openstack_floating_ip" "random" {} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - test_string = var.foo - - test_list = [ - "foo", - aws_security_group.firewall.test_string, - ] -} - -resource "aws_load_balancer" "weblb" { - test_list = aws_instance.web.test_list -} - -locals { - instance_id = "${aws_instance.web.test_string}" -} - -output "instance_id" { - value = "${local.instance_id}" -} diff --git a/internal/terraform/testdata/graph-builder-plan-dynblock/dynblock.tf b/internal/terraform/testdata/graph-builder-plan-dynblock/dynblock.tf deleted file mode 100644 index 8946969775c1..000000000000 --- a/internal/terraform/testdata/graph-builder-plan-dynblock/dynblock.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "test_thing" "a" { -} - -resource "test_thing" "b" { -} - -resource "test_thing" "c" { - dynamic "nested" { - for_each = test_thing.a.list - content { - foo = test_thing.b.id - } - } -} diff --git a/internal/terraform/testdata/graph-builder-plan-target-module-provider/child1/main.tf b/internal/terraform/testdata/graph-builder-plan-target-module-provider/child1/main.tf deleted file mode 100644 index f95800f7a0d9..000000000000 --- a/internal/terraform/testdata/graph-builder-plan-target-module-provider/child1/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "key" {} - 
-provider "test" { - test_string = "${var.key}" -} - -resource "test_object" "foo" {} diff --git a/internal/terraform/testdata/graph-builder-plan-target-module-provider/child2/main.tf b/internal/terraform/testdata/graph-builder-plan-target-module-provider/child2/main.tf deleted file mode 100644 index f95800f7a0d9..000000000000 --- a/internal/terraform/testdata/graph-builder-plan-target-module-provider/child2/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "key" {} - -provider "test" { - test_string = "${var.key}" -} - -resource "test_object" "foo" {} diff --git a/internal/terraform/testdata/graph-builder-plan-target-module-provider/main.tf b/internal/terraform/testdata/graph-builder-plan-target-module-provider/main.tf deleted file mode 100644 index d5a01db9a0d5..000000000000 --- a/internal/terraform/testdata/graph-builder-plan-target-module-provider/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child1" { - source = "./child1" - key = "!" -} - -module "child2" { - source = "./child2" - key = "!" 
-} diff --git a/internal/terraform/testdata/import-module/child/main.tf b/internal/terraform/testdata/import-module/child/main.tf deleted file mode 100644 index 8a8164b3b24b..000000000000 --- a/internal/terraform/testdata/import-module/child/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -# Empty -provider "aws" {} - -resource "aws_instance" "foo" { - id = "bar" -} - -module "nested" { - source = "./submodule" -} diff --git a/internal/terraform/testdata/import-module/child/submodule/main.tf b/internal/terraform/testdata/import-module/child/submodule/main.tf deleted file mode 100644 index 93c90158bb13..000000000000 --- a/internal/terraform/testdata/import-module/child/submodule/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - id = "baz" -} diff --git a/internal/terraform/testdata/import-module/main.tf b/internal/terraform/testdata/import-module/main.tf deleted file mode 100644 index c899a2c510e3..000000000000 --- a/internal/terraform/testdata/import-module/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - foo = "bar" -} - -module "child" { - count = 1 - source = "./child" - providers = { - aws = aws - } -} diff --git a/internal/terraform/testdata/import-provider-locals/main.tf b/internal/terraform/testdata/import-provider-locals/main.tf deleted file mode 100644 index a83512ccd98e..000000000000 --- a/internal/terraform/testdata/import-provider-locals/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "foo" {} - -locals { - baz = "baz-${var.foo}" -} - -provider "aws" { - foo = "${local.baz}" -} - -resource "aws_instance" "foo" { - id = "bar" -} diff --git a/internal/terraform/testdata/import-provider-resources/main.tf b/internal/terraform/testdata/import-provider-resources/main.tf deleted file mode 100644 index a99ee5e94160..000000000000 --- a/internal/terraform/testdata/import-provider-resources/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - value = "${test_instance.bar.id}" -} - -resource "aws_instance" "foo" { - bar = "value" -} - 
-resource "test_instance" "bar" { - value = "yes" -} diff --git a/internal/terraform/testdata/import-provider-vars/main.tf b/internal/terraform/testdata/import-provider-vars/main.tf deleted file mode 100644 index 6a88bc926b86..000000000000 --- a/internal/terraform/testdata/import-provider-vars/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "foo" {} - -provider "aws" { - foo = "${var.foo}" -} - -resource "aws_instance" "foo" { - id = "bar" -} diff --git a/internal/terraform/testdata/import-provider/main.tf b/internal/terraform/testdata/import-provider/main.tf deleted file mode 100644 index 5d41fb3e6162..000000000000 --- a/internal/terraform/testdata/import-provider/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "foo" { -} diff --git a/internal/terraform/testdata/input-interpolate-var/child/main.tf b/internal/terraform/testdata/input-interpolate-var/child/main.tf deleted file mode 100644 index beb8c098c095..000000000000 --- a/internal/terraform/testdata/input-interpolate-var/child/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "length" { } - -resource "template_file" "temp" { - count = var.length - template = "foo" -} diff --git a/internal/terraform/testdata/input-interpolate-var/main.tf b/internal/terraform/testdata/input-interpolate-var/main.tf deleted file mode 100644 index 4e68495e7b9d..000000000000 --- a/internal/terraform/testdata/input-interpolate-var/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "source" { - source = "./source" -} -module "child" { - source = "./child" - length = module.source.length -} diff --git a/internal/terraform/testdata/input-interpolate-var/source/main.tf b/internal/terraform/testdata/input-interpolate-var/source/main.tf deleted file mode 100644 index 1405fe296d78..000000000000 --- a/internal/terraform/testdata/input-interpolate-var/source/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "length" { - value = 3 -} diff --git 
a/internal/terraform/testdata/input-module-data-vars/child/main.tf b/internal/terraform/testdata/input-module-data-vars/child/main.tf deleted file mode 100644 index aa5d69bd5f8a..000000000000 --- a/internal/terraform/testdata/input-module-data-vars/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "in" {} - -output "out" { - value = "${var.in}" -} diff --git a/internal/terraform/testdata/input-module-data-vars/main.tf b/internal/terraform/testdata/input-module-data-vars/main.tf deleted file mode 100644 index 0a327b10247f..000000000000 --- a/internal/terraform/testdata/input-module-data-vars/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -data "null_data_source" "bar" { - foo = ["a", "b"] -} - -module "child" { - source = "./child" - in = "${data.null_data_source.bar.foo[1]}" -} diff --git a/internal/terraform/testdata/input-provider-multi/main.tf b/internal/terraform/testdata/input-provider-multi/main.tf deleted file mode 100644 index db49fd3b0a79..000000000000 --- a/internal/terraform/testdata/input-provider-multi/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" { - alias = "east" -} - -resource "aws_instance" "foo" { - provider = aws.east -} - -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/input-provider-once/child/main.tf b/internal/terraform/testdata/input-provider-once/child/main.tf deleted file mode 100644 index ca39ff5e561b..000000000000 --- a/internal/terraform/testdata/input-provider-once/child/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -provider "aws" {} -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/input-provider-once/main.tf b/internal/terraform/testdata/input-provider-once/main.tf deleted file mode 100644 index 006a74087c51..000000000000 --- a/internal/terraform/testdata/input-provider-once/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/input-provider-vars/main.tf 
b/internal/terraform/testdata/input-provider-vars/main.tf deleted file mode 100644 index 692bfb30f3bc..000000000000 --- a/internal/terraform/testdata/input-provider-vars/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "foo" {} - -resource "aws_instance" "foo" { - foo = "${var.foo}" -} diff --git a/internal/terraform/testdata/input-provider-with-vars-and-module/child/main.tf b/internal/terraform/testdata/input-provider-with-vars-and-module/child/main.tf deleted file mode 100644 index 7ec25bda0c90..000000000000 --- a/internal/terraform/testdata/input-provider-with-vars-and-module/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" { } diff --git a/internal/terraform/testdata/input-provider-with-vars-and-module/main.tf b/internal/terraform/testdata/input-provider-with-vars-and-module/main.tf deleted file mode 100644 index c5112dca05f1..000000000000 --- a/internal/terraform/testdata/input-provider-with-vars-and-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - access_key = "abc123" -} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/input-provider-with-vars/main.tf b/internal/terraform/testdata/input-provider-with-vars/main.tf deleted file mode 100644 index d8f9311150e6..000000000000 --- a/internal/terraform/testdata/input-provider-with-vars/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "foo" {} - -provider "aws" { - foo = "${var.foo}" -} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/input-provider/main.tf b/internal/terraform/testdata/input-provider/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/input-provider/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/input-submodule-count/main.tf b/internal/terraform/testdata/input-submodule-count/main.tf deleted file mode 100644 index 723a15c6d5eb..000000000000 --- 
a/internal/terraform/testdata/input-submodule-count/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "mod" { - source = "./mod" - instance_count = 2 -} diff --git a/internal/terraform/testdata/input-submodule-count/mod/main.tf b/internal/terraform/testdata/input-submodule-count/mod/main.tf deleted file mode 100644 index dd7cf3d9a84a..000000000000 --- a/internal/terraform/testdata/input-submodule-count/mod/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "instance_count" { -} - -resource "aws_instance" "foo" { - count = "${var.instance_count}" -} - -module "submod" { - source = "./submod" - list = ["${aws_instance.foo.*.id}"] -} diff --git a/internal/terraform/testdata/input-submodule-count/mod/submod/main.tf b/internal/terraform/testdata/input-submodule-count/mod/submod/main.tf deleted file mode 100644 index 732ce43b1ab4..000000000000 --- a/internal/terraform/testdata/input-submodule-count/mod/submod/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "list" { - type = list(string) -} - -resource "aws_instance" "bar" { - count = var.list[0] -} diff --git a/internal/terraform/testdata/input-variables/main.tf b/internal/terraform/testdata/input-variables/main.tf deleted file mode 100644 index 9d6d49aa3988..000000000000 --- a/internal/terraform/testdata/input-variables/main.tf +++ /dev/null @@ -1,30 +0,0 @@ -# Required -variable "foo" { -} - -# Optional -variable "bar" { - default = "baz" -} - -# Mapping -variable "map" { - default = { - foo = "bar" - } -} - -# Complex Object Types -variable "object_map" { - type = map(object({ - foo = string, - bar = any - })) -} - -variable "object_list" { - type = list(object({ - foo = string, - bar = any - })) -} diff --git a/internal/terraform/testdata/issue-5254/step-0/main.tf b/internal/terraform/testdata/issue-5254/step-0/main.tf deleted file mode 100644 index dd666eba18cb..000000000000 --- a/internal/terraform/testdata/issue-5254/step-0/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "c" { - default = 1 -} - -resource 
"template_file" "parent" { - count = var.c - template = "Hi" -} - -resource "template_file" "child" { - template = "${join(",", template_file.parent.*.template)} ok" -} diff --git a/internal/terraform/testdata/issue-5254/step-1/main.tf b/internal/terraform/testdata/issue-5254/step-1/main.tf deleted file mode 100644 index 3510fe1c4b44..000000000000 --- a/internal/terraform/testdata/issue-5254/step-1/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "c" { - default = 1 -} - -resource "template_file" "parent" { - count = var.c - template = "Hi" -} - -resource "template_file" "child" { - template = join(",", template_file.parent.*.template) - __template_requires_new = true -} diff --git a/internal/terraform/testdata/issue-7824/main.tf b/internal/terraform/testdata/issue-7824/main.tf deleted file mode 100644 index ec76bc39223d..000000000000 --- a/internal/terraform/testdata/issue-7824/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "test" { - type = map(string) - default = { - "test" = "1" - } -} \ No newline at end of file diff --git a/internal/terraform/testdata/issue-9549/main.tf b/internal/terraform/testdata/issue-9549/main.tf deleted file mode 100644 index 5bf28c66d0c8..000000000000 --- a/internal/terraform/testdata/issue-9549/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "mod" { - source = "./mod" -} - -output "out" { - value = module.mod.base_config["base_template"] -} - -resource "template_instance" "root_template" { - foo = module.mod.base_config["base_template"] -} diff --git a/internal/terraform/testdata/issue-9549/mod/main.tf b/internal/terraform/testdata/issue-9549/mod/main.tf deleted file mode 100644 index aedf9f003ed7..000000000000 --- a/internal/terraform/testdata/issue-9549/mod/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "template_instance" "example" { - compute_value = "template text" - compute = "value" -} - -output "base_config" { - value = { - base_template = template_instance.example.value - } -} diff --git 
a/internal/terraform/testdata/nested-resource-count-plan/main.tf b/internal/terraform/testdata/nested-resource-count-plan/main.tf deleted file mode 100644 index f803fd1f6541..000000000000 --- a/internal/terraform/testdata/nested-resource-count-plan/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 -} - -resource "aws_instance" "bar" { - count = "${length(aws_instance.foo.*.id)}" -} - -resource "aws_instance" "baz" { - count = "${length(aws_instance.bar.*.id)}" -} diff --git a/internal/terraform/testdata/plan-block-nesting-group/block-nesting-group.tf b/internal/terraform/testdata/plan-block-nesting-group/block-nesting-group.tf deleted file mode 100644 index 9284072dc9c1..000000000000 --- a/internal/terraform/testdata/plan-block-nesting-group/block-nesting-group.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "test" "foo" { -} diff --git a/internal/terraform/testdata/plan-cbd-depends-datasource/main.tf b/internal/terraform/testdata/plan-cbd-depends-datasource/main.tf deleted file mode 100644 index b523204a8de4..000000000000 --- a/internal/terraform/testdata/plan-cbd-depends-datasource/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - num = "2" - computed = data.aws_vpc.bar[count.index].id - - lifecycle { - create_before_destroy = true - } -} - -data "aws_vpc" "bar" { - count = 2 - foo = count.index -} diff --git a/internal/terraform/testdata/plan-cbd-maintain-root/main.tf b/internal/terraform/testdata/plan-cbd-maintain-root/main.tf deleted file mode 100644 index 99c96b9eee42..000000000000 --- a/internal/terraform/testdata/plan-cbd-maintain-root/main.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "aws_instance" "foo" { - count = "2" - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_instance" "bar" { - count = "2" - - lifecycle { - create_before_destroy = true - } -} - -output "out" { - value = "${aws_instance.foo.0.id}" -} diff --git a/internal/terraform/testdata/plan-cbd/main.tf 
b/internal/terraform/testdata/plan-cbd/main.tf deleted file mode 100644 index 83d173a53573..000000000000 --- a/internal/terraform/testdata/plan-cbd/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/plan-close-module-provider/main.tf b/internal/terraform/testdata/plan-close-module-provider/main.tf deleted file mode 100644 index ba846846994e..000000000000 --- a/internal/terraform/testdata/plan-close-module-provider/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "mod" { - source = "./mod" -} diff --git a/internal/terraform/testdata/plan-close-module-provider/mod/main.tf b/internal/terraform/testdata/plan-close-module-provider/mod/main.tf deleted file mode 100644 index 3ce1991f2025..000000000000 --- a/internal/terraform/testdata/plan-close-module-provider/mod/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "mod" -} - -resource "aws_instance" "bar" { - provider = "aws.mod" -} diff --git a/internal/terraform/testdata/plan-computed-attr-ref-type-mismatch/main.tf b/internal/terraform/testdata/plan-computed-attr-ref-type-mismatch/main.tf deleted file mode 100644 index 41761b2d5dbe..000000000000 --- a/internal/terraform/testdata/plan-computed-attr-ref-type-mismatch/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_ami_list" "foo" { - # assume this has a computed attr called "ids" -} - -resource "aws_instance" "foo" { - # this is erroneously referencing the list of all ids. The value of this - # is unknown during plan, but we should still know that the unknown value - # is a list of strings and so catch this during plan. 
- ami = "${aws_ami_list.foo.ids}" -} diff --git a/internal/terraform/testdata/plan-computed-data-count/main.tf b/internal/terraform/testdata/plan-computed-data-count/main.tf deleted file mode 100644 index 2d014045271e..000000000000 --- a/internal/terraform/testdata/plan-computed-data-count/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - compute = "foo" -} - -data "aws_vpc" "bar" { - count = 3 - foo = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-computed-data-resource/main.tf b/internal/terraform/testdata/plan-computed-data-resource/main.tf deleted file mode 100644 index aff26ebde5e4..000000000000 --- a/internal/terraform/testdata/plan-computed-data-resource/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - compute = "foo" -} - -data "aws_vpc" "bar" { - foo = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-computed-in-function/main.tf b/internal/terraform/testdata/plan-computed-in-function/main.tf deleted file mode 100644 index 554394de6aae..000000000000 --- a/internal/terraform/testdata/plan-computed-in-function/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -data "aws_data_source" "foo" { - -} - -resource "aws_instance" "bar" { - attr = "${length(data.aws_data_source.foo.computed)}" -} diff --git a/internal/terraform/testdata/plan-computed-list/main.tf b/internal/terraform/testdata/plan-computed-list/main.tf deleted file mode 100644 index aeec6ba9350c..000000000000 --- a/internal/terraform/testdata/plan-computed-list/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - compute = "list.#" -} - -resource "aws_instance" "bar" { - foo = aws_instance.foo.list.0 -} diff --git a/internal/terraform/testdata/plan-computed-multi-index/main.tf b/internal/terraform/testdata/plan-computed-multi-index/main.tf deleted file mode 100644 index 2d8a799d0587..000000000000 --- 
a/internal/terraform/testdata/plan-computed-multi-index/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - compute = "ip.#" -} - -resource "aws_instance" "bar" { - count = 1 - foo = "${aws_instance.foo.*.ip[count.index]}" -} diff --git a/internal/terraform/testdata/plan-computed-value-in-map/main.tf b/internal/terraform/testdata/plan-computed-value-in-map/main.tf deleted file mode 100644 index ef2cf08099ab..000000000000 --- a/internal/terraform/testdata/plan-computed-value-in-map/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -resource "aws_computed_source" "intermediates" {} - -module "test_mod" { - source = "./mod" - - services = [ - { - "exists" = "true" - "elb" = "${aws_computed_source.intermediates.computed_read_only}" - }, - { - "otherexists" = " true" - "elb" = "${aws_computed_source.intermediates.computed_read_only}" - }, - ] -} diff --git a/internal/terraform/testdata/plan-computed-value-in-map/mod/main.tf b/internal/terraform/testdata/plan-computed-value-in-map/mod/main.tf deleted file mode 100644 index f6adccf40dab..000000000000 --- a/internal/terraform/testdata/plan-computed-value-in-map/mod/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "services" { - type = list(map(string)) -} - -resource "aws_instance" "inner2" { - looked_up = var.services[0]["elb"] -} - diff --git a/internal/terraform/testdata/plan-computed/main.tf b/internal/terraform/testdata/plan-computed/main.tf deleted file mode 100644 index 71809138b126..000000000000 --- a/internal/terraform/testdata/plan-computed/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - compute = "foo" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-count-computed-module/child/main.tf b/internal/terraform/testdata/plan-count-computed-module/child/main.tf deleted file mode 100644 index f80d699d9c30..000000000000 --- 
a/internal/terraform/testdata/plan-count-computed-module/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_instance" "bar" { - count = "${var.value}" -} diff --git a/internal/terraform/testdata/plan-count-computed-module/main.tf b/internal/terraform/testdata/plan-count-computed-module/main.tf deleted file mode 100644 index c87beb5f896c..000000000000 --- a/internal/terraform/testdata/plan-count-computed-module/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - compute = "foo" -} - -module "child" { - source = "./child" - value = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-count-computed/main.tf b/internal/terraform/testdata/plan-count-computed/main.tf deleted file mode 100644 index 8a029236b1e9..000000000000 --- a/internal/terraform/testdata/plan-count-computed/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - compute = "foo" -} - -resource "aws_instance" "bar" { - count = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-count-dec/main.tf b/internal/terraform/testdata/plan-count-dec/main.tf deleted file mode 100644 index 7837f58655f7..000000000000 --- a/internal/terraform/testdata/plan-count-dec/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - foo = "foo" -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/plan-count-inc/main.tf b/internal/terraform/testdata/plan-count-inc/main.tf deleted file mode 100644 index 3c7fdb9fff79..000000000000 --- a/internal/terraform/testdata/plan-count-inc/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - foo = "foo" - count = 3 -} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/plan-count-index/main.tf b/internal/terraform/testdata/plan-count-index/main.tf deleted file mode 100644 index 9a0d1ebbcc2f..000000000000 --- 
a/internal/terraform/testdata/plan-count-index/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - foo = "${count.index}" -} diff --git a/internal/terraform/testdata/plan-count-module-static-grandchild/child/child/main.tf b/internal/terraform/testdata/plan-count-module-static-grandchild/child/child/main.tf deleted file mode 100644 index 5b75831fdc1e..000000000000 --- a/internal/terraform/testdata/plan-count-module-static-grandchild/child/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_instance" "foo" { - count = "${var.value}" -} diff --git a/internal/terraform/testdata/plan-count-module-static-grandchild/child/main.tf b/internal/terraform/testdata/plan-count-module-static-grandchild/child/main.tf deleted file mode 100644 index 4dff927d51e9..000000000000 --- a/internal/terraform/testdata/plan-count-module-static-grandchild/child/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "value" {} - -module "child" { - source = "./child" - value = "${var.value}" -} diff --git a/internal/terraform/testdata/plan-count-module-static-grandchild/main.tf b/internal/terraform/testdata/plan-count-module-static-grandchild/main.tf deleted file mode 100644 index b2c7ca66e7ae..000000000000 --- a/internal/terraform/testdata/plan-count-module-static-grandchild/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "foo" { - default = "3" -} - -module "child" { - source = "./child" - value = "${var.foo}" -} diff --git a/internal/terraform/testdata/plan-count-module-static/child/main.tf b/internal/terraform/testdata/plan-count-module-static/child/main.tf deleted file mode 100644 index 5b75831fdc1e..000000000000 --- a/internal/terraform/testdata/plan-count-module-static/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -resource "aws_instance" "foo" { - count = "${var.value}" -} diff --git a/internal/terraform/testdata/plan-count-module-static/main.tf b/internal/terraform/testdata/plan-count-module-static/main.tf 
deleted file mode 100644 index b2c7ca66e7ae..000000000000 --- a/internal/terraform/testdata/plan-count-module-static/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "foo" { - default = "3" -} - -module "child" { - source = "./child" - value = "${var.foo}" -} diff --git a/internal/terraform/testdata/plan-count-one-index/main.tf b/internal/terraform/testdata/plan-count-one-index/main.tf deleted file mode 100644 index 58d4acf7113f..000000000000 --- a/internal/terraform/testdata/plan-count-one-index/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = 1 - foo = "foo" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.0.foo}" -} diff --git a/internal/terraform/testdata/plan-count-splat-reference/main.tf b/internal/terraform/testdata/plan-count-splat-reference/main.tf deleted file mode 100644 index 76834e2555c8..000000000000 --- a/internal/terraform/testdata/plan-count-splat-reference/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - name = "foo ${count.index}" - count = 3 -} - -resource "aws_instance" "bar" { - foo_name = "${aws_instance.foo.*.name[count.index]}" - count = 3 -} diff --git a/internal/terraform/testdata/plan-count-var/main.tf b/internal/terraform/testdata/plan-count-var/main.tf deleted file mode 100644 index 8b8a04333e32..000000000000 --- a/internal/terraform/testdata/plan-count-var/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "instance_count" {} - -resource "aws_instance" "foo" { - count = var.instance_count - foo = "foo" -} - -resource "aws_instance" "bar" { - foo = join(",", aws_instance.foo.*.foo) -} diff --git a/internal/terraform/testdata/plan-count-zero/main.tf b/internal/terraform/testdata/plan-count-zero/main.tf deleted file mode 100644 index 4845cbb0bf22..000000000000 --- a/internal/terraform/testdata/plan-count-zero/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = 0 - foo = "foo" -} - -resource "aws_instance" "bar" { - foo = 
"${aws_instance.foo.*.foo}" -} diff --git a/internal/terraform/testdata/plan-count/main.tf b/internal/terraform/testdata/plan-count/main.tf deleted file mode 100644 index 276670ce4474..000000000000 --- a/internal/terraform/testdata/plan-count/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = 5 - foo = "foo" -} - -resource "aws_instance" "bar" { - foo = "${join(",", aws_instance.foo.*.foo)}" -} diff --git a/internal/terraform/testdata/plan-data-depends-on/main.tf b/internal/terraform/testdata/plan-data-depends-on/main.tf deleted file mode 100644 index c7332ad291e8..000000000000 --- a/internal/terraform/testdata/plan-data-depends-on/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "test_resource" "a" { -} - -data "test_data" "d" { - count = 1 - depends_on = [ - test_resource.a - ] -} - -resource "test_resource" "b" { - count = 1 - foo = data.test_data.d[count.index].compute -} diff --git a/internal/terraform/testdata/plan-data-resource-becomes-computed/main.tf b/internal/terraform/testdata/plan-data-resource-becomes-computed/main.tf deleted file mode 100644 index 3f07be3522b9..000000000000 --- a/internal/terraform/testdata/plan-data-resource-becomes-computed/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "foo" { -} - -data "aws_data_source" "foo" { - foo = "${aws_instance.foo.computed}" -} diff --git a/internal/terraform/testdata/plan-destroy-interpolated-count/main.tf b/internal/terraform/testdata/plan-destroy-interpolated-count/main.tf deleted file mode 100644 index ac0dadbf81f8..000000000000 --- a/internal/terraform/testdata/plan-destroy-interpolated-count/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "list" { - default = ["1", "2"] -} - -resource "aws_instance" "a" { - count = length(var.list) -} - -locals { - ids = aws_instance.a[*].id -} - -module "empty" { - source = "./mod" - input = zipmap(var.list, local.ids) -} - -output "out" { - value = aws_instance.a[*].id -} diff --git 
a/internal/terraform/testdata/plan-destroy-interpolated-count/mod/main.tf b/internal/terraform/testdata/plan-destroy-interpolated-count/mod/main.tf deleted file mode 100644 index 682e0f0db76a..000000000000 --- a/internal/terraform/testdata/plan-destroy-interpolated-count/mod/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -variable "input" { -} diff --git a/internal/terraform/testdata/plan-destroy/main.tf b/internal/terraform/testdata/plan-destroy/main.tf deleted file mode 100644 index 1b6cdae67b0e..000000000000 --- a/internal/terraform/testdata/plan-destroy/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/plan-diffvar/main.tf b/internal/terraform/testdata/plan-diffvar/main.tf deleted file mode 100644 index eccc16ff2c39..000000000000 --- a/internal/terraform/testdata/plan-diffvar/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "3" -} - -resource "aws_instance" "bar" { - num = aws_instance.foo.num -} diff --git a/internal/terraform/testdata/plan-empty/main.tf b/internal/terraform/testdata/plan-empty/main.tf deleted file mode 100644 index 88002d078a1b..000000000000 --- a/internal/terraform/testdata/plan-empty/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { -} - -resource "aws_instance" "bar" { -} diff --git a/internal/terraform/testdata/plan-escaped-var/main.tf b/internal/terraform/testdata/plan-escaped-var/main.tf deleted file mode 100644 index 5a017207ccf7..000000000000 --- a/internal/terraform/testdata/plan-escaped-var/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar-$${baz}" -} diff --git a/internal/terraform/testdata/plan-for-each-unknown-value/main.tf b/internal/terraform/testdata/plan-for-each-unknown-value/main.tf deleted file mode 100644 index 933ed5f4c322..000000000000 --- 
a/internal/terraform/testdata/plan-for-each-unknown-value/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -# expressions with variable reference -variable "foo" { - type = string -} - -resource "aws_instance" "foo" { - for_each = toset( - [for i in range(0,3) : sha1("${i}${var.foo}")] - ) - foo = "foo" -} - -# referencing another resource, which means it has some unknown values in it -resource "aws_instance" "one" { - for_each = toset(["a", "b"]) -} - -resource "aws_instance" "two" { - for_each = aws_instance.one -} diff --git a/internal/terraform/testdata/plan-for-each/main.tf b/internal/terraform/testdata/plan-for-each/main.tf deleted file mode 100644 index 94572e20a47f..000000000000 --- a/internal/terraform/testdata/plan-for-each/main.tf +++ /dev/null @@ -1,35 +0,0 @@ -# maps -resource "aws_instance" "foo" { - for_each = { - a = "thing" - b = "another thing" - c = "yet another thing" - } - num = "3" -} - -# sets -resource "aws_instance" "bar" { - for_each = toset([]) -} -resource "aws_instance" "bar2" { - for_each = toset(["z", "y", "x"]) -} - -# an empty map should generate no resource -resource "aws_instance" "baz" { - for_each = {} -} - -# references -resource "aws_instance" "boo" { - foo = aws_instance.foo["a"].num -} - -resource "aws_instance" "bat" { - for_each = { - my_key = aws_instance.boo.foo - } - foo = each.value -} - diff --git a/internal/terraform/testdata/plan-good/main.tf b/internal/terraform/testdata/plan-good/main.tf deleted file mode 100644 index 1b6cdae67b0e..000000000000 --- a/internal/terraform/testdata/plan-good/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf b/internal/terraform/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf deleted file mode 100644 index 75adcac5c3d7..000000000000 --- 
a/internal/terraform/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf +++ /dev/null @@ -1,13 +0,0 @@ - -resource "test_ignore_changes_map" "foo" { - tags = { - ignored = "from config" - other = "from config" - } - - lifecycle { - ignore_changes = [ - tags["ignored"], - ] - } -} diff --git a/internal/terraform/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf b/internal/terraform/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf deleted file mode 100644 index 1f6cc98acede..000000000000 --- a/internal/terraform/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "foo" { - sensitive = true -} - -resource "aws_instance" "foo" { - ami = var.foo - - lifecycle { - ignore_changes = [ami] - } -} diff --git a/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf b/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf deleted file mode 100644 index ac594a9eb845..000000000000 --- a/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "foo" {} - -variable "bar" {} - -resource "aws_instance" "foo" { - ami = "${var.foo}" - instance = "${var.bar}" - foo = "bar" - - lifecycle { - ignore_changes = all - } -} diff --git a/internal/terraform/testdata/plan-ignore-changes-with-flatmaps/main.tf b/internal/terraform/testdata/plan-ignore-changes-with-flatmaps/main.tf deleted file mode 100644 index f61a3d42fc49..000000000000 --- a/internal/terraform/testdata/plan-ignore-changes-with-flatmaps/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -resource "aws_instance" "foo" { - user_data = "x" - require_new = "yes" - - set = [{ - a = "1" - b = "2" - }] - - lst = ["j", "k"] - - lifecycle { - ignore_changes = ["require_new"] - } -} diff --git a/internal/terraform/testdata/plan-ignore-changes/main.tf b/internal/terraform/testdata/plan-ignore-changes/main.tf deleted file mode 100644 index ed17c634497d..000000000000 --- 
a/internal/terraform/testdata/plan-ignore-changes/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "foo" {} - -resource "aws_instance" "foo" { - ami = var.foo - - lifecycle { - ignore_changes = [ami] - } -} diff --git a/internal/terraform/testdata/plan-list-order/main.tf b/internal/terraform/testdata/plan-list-order/main.tf deleted file mode 100644 index 77db3d0597ea..000000000000 --- a/internal/terraform/testdata/plan-list-order/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "a" { - foo = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20] -} - -resource "aws_instance" "b" { - foo = "${aws_instance.a.foo}" -} diff --git a/internal/terraform/testdata/plan-local-value-count/main.tf b/internal/terraform/testdata/plan-local-value-count/main.tf deleted file mode 100644 index 34aad96ad650..000000000000 --- a/internal/terraform/testdata/plan-local-value-count/main.tf +++ /dev/null @@ -1,8 +0,0 @@ - -locals { - count = 3 -} - -resource "test_resource" "foo" { - count = "${local.count}" -} diff --git a/internal/terraform/testdata/plan-module-cycle/child/main.tf b/internal/terraform/testdata/plan-module-cycle/child/main.tf deleted file mode 100644 index e2e60c1f086d..000000000000 --- a/internal/terraform/testdata/plan-module-cycle/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "in" {} - -output "out" { - value = "${var.in}" -} diff --git a/internal/terraform/testdata/plan-module-cycle/main.tf b/internal/terraform/testdata/plan-module-cycle/main.tf deleted file mode 100644 index e9c459721f53..000000000000 --- a/internal/terraform/testdata/plan-module-cycle/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -module "a" { - source = "./child" - in = "${aws_instance.b.id}" -} - -resource "aws_instance" "b" {} - -resource "aws_instance" "c" { - some_input = "${module.a.out}" - - depends_on = ["aws_instance.b"] -} diff --git a/internal/terraform/testdata/plan-module-deadlock/child/main.tf b/internal/terraform/testdata/plan-module-deadlock/child/main.tf deleted file mode 100644 
index 2451bf0542ff..000000000000 --- a/internal/terraform/testdata/plan-module-deadlock/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - count = "${length("abc")}" - - lifecycle { - create_before_destroy = true - } -} diff --git a/internal/terraform/testdata/plan-module-deadlock/main.tf b/internal/terraform/testdata/plan-module-deadlock/main.tf deleted file mode 100644 index 1f95749fa7ea..000000000000 --- a/internal/terraform/testdata/plan-module-deadlock/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/plan-module-destroy-gh-1835/a/main.tf b/internal/terraform/testdata/plan-module-destroy-gh-1835/a/main.tf deleted file mode 100644 index ca44c757d015..000000000000 --- a/internal/terraform/testdata/plan-module-destroy-gh-1835/a/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "a" {} - -output "a_output" { - value = "${aws_instance.a.id}" -} diff --git a/internal/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf b/internal/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf deleted file mode 100644 index 3b0cc6664500..000000000000 --- a/internal/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "a_id" {} - -resource "aws_instance" "b" { - foo = "echo ${var.a_id}" -} diff --git a/internal/terraform/testdata/plan-module-destroy-gh-1835/main.tf b/internal/terraform/testdata/plan-module-destroy-gh-1835/main.tf deleted file mode 100644 index c2f72c45e329..000000000000 --- a/internal/terraform/testdata/plan-module-destroy-gh-1835/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "a_module" { - source = "./a" -} - -module "b_module" { - source = "./b" - a_id = "${module.a_module.a_output}" -} diff --git a/internal/terraform/testdata/plan-module-destroy-multivar/child/main.tf b/internal/terraform/testdata/plan-module-destroy-multivar/child/main.tf deleted file mode 100644 index 6a496f06f6a3..000000000000 
--- a/internal/terraform/testdata/plan-module-destroy-multivar/child/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "instance_count" { - default = "1" -} - -resource "aws_instance" "foo" { - count = "${var.instance_count}" - bar = "bar" -} diff --git a/internal/terraform/testdata/plan-module-destroy-multivar/main.tf b/internal/terraform/testdata/plan-module-destroy-multivar/main.tf deleted file mode 100644 index 2f965b68cc11..000000000000 --- a/internal/terraform/testdata/plan-module-destroy-multivar/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - instance_count = "2" -} diff --git a/internal/terraform/testdata/plan-module-destroy/child/main.tf b/internal/terraform/testdata/plan-module-destroy/child/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/plan-module-destroy/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-module-destroy/main.tf b/internal/terraform/testdata/plan-module-destroy/main.tf deleted file mode 100644 index 428f89834db8..000000000000 --- a/internal/terraform/testdata/plan-module-destroy/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-module-input-computed/child/main.tf b/internal/terraform/testdata/plan-module-input-computed/child/main.tf deleted file mode 100644 index c1a00c5a326d..000000000000 --- a/internal/terraform/testdata/plan-module-input-computed/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "input" {} - -resource "aws_instance" "foo" { - foo = "${var.input}" -} diff --git a/internal/terraform/testdata/plan-module-input-computed/main.tf b/internal/terraform/testdata/plan-module-input-computed/main.tf deleted file mode 100644 index 3a0576434fbf..000000000000 --- a/internal/terraform/testdata/plan-module-input-computed/main.tf +++ 
/dev/null @@ -1,8 +0,0 @@ -module "child" { - input = "${aws_instance.bar.foo}" - source = "./child" -} - -resource "aws_instance" "bar" { - compute = "foo" -} diff --git a/internal/terraform/testdata/plan-module-input-var/child/main.tf b/internal/terraform/testdata/plan-module-input-var/child/main.tf deleted file mode 100644 index c1a00c5a326d..000000000000 --- a/internal/terraform/testdata/plan-module-input-var/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "input" {} - -resource "aws_instance" "foo" { - foo = "${var.input}" -} diff --git a/internal/terraform/testdata/plan-module-input-var/main.tf b/internal/terraform/testdata/plan-module-input-var/main.tf deleted file mode 100644 index 3fba315ee2f9..000000000000 --- a/internal/terraform/testdata/plan-module-input-var/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "foo" {} - -module "child" { - input = "${var.foo}" - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "2" -} diff --git a/internal/terraform/testdata/plan-module-input/child/main.tf b/internal/terraform/testdata/plan-module-input/child/main.tf deleted file mode 100644 index c1a00c5a326d..000000000000 --- a/internal/terraform/testdata/plan-module-input/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "input" {} - -resource "aws_instance" "foo" { - foo = "${var.input}" -} diff --git a/internal/terraform/testdata/plan-module-input/main.tf b/internal/terraform/testdata/plan-module-input/main.tf deleted file mode 100644 index 2ad8ec0ca105..000000000000 --- a/internal/terraform/testdata/plan-module-input/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "child" { - input = "42" - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "2" -} diff --git a/internal/terraform/testdata/plan-module-map-literal/child/main.tf b/internal/terraform/testdata/plan-module-map-literal/child/main.tf deleted file mode 100644 index 912431922a7b..000000000000 --- a/internal/terraform/testdata/plan-module-map-literal/child/main.tf +++ 
/dev/null @@ -1,12 +0,0 @@ -variable "amap" { - type = map(string) -} - -variable "othermap" { - type = map(string) -} - -resource "aws_instance" "foo" { - tags = "${var.amap}" - meta = "${var.othermap}" -} diff --git a/internal/terraform/testdata/plan-module-map-literal/main.tf b/internal/terraform/testdata/plan-module-map-literal/main.tf deleted file mode 100644 index 90235ed7a2fb..000000000000 --- a/internal/terraform/testdata/plan-module-map-literal/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child" { - source = "./child" - - amap = { - foo = "bar" - } - - othermap = {} -} diff --git a/internal/terraform/testdata/plan-module-multi-var/child/main.tf b/internal/terraform/testdata/plan-module-multi-var/child/main.tf deleted file mode 100644 index ad8dd6073e5f..000000000000 --- a/internal/terraform/testdata/plan-module-multi-var/child/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "things" {} - -resource "aws_instance" "bar" { - baz = "baz" - count = 2 -} - -resource "aws_instance" "foo" { - foo = "${join(",",aws_instance.bar.*.baz)}" -} diff --git a/internal/terraform/testdata/plan-module-multi-var/main.tf b/internal/terraform/testdata/plan-module-multi-var/main.tf deleted file mode 100644 index 40c7618fe09b..000000000000 --- a/internal/terraform/testdata/plan-module-multi-var/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "parent" { - count = 2 -} - -module "child" { - source = "./child" - things = "${join(",", aws_instance.parent.*.id)}" -} - diff --git a/internal/terraform/testdata/plan-module-provider-defaults-var/child/main.tf b/internal/terraform/testdata/plan-module-provider-defaults-var/child/main.tf deleted file mode 100644 index 5ce4f55fe841..000000000000 --- a/internal/terraform/testdata/plan-module-provider-defaults-var/child/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" { - from = "child" - to = "child" -} - -resource "aws_instance" "foo" { - from = "child" -} diff --git 
a/internal/terraform/testdata/plan-module-provider-defaults-var/main.tf b/internal/terraform/testdata/plan-module-provider-defaults-var/main.tf deleted file mode 100644 index d3c34908bd1d..000000000000 --- a/internal/terraform/testdata/plan-module-provider-defaults-var/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -provider "aws" { - from = "${var.foo}" -} - -resource "aws_instance" "foo" {} - -variable "foo" {} diff --git a/internal/terraform/testdata/plan-module-provider-defaults/child/main.tf b/internal/terraform/testdata/plan-module-provider-defaults/child/main.tf deleted file mode 100644 index 5ce4f55fe841..000000000000 --- a/internal/terraform/testdata/plan-module-provider-defaults/child/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" { - from = "child" - to = "child" -} - -resource "aws_instance" "foo" { - from = "child" -} diff --git a/internal/terraform/testdata/plan-module-provider-defaults/main.tf b/internal/terraform/testdata/plan-module-provider-defaults/main.tf deleted file mode 100644 index 5b08577c6e45..000000000000 --- a/internal/terraform/testdata/plan-module-provider-defaults/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -provider "aws" { - from = "root" -} - -resource "aws_instance" "foo" { - from = "root" -} diff --git a/internal/terraform/testdata/plan-module-provider-inherit-deep/A/main.tf b/internal/terraform/testdata/plan-module-provider-inherit-deep/A/main.tf deleted file mode 100644 index efe683c318e6..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit-deep/A/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "B" { - source = "../B" -} diff --git a/internal/terraform/testdata/plan-module-provider-inherit-deep/B/main.tf b/internal/terraform/testdata/plan-module-provider-inherit-deep/B/main.tf deleted file mode 100644 index 29cba7fc3b05..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit-deep/B/main.tf +++ /dev/null 
@@ -1,3 +0,0 @@ -module "C" { - source = "../C" -} diff --git a/internal/terraform/testdata/plan-module-provider-inherit-deep/C/main.tf b/internal/terraform/testdata/plan-module-provider-inherit-deep/C/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit-deep/C/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/plan-module-provider-inherit-deep/main.tf b/internal/terraform/testdata/plan-module-provider-inherit-deep/main.tf deleted file mode 100644 index 12677b69b228..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit-deep/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "A" { - source = "./A" -} - -provider "aws" { - from = "root" -} diff --git a/internal/terraform/testdata/plan-module-provider-inherit/child/main.tf b/internal/terraform/testdata/plan-module-provider-inherit/child/main.tf deleted file mode 100644 index 2e890bbc09c6..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - from = "child" -} diff --git a/internal/terraform/testdata/plan-module-provider-inherit/main.tf b/internal/terraform/testdata/plan-module-provider-inherit/main.tf deleted file mode 100644 index 5b08577c6e45..000000000000 --- a/internal/terraform/testdata/plan-module-provider-inherit/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -provider "aws" { - from = "root" -} - -resource "aws_instance" "foo" { - from = "root" -} diff --git a/internal/terraform/testdata/plan-module-provider-var/child/main.tf b/internal/terraform/testdata/plan-module-provider-var/child/main.tf deleted file mode 100644 index 599cb99db5b1..000000000000 --- a/internal/terraform/testdata/plan-module-provider-var/child/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "foo" {} - -provider "aws" { - value = "${var.foo}" -} - 
-resource "aws_instance" "test" { - value = "hello" -} diff --git a/internal/terraform/testdata/plan-module-provider-var/main.tf b/internal/terraform/testdata/plan-module-provider-var/main.tf deleted file mode 100644 index 43675f913c4c..000000000000 --- a/internal/terraform/testdata/plan-module-provider-var/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "foo" { - default = "bar" -} - -module "child" { - source = "./child" - foo = "${var.foo}" -} diff --git a/internal/terraform/testdata/plan-module-var-computed/child/main.tf b/internal/terraform/testdata/plan-module-var-computed/child/main.tf deleted file mode 100644 index 20a301330bc9..000000000000 --- a/internal/terraform/testdata/plan-module-var-computed/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - compute = "foo" -} - -output "num" { - value = "${aws_instance.foo.foo}" -} diff --git a/internal/terraform/testdata/plan-module-var-computed/main.tf b/internal/terraform/testdata/plan-module-var-computed/main.tf deleted file mode 100644 index b38f538a237d..000000000000 --- a/internal/terraform/testdata/plan-module-var-computed/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "${module.child.num}" -} diff --git a/internal/terraform/testdata/plan-module-var-with-default-value/inner/main.tf b/internal/terraform/testdata/plan-module-var-with-default-value/inner/main.tf deleted file mode 100644 index 5b5cf6cdfc5e..000000000000 --- a/internal/terraform/testdata/plan-module-var-with-default-value/inner/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "im_a_string" { - type = string -} - -variable "service_region_ami" { - type = map(string) - default = { - us-east-1 = "ami-e4c9db8e" - } -} - -resource "null_resource" "noop" {} diff --git a/internal/terraform/testdata/plan-module-var-with-default-value/main.tf b/internal/terraform/testdata/plan-module-var-with-default-value/main.tf deleted file mode 100644 index 
96b27418a03f..000000000000 --- a/internal/terraform/testdata/plan-module-var-with-default-value/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "null_resource" "noop" {} - -module "test" { - source = "./inner" - - im_a_string = "hello" -} diff --git a/internal/terraform/testdata/plan-module-var/child/main.tf b/internal/terraform/testdata/plan-module-var/child/main.tf deleted file mode 100644 index c7b1d283e3a0..000000000000 --- a/internal/terraform/testdata/plan-module-var/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -output "num" { - value = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/plan-module-var/main.tf b/internal/terraform/testdata/plan-module-var/main.tf deleted file mode 100644 index 942bdba92697..000000000000 --- a/internal/terraform/testdata/plan-module-var/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "${module.child.num}" -} diff --git a/internal/terraform/testdata/plan-module-variable-from-splat/main.tf b/internal/terraform/testdata/plan-module-variable-from-splat/main.tf deleted file mode 100644 index be900a3c4a7b..000000000000 --- a/internal/terraform/testdata/plan-module-variable-from-splat/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "mod1" { - source = "./mod" - param = ["this", "one", "works"] -} - -module "mod2" { - source = "./mod" - param = [module.mod1.out_from_splat[0]] -} diff --git a/internal/terraform/testdata/plan-module-variable-from-splat/mod/main.tf b/internal/terraform/testdata/plan-module-variable-from-splat/mod/main.tf deleted file mode 100644 index 66127d36b0ab..000000000000 --- a/internal/terraform/testdata/plan-module-variable-from-splat/mod/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "param" { - type = list(string) -} - -resource "aws_instance" "test" { - count = "2" - thing = "doesnt" -} - -output "out_from_splat" { - value = aws_instance.test.*.thing -} diff --git 
a/internal/terraform/testdata/plan-module-wrong-var-type-nested/inner/main.tf b/internal/terraform/testdata/plan-module-wrong-var-type-nested/inner/main.tf deleted file mode 100644 index dabe507fe57d..000000000000 --- a/internal/terraform/testdata/plan-module-wrong-var-type-nested/inner/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "inner_in" { - type = map(string) - default = { - us-west-1 = "ami-12345" - us-west-2 = "ami-67890" - } -} - -resource "null_resource" "inner_noop" {} - -output "inner_out" { - value = lookup(var.inner_in, "us-west-1") -} diff --git a/internal/terraform/testdata/plan-module-wrong-var-type-nested/main.tf b/internal/terraform/testdata/plan-module-wrong-var-type-nested/main.tf deleted file mode 100644 index 8f9fdcc56510..000000000000 --- a/internal/terraform/testdata/plan-module-wrong-var-type-nested/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "middle" { - source = "./middle" -} diff --git a/internal/terraform/testdata/plan-module-wrong-var-type-nested/middle/main.tf b/internal/terraform/testdata/plan-module-wrong-var-type-nested/middle/main.tf deleted file mode 100644 index eb989fe93608..000000000000 --- a/internal/terraform/testdata/plan-module-wrong-var-type-nested/middle/main.tf +++ /dev/null @@ -1,19 +0,0 @@ -variable "middle_in" { - type = map(string) - default = { - eu-west-1 = "ami-12345" - eu-west-2 = "ami-67890" - } -} - -module "inner" { - source = "../inner" - - inner_in = "hello" -} - -resource "null_resource" "middle_noop" {} - -output "middle_out" { - value = lookup(var.middle_in, "us-west-1") -} diff --git a/internal/terraform/testdata/plan-module-wrong-var-type/inner/main.tf b/internal/terraform/testdata/plan-module-wrong-var-type/inner/main.tf deleted file mode 100644 index 7782d1b844d4..000000000000 --- a/internal/terraform/testdata/plan-module-wrong-var-type/inner/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "map_in" { - type = map(string) - - default = { - us-west-1 = "ami-12345" - us-west-2 = "ami-67890" - } 
-} - -// We have to reference it so it isn't pruned -output "output" { - value = var.map_in -} diff --git a/internal/terraform/testdata/plan-module-wrong-var-type/main.tf b/internal/terraform/testdata/plan-module-wrong-var-type/main.tf deleted file mode 100644 index 5a39cd5d5aeb..000000000000 --- a/internal/terraform/testdata/plan-module-wrong-var-type/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "input" { - type = string - default = "hello world" -} - -module "test" { - source = "./inner" - - map_in = var.input -} diff --git a/internal/terraform/testdata/plan-modules-expand/child/main.tf b/internal/terraform/testdata/plan-modules-expand/child/main.tf deleted file mode 100644 index 612478f79d5d..000000000000 --- a/internal/terraform/testdata/plan-modules-expand/child/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "foo" {} -variable "bar" {} - -resource "aws_instance" "foo" { - count = 2 - num = var.foo - bar = "baz" #var.bar -} - -output "out" { - value = aws_instance.foo[0].id -} diff --git a/internal/terraform/testdata/plan-modules-expand/main.tf b/internal/terraform/testdata/plan-modules-expand/main.tf deleted file mode 100644 index 023709596c6c..000000000000 --- a/internal/terraform/testdata/plan-modules-expand/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -locals { - val = 2 - bar = "baz" - m = { - "a" = "b" - } -} - -variable "myvar" { - default = "baz" -} - -module "count_child" { - count = local.val - foo = count.index - bar = var.myvar - source = "./child" -} - -module "for_each_child" { - for_each = aws_instance.foo - foo = 2 - bar = each.key - source = "./child" -} - -resource "aws_instance" "foo" { - for_each = local.m -} diff --git a/internal/terraform/testdata/plan-modules-remove-provisioners/main.tf b/internal/terraform/testdata/plan-modules-remove-provisioners/main.tf deleted file mode 100644 index ce9a38866464..000000000000 --- a/internal/terraform/testdata/plan-modules-remove-provisioners/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource 
"aws_instance" "top" {} - -# module "test" { -# source = "./parent" -# } diff --git a/internal/terraform/testdata/plan-modules-remove-provisioners/parent/child/main.tf b/internal/terraform/testdata/plan-modules-remove-provisioners/parent/child/main.tf deleted file mode 100644 index b626e60c824e..000000000000 --- a/internal/terraform/testdata/plan-modules-remove-provisioners/parent/child/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "aws_instance" "foo" { -} diff --git a/internal/terraform/testdata/plan-modules-remove-provisioners/parent/main.tf b/internal/terraform/testdata/plan-modules-remove-provisioners/parent/main.tf deleted file mode 100644 index fbc1aa09c1e3..000000000000 --- a/internal/terraform/testdata/plan-modules-remove-provisioners/parent/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "childone" { - source = "./child" -} - -module "childtwo" { - source = "./child" -} diff --git a/internal/terraform/testdata/plan-modules-remove/main.tf b/internal/terraform/testdata/plan-modules-remove/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/plan-modules-remove/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-modules/child/main.tf b/internal/terraform/testdata/plan-modules/child/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/plan-modules/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-modules/main.tf b/internal/terraform/testdata/plan-modules/main.tf deleted file mode 100644 index dcdb236a1d34..000000000000 --- a/internal/terraform/testdata/plan-modules/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.num}" -} diff --git 
a/internal/terraform/testdata/plan-orphan/main.tf b/internal/terraform/testdata/plan-orphan/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/plan-orphan/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-path-var/main.tf b/internal/terraform/testdata/plan-path-var/main.tf deleted file mode 100644 index 13012569882d..000000000000 --- a/internal/terraform/testdata/plan-path-var/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { - cwd = "${path.cwd}/barpath" - module = "${path.module}/foopath" - root = "${path.root}/barpath" -} diff --git a/internal/terraform/testdata/plan-prevent-destroy-bad/main.tf b/internal/terraform/testdata/plan-prevent-destroy-bad/main.tf deleted file mode 100644 index 19077c1a6512..000000000000 --- a/internal/terraform/testdata/plan-prevent-destroy-bad/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - require_new = "yes" - - lifecycle { - prevent_destroy = true - } -} diff --git a/internal/terraform/testdata/plan-prevent-destroy-count-bad/main.tf b/internal/terraform/testdata/plan-prevent-destroy-count-bad/main.tf deleted file mode 100644 index 818f93e70203..000000000000 --- a/internal/terraform/testdata/plan-prevent-destroy-count-bad/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = "1" - current = "${count.index}" - - lifecycle { - prevent_destroy = true - } -} diff --git a/internal/terraform/testdata/plan-prevent-destroy-count-good/main.tf b/internal/terraform/testdata/plan-prevent-destroy-count-good/main.tf deleted file mode 100644 index b6b479078501..000000000000 --- a/internal/terraform/testdata/plan-prevent-destroy-count-good/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "foo" { - count = "1" - current = "${count.index}" -} diff --git a/internal/terraform/testdata/plan-prevent-destroy-good/main.tf 
b/internal/terraform/testdata/plan-prevent-destroy-good/main.tf deleted file mode 100644 index a88b9e3e101c..000000000000 --- a/internal/terraform/testdata/plan-prevent-destroy-good/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" { - lifecycle { - prevent_destroy = true - } -} diff --git a/internal/terraform/testdata/plan-provider/main.tf b/internal/terraform/testdata/plan-provider/main.tf deleted file mode 100644 index 8010f70aef9e..000000000000 --- a/internal/terraform/testdata/plan-provider/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "foo" {} - -provider "aws" { - foo = "${var.foo}" -} - -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/plan-provisioner-cycle/main.tf b/internal/terraform/testdata/plan-provisioner-cycle/main.tf deleted file mode 100644 index ed65c0918caa..000000000000 --- a/internal/terraform/testdata/plan-provisioner-cycle/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 - - provisioner "local-exec" { - command = "echo ${aws_instance.foo.0.id} ${aws_instance.foo.1.id} ${aws_instance.foo.2.id}" - } -} diff --git a/internal/terraform/testdata/plan-required-output/main.tf b/internal/terraform/testdata/plan-required-output/main.tf deleted file mode 100644 index 227b5c1530ce..000000000000 --- a/internal/terraform/testdata/plan-required-output/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_resource" "root" { - required = module.mod.object.id -} - -module "mod" { - source = "./mod" -} diff --git a/internal/terraform/testdata/plan-required-output/mod/main.tf b/internal/terraform/testdata/plan-required-output/mod/main.tf deleted file mode 100644 index 772f1645f3e8..000000000000 --- a/internal/terraform/testdata/plan-required-output/mod/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_resource" "for_output" { - required = "val" -} - -output "object" { - value = test_resource.for_output -} diff --git a/internal/terraform/testdata/plan-required-whole-mod/main.tf 
b/internal/terraform/testdata/plan-required-whole-mod/main.tf deleted file mode 100644 index 9deb3c5a162b..000000000000 --- a/internal/terraform/testdata/plan-required-whole-mod/main.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "test_resource" "root" { - required = local.object.id -} - -locals { - # This indirection is here to force the evaluator to produce the whole - # module object here rather than just fetching the single "object" output. - # This makes this fixture different than plan-required-output, which just - # accesses module.mod.object.id directly and thus visits a different - # codepath in the evaluator. - mod = module.mod - object = local.mod.object -} - -module "mod" { - source = "./mod" -} diff --git a/internal/terraform/testdata/plan-required-whole-mod/mod/main.tf b/internal/terraform/testdata/plan-required-whole-mod/mod/main.tf deleted file mode 100644 index 772f1645f3e8..000000000000 --- a/internal/terraform/testdata/plan-required-whole-mod/mod/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_resource" "for_output" { - required = "val" -} - -output "object" { - value = test_resource.for_output -} diff --git a/internal/terraform/testdata/plan-requires-replace/main.tf b/internal/terraform/testdata/plan-requires-replace/main.tf deleted file mode 100644 index 23cee56b3b81..000000000000 --- a/internal/terraform/testdata/plan-requires-replace/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_thing" "foo" { - v = "goodbye" -} diff --git a/internal/terraform/testdata/plan-self-ref-multi-all/main.tf b/internal/terraform/testdata/plan-self-ref-multi-all/main.tf deleted file mode 100644 index d3a9857f7bd3..000000000000 --- a/internal/terraform/testdata/plan-self-ref-multi-all/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "web" { - foo = "${aws_instance.web.*.foo}" - count = 4 -} diff --git a/internal/terraform/testdata/plan-self-ref-multi/main.tf b/internal/terraform/testdata/plan-self-ref-multi/main.tf deleted file mode 100644 
index 5b27cac7150f..000000000000 --- a/internal/terraform/testdata/plan-self-ref-multi/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "web" { - foo = "${aws_instance.web.0.foo}" - count = 4 -} diff --git a/internal/terraform/testdata/plan-self-ref/main.tf b/internal/terraform/testdata/plan-self-ref/main.tf deleted file mode 100644 index f2bf91d77bf9..000000000000 --- a/internal/terraform/testdata/plan-self-ref/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - foo = "${aws_instance.web.foo}" -} diff --git a/internal/terraform/testdata/plan-shadow-uuid/main.tf b/internal/terraform/testdata/plan-shadow-uuid/main.tf deleted file mode 100644 index 2b6ec72a0015..000000000000 --- a/internal/terraform/testdata/plan-shadow-uuid/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "test" { - value = "${uuid()}" -} diff --git a/internal/terraform/testdata/plan-taint-ignore-changes/main.tf b/internal/terraform/testdata/plan-taint-ignore-changes/main.tf deleted file mode 100644 index ff95d6596dc2..000000000000 --- a/internal/terraform/testdata/plan-taint-ignore-changes/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - vars = "foo" - - lifecycle { - ignore_changes = ["vars"] - } -} diff --git a/internal/terraform/testdata/plan-taint-interpolated-count/main.tf b/internal/terraform/testdata/plan-taint-interpolated-count/main.tf deleted file mode 100644 index 91d8b65c81c5..000000000000 --- a/internal/terraform/testdata/plan-taint-interpolated-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "instance_count" { - default = 3 -} - -resource "aws_instance" "foo" { - count = "${var.instance_count}" -} diff --git a/internal/terraform/testdata/plan-taint/main.tf b/internal/terraform/testdata/plan-taint/main.tf deleted file mode 100644 index 1b6cdae67b0e..000000000000 --- a/internal/terraform/testdata/plan-taint/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource 
"aws_instance" "bar" { - foo = "${aws_instance.foo.num}" -} diff --git a/internal/terraform/testdata/plan-targeted-cross-module/A/main.tf b/internal/terraform/testdata/plan-targeted-cross-module/A/main.tf deleted file mode 100644 index 4c014aa22343..000000000000 --- a/internal/terraform/testdata/plan-targeted-cross-module/A/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - foo = "bar" -} - -output "value" { - value = "${aws_instance.foo.id}" -} diff --git a/internal/terraform/testdata/plan-targeted-cross-module/B/main.tf b/internal/terraform/testdata/plan-targeted-cross-module/B/main.tf deleted file mode 100644 index c3aeb7b76e39..000000000000 --- a/internal/terraform/testdata/plan-targeted-cross-module/B/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "input" {} - -resource "aws_instance" "bar" { - foo = "${var.input}" -} diff --git a/internal/terraform/testdata/plan-targeted-cross-module/main.tf b/internal/terraform/testdata/plan-targeted-cross-module/main.tf deleted file mode 100644 index e6a83b2a02b9..000000000000 --- a/internal/terraform/testdata/plan-targeted-cross-module/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "A" { - source = "./A" -} - -module "B" { - source = "./B" - input = "${module.A.value}" -} diff --git a/internal/terraform/testdata/plan-targeted-module-orphan/main.tf b/internal/terraform/testdata/plan-targeted-module-orphan/main.tf deleted file mode 100644 index 2b33fedaed10..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-orphan/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -# Once opon a time, there was a child module here -/* -module "child" { - source = "./child" -} -*/ diff --git a/internal/terraform/testdata/plan-targeted-module-untargeted-variable/child/main.tf b/internal/terraform/testdata/plan-targeted-module-untargeted-variable/child/main.tf deleted file mode 100644 index f7b424b8415f..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-untargeted-variable/child/main.tf +++ /dev/null 
@@ -1,5 +0,0 @@ -variable "id" {} - -resource "aws_instance" "mod" { - value = "${var.id}" -} diff --git a/internal/terraform/testdata/plan-targeted-module-untargeted-variable/main.tf b/internal/terraform/testdata/plan-targeted-module-untargeted-variable/main.tf deleted file mode 100644 index 90e44dceba60..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-untargeted-variable/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "blue" { } -resource "aws_instance" "green" { } - -module "blue_mod" { - source = "./child" - id = "${aws_instance.blue.id}" -} - -module "green_mod" { - source = "./child" - id = "${aws_instance.green.id}" -} diff --git a/internal/terraform/testdata/plan-targeted-module-with-provider/child1/main.tf b/internal/terraform/testdata/plan-targeted-module-with-provider/child1/main.tf deleted file mode 100644 index c9aaff5f724d..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-with-provider/child1/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "key" {} - -provider "null" { - key = "${var.key}" -} - -resource "null_resource" "foo" {} diff --git a/internal/terraform/testdata/plan-targeted-module-with-provider/child2/main.tf b/internal/terraform/testdata/plan-targeted-module-with-provider/child2/main.tf deleted file mode 100644 index c9aaff5f724d..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-with-provider/child2/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "key" {} - -provider "null" { - key = "${var.key}" -} - -resource "null_resource" "foo" {} diff --git a/internal/terraform/testdata/plan-targeted-module-with-provider/main.tf b/internal/terraform/testdata/plan-targeted-module-with-provider/main.tf deleted file mode 100644 index 0fa7bcffdd7d..000000000000 --- a/internal/terraform/testdata/plan-targeted-module-with-provider/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child1" { - source = "./child1" - key = "value" -} - -module "child2" { - source = "./child2" - key = "value" 
-} diff --git a/internal/terraform/testdata/plan-targeted-orphan/main.tf b/internal/terraform/testdata/plan-targeted-orphan/main.tf deleted file mode 100644 index f2020858b148..000000000000 --- a/internal/terraform/testdata/plan-targeted-orphan/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -# This resource was previously "created" and the fixture represents -# it being destroyed subsequently - -/*resource "aws_instance" "orphan" {*/ - /*foo = "bar"*/ -/*}*/ diff --git a/internal/terraform/testdata/plan-targeted-over-ten/main.tf b/internal/terraform/testdata/plan-targeted-over-ten/main.tf deleted file mode 100644 index 1c7bc8769e07..000000000000 --- a/internal/terraform/testdata/plan-targeted-over-ten/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - count = 13 -} diff --git a/internal/terraform/testdata/plan-targeted/main.tf b/internal/terraform/testdata/plan-targeted/main.tf deleted file mode 100644 index ab00a845fa58..000000000000 --- a/internal/terraform/testdata/plan-targeted/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = aws_instance.foo.num -} - -module "mod" { - source = "./mod" - count = 1 -} diff --git a/internal/terraform/testdata/plan-targeted/mod/main.tf b/internal/terraform/testdata/plan-targeted/mod/main.tf deleted file mode 100644 index 98f5ee87e9f0..000000000000 --- a/internal/terraform/testdata/plan-targeted/mod/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} diff --git a/internal/terraform/testdata/plan-untargeted-resource-output/main.tf b/internal/terraform/testdata/plan-untargeted-resource-output/main.tf deleted file mode 100644 index 9d4a1c882d18..000000000000 --- a/internal/terraform/testdata/plan-untargeted-resource-output/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "mod" { - source = "./mod" -} - - -resource "aws_instance" "c" { - name = "${module.mod.output}" -} diff --git 
a/internal/terraform/testdata/plan-untargeted-resource-output/mod/main.tf b/internal/terraform/testdata/plan-untargeted-resource-output/mod/main.tf deleted file mode 100644 index dd6d791cba4f..000000000000 --- a/internal/terraform/testdata/plan-untargeted-resource-output/mod/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -locals { - one = 1 -} - -resource "aws_instance" "a" { - count = "${local.one}" -} - -resource "aws_instance" "b" { - count = "${local.one}" -} - -output "output" { - value = "${join("", coalescelist(aws_instance.a.*.id, aws_instance.b.*.id))}" -} diff --git a/internal/terraform/testdata/plan-var-list-err/main.tf b/internal/terraform/testdata/plan-var-list-err/main.tf deleted file mode 100644 index 6303064c9f64..000000000000 --- a/internal/terraform/testdata/plan-var-list-err/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -provider "aws" { - access_key = "a" - secret_key = "b" - region = "us-east-1" -} - -resource "aws_instance" "foo" { - ami = "ami-foo" - instance_type = "t2.micro" - security_groups = "${aws_security_group.foo.name}" -} - -resource "aws_security_group" "foo" { - name = "foobar" - description = "foobar" -} diff --git a/internal/terraform/testdata/plan-variable-sensitivity-module/child/main.tf b/internal/terraform/testdata/plan-variable-sensitivity-module/child/main.tf deleted file mode 100644 index e34751aa9b65..000000000000 --- a/internal/terraform/testdata/plan-variable-sensitivity-module/child/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "foo" { - type = string -} - -// "bar" is defined as sensitive by both the parent and the child -variable "bar" { - sensitive = true -} - -resource "aws_instance" "foo" { - foo = var.foo - value = var.bar -} diff --git a/internal/terraform/testdata/plan-variable-sensitivity-module/main.tf b/internal/terraform/testdata/plan-variable-sensitivity-module/main.tf deleted file mode 100644 index 69bdbb4cbed8..000000000000 --- a/internal/terraform/testdata/plan-variable-sensitivity-module/main.tf +++ /dev/null @@ 
-1,14 +0,0 @@ -variable "sensitive_var" { - default = "foo" - sensitive = true -} - -variable "another_var" { - sensitive = true -} - -module "child" { - source = "./child" - foo = var.sensitive_var - bar = var.another_var -} diff --git a/internal/terraform/testdata/plan-variable-sensitivity/main.tf b/internal/terraform/testdata/plan-variable-sensitivity/main.tf deleted file mode 100644 index 00a4b1ef9ee3..000000000000 --- a/internal/terraform/testdata/plan-variable-sensitivity/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "sensitive_var" { - default = "foo" - sensitive = true -} - -resource "aws_instance" "foo" { - foo = var.sensitive_var -} \ No newline at end of file diff --git a/internal/terraform/testdata/provider-meta-data-set/main.tf b/internal/terraform/testdata/provider-meta-data-set/main.tf deleted file mode 100644 index ef7acd957b38..000000000000 --- a/internal/terraform/testdata/provider-meta-data-set/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -data "test_data_source" "foo" { - foo = "bar" -} - -terraform { - provider_meta "test" { - baz = "quux" - } -} - -module "my_module" { - source = "./my-module" -} diff --git a/internal/terraform/testdata/provider-meta-data-set/my-module/main.tf b/internal/terraform/testdata/provider-meta-data-set/my-module/main.tf deleted file mode 100644 index 61a97706935f..000000000000 --- a/internal/terraform/testdata/provider-meta-data-set/my-module/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -data "test_file" "foo" { - id = "bar" -} - -terraform { - provider_meta "test" { - baz = "quux-submodule" - } -} diff --git a/internal/terraform/testdata/provider-meta-data-unset/main.tf b/internal/terraform/testdata/provider-meta-data-unset/main.tf deleted file mode 100644 index c4091f37b13b..000000000000 --- a/internal/terraform/testdata/provider-meta-data-unset/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -data "test_data_source" "foo" { - foo = "bar" -} - -module "my_module" { - source = "./my-module" -} diff --git 
a/internal/terraform/testdata/provider-meta-data-unset/my-module/main.tf b/internal/terraform/testdata/provider-meta-data-unset/my-module/main.tf deleted file mode 100644 index 7e0ea46b6b7d..000000000000 --- a/internal/terraform/testdata/provider-meta-data-unset/my-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "test_file" "foo" { - id = "bar" -} diff --git a/internal/terraform/testdata/provider-meta-set/main.tf b/internal/terraform/testdata/provider-meta-set/main.tf deleted file mode 100644 index a3e9f804bee8..000000000000 --- a/internal/terraform/testdata/provider-meta-set/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -resource "test_instance" "bar" { - foo = "bar" -} - -terraform { - provider_meta "test" { - baz = "quux" - } -} - -module "my_module" { - source = "./my-module" -} diff --git a/internal/terraform/testdata/provider-meta-set/my-module/main.tf b/internal/terraform/testdata/provider-meta-set/my-module/main.tf deleted file mode 100644 index 2a89dd51f34b..000000000000 --- a/internal/terraform/testdata/provider-meta-set/my-module/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "test_resource" "bar" { - value = "bar" -} - -terraform { - provider_meta "test" { - baz = "quux-submodule" - } -} diff --git a/internal/terraform/testdata/provider-meta-unset/main.tf b/internal/terraform/testdata/provider-meta-unset/main.tf deleted file mode 100644 index 0ae85d39fa27..000000000000 --- a/internal/terraform/testdata/provider-meta-unset/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_instance" "bar" { - foo = "bar" -} - -module "my_module" { - source = "./my-module" -} diff --git a/internal/terraform/testdata/provider-meta-unset/my-module/main.tf b/internal/terraform/testdata/provider-meta-unset/my-module/main.tf deleted file mode 100644 index ec9701f95606..000000000000 --- a/internal/terraform/testdata/provider-meta-unset/my-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_resource" "bar" { - value = "bar" -} diff --git 
a/internal/terraform/testdata/provider-with-locals/main.tf b/internal/terraform/testdata/provider-with-locals/main.tf deleted file mode 100644 index 3a7db0f87727..000000000000 --- a/internal/terraform/testdata/provider-with-locals/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - region = "${local.foo}" -} - -locals { - foo = "bar" -} - -resource "aws_instance" "foo" { - value = "${local.foo}" -} diff --git a/internal/terraform/testdata/refresh-basic/main.tf b/internal/terraform/testdata/refresh-basic/main.tf deleted file mode 100644 index 64cbf6236650..000000000000 --- a/internal/terraform/testdata/refresh-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/refresh-data-count/refresh-data-count.tf b/internal/terraform/testdata/refresh-data-count/refresh-data-count.tf deleted file mode 100644 index ccabdb2c689c..000000000000 --- a/internal/terraform/testdata/refresh-data-count/refresh-data-count.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "test" "foo" { -} - -data "test" "foo" { - count = length(test.foo.things) -} diff --git a/internal/terraform/testdata/refresh-data-module-var/child/main.tf b/internal/terraform/testdata/refresh-data-module-var/child/main.tf deleted file mode 100644 index 64d21beda045..000000000000 --- a/internal/terraform/testdata/refresh-data-module-var/child/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "key" {} - -data "aws_data_source" "foo" { - id = "${var.key}" -} - diff --git a/internal/terraform/testdata/refresh-data-module-var/main.tf b/internal/terraform/testdata/refresh-data-module-var/main.tf deleted file mode 100644 index a371831bd231..000000000000 --- a/internal/terraform/testdata/refresh-data-module-var/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "A" { - foo = "bar" -} - -module "child" { - source = "./child" - key = "${aws_instance.A.id}" -} diff --git a/internal/terraform/testdata/refresh-data-ref-data/main.tf 
b/internal/terraform/testdata/refresh-data-ref-data/main.tf deleted file mode 100644 index 5512be233216..000000000000 --- a/internal/terraform/testdata/refresh-data-ref-data/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -data "null_data_source" "foo" { - foo = "yes" -} - -data "null_data_source" "bar" { - bar = "${data.null_data_source.foo.foo}" -} diff --git a/internal/terraform/testdata/refresh-data-resource-basic/main.tf b/internal/terraform/testdata/refresh-data-resource-basic/main.tf deleted file mode 100644 index cb16d9f34140..000000000000 --- a/internal/terraform/testdata/refresh-data-resource-basic/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -data "null_data_source" "testing" { - inputs = { - test = "yes" - } -} diff --git a/internal/terraform/testdata/refresh-dynamic/main.tf b/internal/terraform/testdata/refresh-dynamic/main.tf deleted file mode 100644 index 5c857a2f459e..000000000000 --- a/internal/terraform/testdata/refresh-dynamic/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_instance" "foo" { - dynamic = {} -} diff --git a/internal/terraform/testdata/refresh-module-computed-var/child/main.tf b/internal/terraform/testdata/refresh-module-computed-var/child/main.tf deleted file mode 100644 index 38260d6373c5..000000000000 --- a/internal/terraform/testdata/refresh-module-computed-var/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -output "value" { - value = "${var.value}" -} diff --git a/internal/terraform/testdata/refresh-module-computed-var/main.tf b/internal/terraform/testdata/refresh-module-computed-var/main.tf deleted file mode 100644 index a8573327b154..000000000000 --- a/internal/terraform/testdata/refresh-module-computed-var/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "child" { - source = "./child" - value = "${join(" ", aws_instance.test.*.id)}" -} - -resource "aws_instance" "test" { - value = "yes" -} diff --git a/internal/terraform/testdata/refresh-module-input-computed-output/child/main.tf 
b/internal/terraform/testdata/refresh-module-input-computed-output/child/main.tf deleted file mode 100644 index ebc1e3ffc142..000000000000 --- a/internal/terraform/testdata/refresh-module-input-computed-output/child/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "input" { - type = string -} - -resource "aws_instance" "foo" { - foo = var.input -} - -output "foo" { - value = aws_instance.foo.foo -} diff --git a/internal/terraform/testdata/refresh-module-input-computed-output/main.tf b/internal/terraform/testdata/refresh-module-input-computed-output/main.tf deleted file mode 100644 index 5827a5da25e2..000000000000 --- a/internal/terraform/testdata/refresh-module-input-computed-output/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "child" { - input = aws_instance.bar.foo - source = "./child" -} - -resource "aws_instance" "bar" { - compute = "foo" -} diff --git a/internal/terraform/testdata/refresh-module-orphan/child/grandchild/main.tf b/internal/terraform/testdata/refresh-module-orphan/child/grandchild/main.tf deleted file mode 100644 index 942e93dbc485..000000000000 --- a/internal/terraform/testdata/refresh-module-orphan/child/grandchild/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "baz" {} - -output "id" { value = "${aws_instance.baz.id}" } diff --git a/internal/terraform/testdata/refresh-module-orphan/child/main.tf b/internal/terraform/testdata/refresh-module-orphan/child/main.tf deleted file mode 100644 index 7c3fc842f34d..000000000000 --- a/internal/terraform/testdata/refresh-module-orphan/child/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -module "grandchild" { - source = "./grandchild" -} - -resource "aws_instance" "bar" { - grandchildid = "${module.grandchild.id}" -} - -output "id" { value = "${aws_instance.bar.id}" } -output "grandchild_id" { value = "${module.grandchild.id}" } diff --git a/internal/terraform/testdata/refresh-module-orphan/main.tf b/internal/terraform/testdata/refresh-module-orphan/main.tf deleted file mode 100644 index 
244374d9d162..000000000000 --- a/internal/terraform/testdata/refresh-module-orphan/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -/* -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - childid = "${module.child.id}" - grandchildid = "${module.child.grandchild_id}" -} -*/ diff --git a/internal/terraform/testdata/refresh-module-var-module/bar/main.tf b/internal/terraform/testdata/refresh-module-var-module/bar/main.tf deleted file mode 100644 index 46ea37f14f29..000000000000 --- a/internal/terraform/testdata/refresh-module-var-module/bar/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -variable "value" {} - -resource "aws_instance" "bar" {} diff --git a/internal/terraform/testdata/refresh-module-var-module/foo/main.tf b/internal/terraform/testdata/refresh-module-var-module/foo/main.tf deleted file mode 100644 index 2ee798058d3f..000000000000 --- a/internal/terraform/testdata/refresh-module-var-module/foo/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "output" { - value = "${aws_instance.foo.foo}" -} - -resource "aws_instance" "foo" { - compute = "foo" -} diff --git a/internal/terraform/testdata/refresh-module-var-module/main.tf b/internal/terraform/testdata/refresh-module-var-module/main.tf deleted file mode 100644 index 76775e3e6d04..000000000000 --- a/internal/terraform/testdata/refresh-module-var-module/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "foo" { - source = "./foo" -} - -module "bar" { - source = "./bar" - value = "${module.foo.output}" -} diff --git a/internal/terraform/testdata/refresh-modules/child/main.tf b/internal/terraform/testdata/refresh-modules/child/main.tf deleted file mode 100644 index 64cbf6236650..000000000000 --- a/internal/terraform/testdata/refresh-modules/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/refresh-modules/main.tf b/internal/terraform/testdata/refresh-modules/main.tf deleted file mode 100644 index 6b4520ec0f47..000000000000 --- 
a/internal/terraform/testdata/refresh-modules/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/refresh-no-state/main.tf b/internal/terraform/testdata/refresh-no-state/main.tf deleted file mode 100644 index 76c0f87671c2..000000000000 --- a/internal/terraform/testdata/refresh-no-state/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "foo" { - value = "" -} diff --git a/internal/terraform/testdata/refresh-output-partial/main.tf b/internal/terraform/testdata/refresh-output-partial/main.tf deleted file mode 100644 index 36ce289a34b7..000000000000 --- a/internal/terraform/testdata/refresh-output-partial/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "web" {} - -output "foo" { - value = "${aws_instance.web.foo}" -} diff --git a/internal/terraform/testdata/refresh-output/main.tf b/internal/terraform/testdata/refresh-output/main.tf deleted file mode 100644 index 42a01bd5ca19..000000000000 --- a/internal/terraform/testdata/refresh-output/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "web" {} - -output "foo" { - value = "${aws_instance.web.foo}" -} diff --git a/internal/terraform/testdata/refresh-schema-upgrade/main.tf b/internal/terraform/testdata/refresh-schema-upgrade/main.tf deleted file mode 100644 index ee0590e3c2d2..000000000000 --- a/internal/terraform/testdata/refresh-schema-upgrade/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "test_thing" "bar" { -} diff --git a/internal/terraform/testdata/refresh-targeted-count/main.tf b/internal/terraform/testdata/refresh-targeted-count/main.tf deleted file mode 100644 index f564b629c1ac..000000000000 --- a/internal/terraform/testdata/refresh-targeted-count/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_vpc" "metoo" {} -resource "aws_instance" "notme" { } -resource "aws_instance" "me" { - vpc_id = "${aws_vpc.metoo.id}" - count = 3 -} -resource 
"aws_elb" "meneither" { - instances = ["${aws_instance.me.*.id}"] -} diff --git a/internal/terraform/testdata/refresh-targeted/main.tf b/internal/terraform/testdata/refresh-targeted/main.tf deleted file mode 100644 index 3a76184647fc..000000000000 --- a/internal/terraform/testdata/refresh-targeted/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_vpc" "metoo" {} -resource "aws_instance" "notme" { } -resource "aws_instance" "me" { - vpc_id = "${aws_vpc.metoo.id}" -} -resource "aws_elb" "meneither" { - instances = ["${aws_instance.me.*.id}"] -} diff --git a/internal/terraform/testdata/refresh-unknown-provider/main.tf b/internal/terraform/testdata/refresh-unknown-provider/main.tf deleted file mode 100644 index 8a29fddd0863..000000000000 --- a/internal/terraform/testdata/refresh-unknown-provider/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "unknown_instance" "foo" { - num = "2" - compute = "foo" -} diff --git a/internal/terraform/testdata/refresh-vars/main.tf b/internal/terraform/testdata/refresh-vars/main.tf deleted file mode 100644 index 86cd6ace3723..000000000000 --- a/internal/terraform/testdata/refresh-vars/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "web" {} - -resource "aws_instance" "db" { - ami = "${aws_instance.web.id}" -} diff --git a/internal/terraform/testdata/static-validate-refs/static-validate-refs.tf b/internal/terraform/testdata/static-validate-refs/static-validate-refs.tf deleted file mode 100644 index 3667a4e11f35..000000000000 --- a/internal/terraform/testdata/static-validate-refs/static-validate-refs.tf +++ /dev/null @@ -1,23 +0,0 @@ -terraform { - required_providers { - boop = { - source = "foobar/beep" # intentional mismatch between local name and type - } - } -} - -resource "aws_instance" "no_count" { -} - -resource "aws_instance" "count" { - count = 1 -} - -resource "boop_instance" "yep" { -} - -resource "boop_whatever" "nope" { -} - -data "beep" "boop" { -} diff --git 
a/internal/terraform/testdata/transform-cbd-destroy-edge-both-count/main.tf b/internal/terraform/testdata/transform-cbd-destroy-edge-both-count/main.tf deleted file mode 100644 index c19e78eaa2f3..000000000000 --- a/internal/terraform/testdata/transform-cbd-destroy-edge-both-count/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -resource "test_object" "A" { - count = 2 - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - count = 2 - test_string = test_object.A[*].test_string[count.index] -} diff --git a/internal/terraform/testdata/transform-cbd-destroy-edge-count/main.tf b/internal/terraform/testdata/transform-cbd-destroy-edge-count/main.tf deleted file mode 100644 index 775900fcdd82..000000000000 --- a/internal/terraform/testdata/transform-cbd-destroy-edge-count/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "test_object" "A" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - count = 2 - test_string = test_object.A.test_string -} diff --git a/internal/terraform/testdata/transform-config-mode-data/main.tf b/internal/terraform/testdata/transform-config-mode-data/main.tf deleted file mode 100644 index 3c3e7e50d553..000000000000 --- a/internal/terraform/testdata/transform-config-mode-data/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "aws_ami" "foo" {} - -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/transform-destroy-cbd-edge-basic/main.tf b/internal/terraform/testdata/transform-destroy-cbd-edge-basic/main.tf deleted file mode 100644 index a17d8b4e35c0..000000000000 --- a/internal/terraform/testdata/transform-destroy-cbd-edge-basic/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "test_object" "A" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - test_string = "${test_object.A.id}" -} diff --git a/internal/terraform/testdata/transform-destroy-cbd-edge-multi/main.tf b/internal/terraform/testdata/transform-destroy-cbd-edge-multi/main.tf deleted 
file mode 100644 index 964bc44cfd87..000000000000 --- a/internal/terraform/testdata/transform-destroy-cbd-edge-multi/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -resource "test_object" "A" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "B" { - lifecycle { - create_before_destroy = true - } -} - -resource "test_object" "C" { - test_string = "${test_object.A.id}-${test_object.B.id}" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-basic/main.tf b/internal/terraform/testdata/transform-destroy-edge-basic/main.tf deleted file mode 100644 index 8afeda4feed2..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-basic/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "test_object" "A" {} - -resource "test_object" "B" { - test_string = "${test_object.A.test_string}" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-module-only/child/main.tf b/internal/terraform/testdata/transform-destroy-edge-module-only/child/main.tf deleted file mode 100644 index 242bb3359041..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-module-only/child/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "test_object" "a" {} - -resource "test_object" "b" { - test_string = "${test_object.a.test_string}" -} - -resource "test_object" "c" { - test_string = "${test_object.b.test_string}" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-module-only/main.tf b/internal/terraform/testdata/transform-destroy-edge-module-only/main.tf deleted file mode 100644 index 919351443d22..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-module-only/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - count = 2 -} diff --git a/internal/terraform/testdata/transform-destroy-edge-module/child/main.tf b/internal/terraform/testdata/transform-destroy-edge-module/child/main.tf deleted file mode 100644 index 337bbe754e70..000000000000 --- 
a/internal/terraform/testdata/transform-destroy-edge-module/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_object" "b" { - test_string = "foo" -} - -output "output" { - value = "${test_object.b.test_string}" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-module/main.tf b/internal/terraform/testdata/transform-destroy-edge-module/main.tf deleted file mode 100644 index 2a42635e4f5f..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "test_object" "a" { - test_string = "${module.child.output}" -} - -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-multi/main.tf b/internal/terraform/testdata/transform-destroy-edge-multi/main.tf deleted file mode 100644 index 3474bf60a422..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-multi/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "test_object" "A" {} - -resource "test_object" "B" { - test_string = "${test_object.A.test_string}" -} - -resource "test_object" "C" { - test_string = "${test_object.B.test_string}" -} diff --git a/internal/terraform/testdata/transform-destroy-edge-self-ref/main.tf b/internal/terraform/testdata/transform-destroy-edge-self-ref/main.tf deleted file mode 100644 index d91e024c4758..000000000000 --- a/internal/terraform/testdata/transform-destroy-edge-self-ref/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "test" "A" { - provisioner "foo" { - command = "${test.A.id}" - } -} diff --git a/internal/terraform/testdata/transform-module-var-basic/child/main.tf b/internal/terraform/testdata/transform-module-var-basic/child/main.tf deleted file mode 100644 index 53f3cd731d65..000000000000 --- a/internal/terraform/testdata/transform-module-var-basic/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -output "result" { - value = "${var.value}" -} diff --git 
a/internal/terraform/testdata/transform-module-var-basic/main.tf b/internal/terraform/testdata/transform-module-var-basic/main.tf deleted file mode 100644 index 0adb513f10ef..000000000000 --- a/internal/terraform/testdata/transform-module-var-basic/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - value = "foo" -} diff --git a/internal/terraform/testdata/transform-module-var-nested/child/child/main.tf b/internal/terraform/testdata/transform-module-var-nested/child/child/main.tf deleted file mode 100644 index 53f3cd731d65..000000000000 --- a/internal/terraform/testdata/transform-module-var-nested/child/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "value" {} - -output "result" { - value = "${var.value}" -} diff --git a/internal/terraform/testdata/transform-module-var-nested/child/main.tf b/internal/terraform/testdata/transform-module-var-nested/child/main.tf deleted file mode 100644 index b8c7f0bac242..000000000000 --- a/internal/terraform/testdata/transform-module-var-nested/child/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "value" {} - -module "child" { - source = "./child" - value = "${var.value}" -} diff --git a/internal/terraform/testdata/transform-module-var-nested/main.tf b/internal/terraform/testdata/transform-module-var-nested/main.tf deleted file mode 100644 index 2c20f1979270..000000000000 --- a/internal/terraform/testdata/transform-module-var-nested/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - source = "./child" - value = "foo" -} diff --git a/internal/terraform/testdata/transform-orphan-basic/main.tf b/internal/terraform/testdata/transform-orphan-basic/main.tf deleted file mode 100644 index 64cbf6236650..000000000000 --- a/internal/terraform/testdata/transform-orphan-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/transform-orphan-count-empty/main.tf b/internal/terraform/testdata/transform-orphan-count-empty/main.tf deleted file 
mode 100644 index e8045d6fce1c..000000000000 --- a/internal/terraform/testdata/transform-orphan-count-empty/main.tf +++ /dev/null @@ -1 +0,0 @@ -# Purposefully empty diff --git a/internal/terraform/testdata/transform-orphan-count/main.tf b/internal/terraform/testdata/transform-orphan-count/main.tf deleted file mode 100644 index acef373b35de..000000000000 --- a/internal/terraform/testdata/transform-orphan-count/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} diff --git a/internal/terraform/testdata/transform-orphan-modules/main.tf b/internal/terraform/testdata/transform-orphan-modules/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/transform-orphan-modules/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/transform-provider-basic/main.tf b/internal/terraform/testdata/transform-provider-basic/main.tf deleted file mode 100644 index 8a44e1dcbb58..000000000000 --- a/internal/terraform/testdata/transform-provider-basic/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -provider "aws" {} -resource "aws_instance" "web" {} diff --git a/internal/terraform/testdata/transform-provider-fqns-module/child/main.tf b/internal/terraform/testdata/transform-provider-fqns-module/child/main.tf deleted file mode 100644 index 5c56b7693975..000000000000 --- a/internal/terraform/testdata/transform-provider-fqns-module/child/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -terraform { - required_providers { - your-aws = { - source = "hashicorp/aws" - } - } -} - -resource "aws_instance" "web" { - provider = "your-aws" -} diff --git a/internal/terraform/testdata/transform-provider-fqns-module/main.tf b/internal/terraform/testdata/transform-provider-fqns-module/main.tf deleted file mode 100644 index dd582c0634b0..000000000000 --- a/internal/terraform/testdata/transform-provider-fqns-module/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -terraform { - required_providers { - 
my-aws = { - source = "hashicorp/aws" - } - } -} - -resource "aws_instance" "web" { - provider = "my-aws" -} diff --git a/internal/terraform/testdata/transform-provider-fqns/main.tf b/internal/terraform/testdata/transform-provider-fqns/main.tf deleted file mode 100644 index dd582c0634b0..000000000000 --- a/internal/terraform/testdata/transform-provider-fqns/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -terraform { - required_providers { - my-aws = { - source = "hashicorp/aws" - } - } -} - -resource "aws_instance" "web" { - provider = "my-aws" -} diff --git a/internal/terraform/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf b/internal/terraform/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf deleted file mode 100644 index 58363ef0c08a..000000000000 --- a/internal/terraform/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "baz" -} - -resource "aws_instance" "baz" { - provider = "aws.baz" -} diff --git a/internal/terraform/testdata/transform-provider-grandchild-inherit/child/main.tf b/internal/terraform/testdata/transform-provider-grandchild-inherit/child/main.tf deleted file mode 100644 index 7ec80343de70..000000000000 --- a/internal/terraform/testdata/transform-provider-grandchild-inherit/child/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -provider "aws" { - alias = "bar" -} - -module "grandchild" { - source = "./grandchild" - providers = { - aws.baz = aws.bar - } -} diff --git a/internal/terraform/testdata/transform-provider-grandchild-inherit/main.tf b/internal/terraform/testdata/transform-provider-grandchild-inherit/main.tf deleted file mode 100644 index cb9a2f9de982..000000000000 --- a/internal/terraform/testdata/transform-provider-grandchild-inherit/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - alias = "foo" - value = "config" -} - -module "child" { - source = "./child" - providers = { - aws.bar = aws.foo - } -} diff --git 
a/internal/terraform/testdata/transform-provider-inherit/child/main.tf b/internal/terraform/testdata/transform-provider-inherit/child/main.tf deleted file mode 100644 index b1f07068461c..000000000000 --- a/internal/terraform/testdata/transform-provider-inherit/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "bar" -} - -resource "aws_instance" "thing" { - provider = aws.bar -} diff --git a/internal/terraform/testdata/transform-provider-inherit/main.tf b/internal/terraform/testdata/transform-provider-inherit/main.tf deleted file mode 100644 index cb9a2f9de982..000000000000 --- a/internal/terraform/testdata/transform-provider-inherit/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - alias = "foo" - value = "config" -} - -module "child" { - source = "./child" - providers = { - aws.bar = aws.foo - } -} diff --git a/internal/terraform/testdata/transform-provider-missing-grandchild/main.tf b/internal/terraform/testdata/transform-provider-missing-grandchild/main.tf deleted file mode 100644 index 385674a891ed..000000000000 --- a/internal/terraform/testdata/transform-provider-missing-grandchild/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "sub" { - source = "./sub" -} diff --git a/internal/terraform/testdata/transform-provider-missing-grandchild/sub/main.tf b/internal/terraform/testdata/transform-provider-missing-grandchild/sub/main.tf deleted file mode 100644 index 65adf2d1ccc2..000000000000 --- a/internal/terraform/testdata/transform-provider-missing-grandchild/sub/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "foo" {} - -module "subsub" { - source = "./subsub" -} diff --git a/internal/terraform/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf b/internal/terraform/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf deleted file mode 100644 index fd865a52501e..000000000000 --- a/internal/terraform/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource 
"foo_instance" "one" {} -resource "bar_instance" "two" {} diff --git a/internal/terraform/testdata/transform-provider-missing/main.tf b/internal/terraform/testdata/transform-provider-missing/main.tf deleted file mode 100644 index 976f3e5af843..000000000000 --- a/internal/terraform/testdata/transform-provider-missing/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "aws" {} -resource "aws_instance" "web" {} -resource "foo_instance" "web" {} diff --git a/internal/terraform/testdata/transform-provider-prune/main.tf b/internal/terraform/testdata/transform-provider-prune/main.tf deleted file mode 100644 index 986f8840bf92..000000000000 --- a/internal/terraform/testdata/transform-provider-prune/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -provider "aws" {} -resource "foo_instance" "web" {} diff --git a/internal/terraform/testdata/transform-provisioner-basic/main.tf b/internal/terraform/testdata/transform-provisioner-basic/main.tf deleted file mode 100644 index 3898ac4dbe1d..000000000000 --- a/internal/terraform/testdata/transform-provisioner-basic/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/transform-provisioner-module/child/main.tf b/internal/terraform/testdata/transform-provisioner-module/child/main.tf deleted file mode 100644 index 51b29c72a082..000000000000 --- a/internal/terraform/testdata/transform-provisioner-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/transform-provisioner-module/main.tf b/internal/terraform/testdata/transform-provisioner-module/main.tf deleted file mode 100644 index a825a449eb1b..000000000000 --- a/internal/terraform/testdata/transform-provisioner-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - provisioner "shell" {} -} - -module "child" { - source = "./child" -} diff --git 
a/internal/terraform/testdata/transform-root-basic/main.tf b/internal/terraform/testdata/transform-root-basic/main.tf deleted file mode 100644 index e4ff4b3e9057..000000000000 --- a/internal/terraform/testdata/transform-root-basic/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" {} -resource "aws_instance" "foo" {} - -provider "do" {} -resource "do_droplet" "bar" {} diff --git a/internal/terraform/testdata/transform-targets-basic/main.tf b/internal/terraform/testdata/transform-targets-basic/main.tf deleted file mode 100644 index 47edc2a7fef7..000000000000 --- a/internal/terraform/testdata/transform-targets-basic/main.tf +++ /dev/null @@ -1,22 +0,0 @@ -resource "aws_vpc" "me" {} - -resource "aws_subnet" "me" { - depends_on = [ - aws_vpc.me, - ] -} - -resource "aws_instance" "me" { - depends_on = [ - aws_subnet.me, - ] -} - -resource "aws_vpc" "notme" {} -resource "aws_subnet" "notme" {} -resource "aws_instance" "notme" {} -resource "aws_instance" "notmeeither" { - depends_on = [ - aws_instance.me, - ] -} diff --git a/internal/terraform/testdata/transform-targets-downstream/child/child.tf b/internal/terraform/testdata/transform-targets-downstream/child/child.tf deleted file mode 100644 index 6548b794930b..000000000000 --- a/internal/terraform/testdata/transform-targets-downstream/child/child.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "aws_instance" "foo" { -} - -module "grandchild" { - source = "./grandchild" -} - -output "id" { - value = "${aws_instance.foo.id}" -} - -output "grandchild_id" { - value = "${module.grandchild.id}" -} diff --git a/internal/terraform/testdata/transform-targets-downstream/child/grandchild/grandchild.tf b/internal/terraform/testdata/transform-targets-downstream/child/grandchild/grandchild.tf deleted file mode 100644 index 3ad8fd077013..000000000000 --- a/internal/terraform/testdata/transform-targets-downstream/child/grandchild/grandchild.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "foo" { -} - -output "id" { - value = 
"${aws_instance.foo.id}" -} diff --git a/internal/terraform/testdata/transform-targets-downstream/main.tf b/internal/terraform/testdata/transform-targets-downstream/main.tf deleted file mode 100644 index b732fdad7ea8..000000000000 --- a/internal/terraform/testdata/transform-targets-downstream/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -resource "aws_instance" "foo" { -} - -module "child" { - source = "./child" -} - -output "root_id" { - value = "${aws_instance.foo.id}" -} - -output "child_id" { - value = "${module.child.id}" -} - -output "grandchild_id" { - value = "${module.child.grandchild_id}" -} diff --git a/internal/terraform/testdata/transform-trans-reduce-basic/main.tf b/internal/terraform/testdata/transform-trans-reduce-basic/main.tf deleted file mode 100644 index 4fb97c7a7b9a..000000000000 --- a/internal/terraform/testdata/transform-trans-reduce-basic/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_instance" "A" {} - -resource "aws_instance" "B" { - A = "${aws_instance.A.id}" -} - -resource "aws_instance" "C" { - A = "${aws_instance.A.id}" - B = "${aws_instance.B.id}" -} diff --git a/internal/terraform/testdata/update-resource-provider/main.tf b/internal/terraform/testdata/update-resource-provider/main.tf deleted file mode 100644 index 6c082d540815..000000000000 --- a/internal/terraform/testdata/update-resource-provider/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - alias = "foo" -} - -resource "aws_instance" "bar" { - provider = "aws.foo" -} diff --git a/internal/terraform/testdata/validate-bad-count/main.tf b/internal/terraform/testdata/validate-bad-count/main.tf deleted file mode 100644 index a582e5ee39ec..000000000000 --- a/internal/terraform/testdata/validate-bad-count/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - count = "${list}" -} diff --git a/internal/terraform/testdata/validate-bad-module-output/child/main.tf b/internal/terraform/testdata/validate-bad-module-output/child/main.tf deleted file mode 100644 
index e69de29bb2d1..000000000000 diff --git a/internal/terraform/testdata/validate-bad-module-output/main.tf b/internal/terraform/testdata/validate-bad-module-output/main.tf deleted file mode 100644 index bda34f51a4e2..000000000000 --- a/internal/terraform/testdata/validate-bad-module-output/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "${module.child.bad}" -} diff --git a/internal/terraform/testdata/validate-bad-pc/main.tf b/internal/terraform/testdata/validate-bad-pc/main.tf deleted file mode 100644 index 70ad701e6cbc..000000000000 --- a/internal/terraform/testdata/validate-bad-pc/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "test" {} diff --git a/internal/terraform/testdata/validate-bad-prov-conf/main.tf b/internal/terraform/testdata/validate-bad-prov-conf/main.tf deleted file mode 100644 index af12124b3fa7..000000000000 --- a/internal/terraform/testdata/validate-bad-prov-conf/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "test" { - provisioner "shell" { - test_string = "foo" - } -} diff --git a/internal/terraform/testdata/validate-bad-prov-connection/main.tf b/internal/terraform/testdata/validate-bad-prov-connection/main.tf deleted file mode 100644 index 550714ff1d1a..000000000000 --- a/internal/terraform/testdata/validate-bad-prov-connection/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - provisioner "shell" { - test_string = "test" - connection { - user = "test" - } - } -} diff --git a/internal/terraform/testdata/validate-bad-rc/main.tf b/internal/terraform/testdata/validate-bad-rc/main.tf deleted file mode 100644 index 152a23e0d864..000000000000 --- a/internal/terraform/testdata/validate-bad-rc/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "test" { - foo = "bar" -} diff --git 
a/internal/terraform/testdata/validate-bad-resource-connection/main.tf b/internal/terraform/testdata/validate-bad-resource-connection/main.tf deleted file mode 100644 index 46a16717591c..000000000000 --- a/internal/terraform/testdata/validate-bad-resource-connection/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - connection { - user = "test" - } - provisioner "shell" { - test_string = "test" - } -} diff --git a/internal/terraform/testdata/validate-bad-resource-count/main.tf b/internal/terraform/testdata/validate-bad-resource-count/main.tf deleted file mode 100644 index f852a447eadb..000000000000 --- a/internal/terraform/testdata/validate-bad-resource-count/main.tf +++ /dev/null @@ -1,22 +0,0 @@ -// a resource named "aws_security_groups" does not exist in the schema -variable "sg_ports" { - type = list(number) - description = "List of ingress ports" - default = [8200, 8201, 8300, 9200, 9500] -} - - -resource "aws_security_groups" "dynamicsg" { - name = "dynamicsg" - description = "Ingress for Vault" - - dynamic "ingress" { - for_each = var.sg_ports - content { - from_port = ingress.value - to_port = ingress.value - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - } -} diff --git a/internal/terraform/testdata/validate-bad-var/main.tf b/internal/terraform/testdata/validate-bad-var/main.tf deleted file mode 100644 index 50028453d416..000000000000 --- a/internal/terraform/testdata/validate-bad-var/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" -} - -resource "aws_instance" "bar" { - foo = "${var.foo}" -} diff --git a/internal/terraform/testdata/validate-computed-in-function/main.tf b/internal/terraform/testdata/validate-computed-in-function/main.tf deleted file mode 100644 index 504e19426128..000000000000 --- a/internal/terraform/testdata/validate-computed-in-function/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -data "aws_data_source" "foo" { - optional_attr = "value" -} - -resource "aws_instance" "bar" { - attr 
= "${length(data.aws_data_source.foo.computed)}" -} diff --git a/internal/terraform/testdata/validate-computed-module-var-ref/dest/main.tf b/internal/terraform/testdata/validate-computed-module-var-ref/dest/main.tf deleted file mode 100644 index 44095ea75422..000000000000 --- a/internal/terraform/testdata/validate-computed-module-var-ref/dest/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "destin" { } - -resource "aws_instance" "dest" { - attr = "${var.destin}" -} diff --git a/internal/terraform/testdata/validate-computed-module-var-ref/main.tf b/internal/terraform/testdata/validate-computed-module-var-ref/main.tf deleted file mode 100644 index d7c799cc8b64..000000000000 --- a/internal/terraform/testdata/validate-computed-module-var-ref/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "source" { - source = "./source" -} - -module "dest" { - source = "./dest" - destin = "${module.source.sourceout}" -} diff --git a/internal/terraform/testdata/validate-computed-module-var-ref/source/main.tf b/internal/terraform/testdata/validate-computed-module-var-ref/source/main.tf deleted file mode 100644 index d2edc9e0f170..000000000000 --- a/internal/terraform/testdata/validate-computed-module-var-ref/source/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "source" { - attr = "foo" -} - -output "sourceout" { - value = "${aws_instance.source.attr}" -} diff --git a/internal/terraform/testdata/validate-computed-var/main.tf b/internal/terraform/testdata/validate-computed-var/main.tf deleted file mode 100644 index 81acf7cfaa9d..000000000000 --- a/internal/terraform/testdata/validate-computed-var/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" { - value = test_instance.foo.id -} - -resource "aws_instance" "bar" {} - -resource "test_instance" "foo" { - value = "yes" -} diff --git a/internal/terraform/testdata/validate-count-computed/main.tf b/internal/terraform/testdata/validate-count-computed/main.tf deleted file mode 100644 index e7de125f2263..000000000000 --- 
a/internal/terraform/testdata/validate-count-computed/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -data "aws_data_source" "foo" { - compute = "value" -} - -resource "aws_instance" "bar" { - count = "${data.aws_data_source.foo.value}" -} diff --git a/internal/terraform/testdata/validate-count-negative/main.tf b/internal/terraform/testdata/validate-count-negative/main.tf deleted file mode 100644 index d5bb046533d9..000000000000 --- a/internal/terraform/testdata/validate-count-negative/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "test" { - count = "-5" -} diff --git a/internal/terraform/testdata/validate-count-variable/main.tf b/internal/terraform/testdata/validate-count-variable/main.tf deleted file mode 100644 index 9c892ac2eac8..000000000000 --- a/internal/terraform/testdata/validate-count-variable/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -variable "foo" {} - -resource "aws_instance" "foo" { - foo = "foo" - count = "${var.foo}" -} diff --git a/internal/terraform/testdata/validate-good-module/child/main.tf b/internal/terraform/testdata/validate-good-module/child/main.tf deleted file mode 100644 index 17d8c60a7722..000000000000 --- a/internal/terraform/testdata/validate-good-module/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "good" { - value = "great" -} diff --git a/internal/terraform/testdata/validate-good-module/main.tf b/internal/terraform/testdata/validate-good-module/main.tf deleted file mode 100644 index 439d20210c49..000000000000 --- a/internal/terraform/testdata/validate-good-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "bar" { - foo = "${module.child.good}" -} diff --git a/internal/terraform/testdata/validate-good/main.tf b/internal/terraform/testdata/validate-good/main.tf deleted file mode 100644 index fe44019b7dad..000000000000 --- a/internal/terraform/testdata/validate-good/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - foo = "bar" 
-} - -resource "aws_instance" "bar" { - foo = "bar" -} diff --git a/internal/terraform/testdata/validate-module-bad-rc/child/main.tf b/internal/terraform/testdata/validate-module-bad-rc/child/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/validate-module-bad-rc/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/validate-module-bad-rc/main.tf b/internal/terraform/testdata/validate-module-bad-rc/main.tf deleted file mode 100644 index 0f6991c536ca..000000000000 --- a/internal/terraform/testdata/validate-module-bad-rc/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/internal/terraform/testdata/validate-module-deps-cycle/a/main.tf b/internal/terraform/testdata/validate-module-deps-cycle/a/main.tf deleted file mode 100644 index 3d3b01634eb6..000000000000 --- a/internal/terraform/testdata/validate-module-deps-cycle/a/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "a" { } - -output "output" { - value = "${aws_instance.a.id}" -} diff --git a/internal/terraform/testdata/validate-module-deps-cycle/b/main.tf b/internal/terraform/testdata/validate-module-deps-cycle/b/main.tf deleted file mode 100644 index 0f8fc9116e63..000000000000 --- a/internal/terraform/testdata/validate-module-deps-cycle/b/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "input" {} - -resource "aws_instance" "b" { - id = "${var.input}" -} diff --git a/internal/terraform/testdata/validate-module-deps-cycle/main.tf b/internal/terraform/testdata/validate-module-deps-cycle/main.tf deleted file mode 100644 index 11ddb64bfa7f..000000000000 --- a/internal/terraform/testdata/validate-module-deps-cycle/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "a" { - source = "./a" -} - -module "b" { - source = "./b" - input = "${module.a.output}" -} diff --git a/internal/terraform/testdata/validate-module-pc-inherit-unused/child/main.tf 
b/internal/terraform/testdata/validate-module-pc-inherit-unused/child/main.tf deleted file mode 100644 index 919f140bba6b..000000000000 --- a/internal/terraform/testdata/validate-module-pc-inherit-unused/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/validate-module-pc-inherit-unused/main.tf b/internal/terraform/testdata/validate-module-pc-inherit-unused/main.tf deleted file mode 100644 index 32c8a38f1e6f..000000000000 --- a/internal/terraform/testdata/validate-module-pc-inherit-unused/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -provider "aws" { - foo = "set" -} diff --git a/internal/terraform/testdata/validate-module-pc-inherit/child/main.tf b/internal/terraform/testdata/validate-module-pc-inherit/child/main.tf deleted file mode 100644 index 37189c1ffb66..000000000000 --- a/internal/terraform/testdata/validate-module-pc-inherit/child/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "aws" {} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/validate-module-pc-inherit/main.tf b/internal/terraform/testdata/validate-module-pc-inherit/main.tf deleted file mode 100644 index 8976f4aa9f10..000000000000 --- a/internal/terraform/testdata/validate-module-pc-inherit/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "child" { - source = "./child" -} - -provider "aws" { - set = true -} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/validate-module-pc-vars/child/main.tf b/internal/terraform/testdata/validate-module-pc-vars/child/main.tf deleted file mode 100644 index 380cd465a398..000000000000 --- a/internal/terraform/testdata/validate-module-pc-vars/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "value" {} - -provider "aws" { - foo = var.value -} - -resource "aws_instance" "foo" {} diff --git a/internal/terraform/testdata/validate-module-pc-vars/main.tf 
b/internal/terraform/testdata/validate-module-pc-vars/main.tf deleted file mode 100644 index 5e239b406652..000000000000 --- a/internal/terraform/testdata/validate-module-pc-vars/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "provider_var" {} - -module "child" { - source = "./child" - - value = var.provider_var -} diff --git a/internal/terraform/testdata/validate-required-provider-config/main.tf b/internal/terraform/testdata/validate-required-provider-config/main.tf deleted file mode 100644 index 898a23fdf251..000000000000 --- a/internal/terraform/testdata/validate-required-provider-config/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -# This test verifies that the provider local name, local config and fqn map -# together properly when the local name does not match the type. - -terraform { - required_providers { - arbitrary = { - source = "hashicorp/aws" - } - } -} - -# hashicorp/test has required provider config attributes. This "arbitrary" -# provider configuration block should map to hashicorp/test. 
-provider "arbitrary" { - required_attribute = "bloop" -} - -resource "aws_instance" "test" { - provider = "arbitrary" -} diff --git a/internal/terraform/testdata/validate-required-var/main.tf b/internal/terraform/testdata/validate-required-var/main.tf deleted file mode 100644 index bd55ea11bf75..000000000000 --- a/internal/terraform/testdata/validate-required-var/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "foo" {} - -resource "aws_instance" "web" { - ami = "${var.foo}" -} diff --git a/internal/terraform/testdata/validate-sensitive-provisioner-config/main.tf b/internal/terraform/testdata/validate-sensitive-provisioner-config/main.tf deleted file mode 100644 index 88a37275a835..000000000000 --- a/internal/terraform/testdata/validate-sensitive-provisioner-config/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "secret" { - type = string - default = " password123" - sensitive = true -} - -resource "aws_instance" "foo" { - provisioner "test" { - test_string = var.secret - } -} diff --git a/internal/terraform/testdata/validate-skipped-pc-empty/main.tf b/internal/terraform/testdata/validate-skipped-pc-empty/main.tf deleted file mode 100644 index 1ad9ade8948f..000000000000 --- a/internal/terraform/testdata/validate-skipped-pc-empty/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "test" {} diff --git a/internal/terraform/testdata/validate-targeted/main.tf b/internal/terraform/testdata/validate-targeted/main.tf deleted file mode 100644 index a1e847d9a0e4..000000000000 --- a/internal/terraform/testdata/validate-targeted/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" { - num = "2" - provisioner "shell" {} -} - -resource "aws_instance" "bar" { - foo = "bar" - provisioner "shell" {} -} diff --git a/internal/terraform/testdata/validate-var-no-default-explicit-type/main.tf b/internal/terraform/testdata/validate-var-no-default-explicit-type/main.tf deleted file mode 100644 index 5953eab4da98..000000000000 --- 
a/internal/terraform/testdata/validate-var-no-default-explicit-type/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "maybe_a_map" { - type = map(string) - - // No default -} diff --git a/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf b/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf deleted file mode 100644 index 05027f75ade6..000000000000 --- a/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "test" { - type = string - - validation { - condition = var.test != "nope" - error_message = "Value must not be \"nope\"." - } -} diff --git a/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf b/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf deleted file mode 100644 index 4f436db11a3d..000000000000 --- a/internal/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf +++ /dev/null @@ -1,10 +0,0 @@ -variable "test" { - sensitive = true - default = "nope" -} - -module "child" { - source = "./child" - - test = var.test -} diff --git a/internal/terraform/testdata/validate-variable-custom-validations-child/child/child.tf b/internal/terraform/testdata/validate-variable-custom-validations-child/child/child.tf deleted file mode 100644 index 05027f75ade6..000000000000 --- a/internal/terraform/testdata/validate-variable-custom-validations-child/child/child.tf +++ /dev/null @@ -1,8 +0,0 @@ -variable "test" { - type = string - - validation { - condition = var.test != "nope" - error_message = "Value must not be \"nope\"." 
- } -} diff --git a/internal/terraform/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf b/internal/terraform/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf deleted file mode 100644 index 8b8111e675c9..000000000000 --- a/internal/terraform/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf +++ /dev/null @@ -1,5 +0,0 @@ -module "child" { - source = "./child" - - test = "nope" -} diff --git a/internal/terraform/testdata/validate-variable-ref/main.tf b/internal/terraform/testdata/validate-variable-ref/main.tf deleted file mode 100644 index 3bc9860b6029..000000000000 --- a/internal/terraform/testdata/validate-variable-ref/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "foo" {} - -resource "aws_instance" "bar" { - foo = "${var.foo}" -} diff --git a/internal/terraform/testdata/vars-basic-bool/main.tf b/internal/terraform/testdata/vars-basic-bool/main.tf deleted file mode 100644 index 52d90595a275..000000000000 --- a/internal/terraform/testdata/vars-basic-bool/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -// At the time of writing Terraform doesn't formally support a boolean -// type, but historically this has magically worked. Lots of TF code -// relies on this so we test it now. 
-variable "a" { - default = true -} - -variable "b" { - default = false -} diff --git a/internal/terraform/testdata/vars-basic/main.tf b/internal/terraform/testdata/vars-basic/main.tf deleted file mode 100644 index af3ba5cc6954..000000000000 --- a/internal/terraform/testdata/vars-basic/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -variable "a" { - default = "foo" - type = string -} - -variable "b" { - default = [] - type = list(string) -} - -variable "c" { - default = {} - type = map(string) -} diff --git a/internal/terraform/transform.go b/internal/terraform/transform.go deleted file mode 100644 index 2cc812ffe477..000000000000 --- a/internal/terraform/transform.go +++ /dev/null @@ -1,52 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/logging" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error -} - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within with graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. 
-type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - var lastStepStr string - for _, t := range t.Transforms { - log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) - if err := t.Transform(g); err != nil { - return err - } - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s ------", t, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers. -func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/internal/terraform/transform_attach_config_provider.go b/internal/terraform/transform_attach_config_provider.go deleted file mode 100644 index 95153eacedb7..000000000000 --- a/internal/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,16 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // ProviderName with no module prefix. Example: "aws". 
- ProviderAddr() addrs.AbsProviderConfig - - // Sets the configuration - AttachProvider(*configs.Provider) -} diff --git a/internal/terraform/transform_attach_config_provider_meta.go b/internal/terraform/transform_attach_config_provider_meta.go deleted file mode 100644 index d79df26fa0a0..000000000000 --- a/internal/terraform/transform_attach_config_provider_meta.go +++ /dev/null @@ -1,15 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// GraphNodeAttachProviderMetaConfigs is an interface that must be implemented -// by nodes that want provider meta configurations attached. -type GraphNodeAttachProviderMetaConfigs interface { - GraphNodeConfigResource - - // Sets the configuration - AttachProviderMetaConfigs(map[addrs.Provider]*configs.ProviderMeta) -} diff --git a/internal/terraform/transform_attach_config_resource.go b/internal/terraform/transform_attach_config_resource.go deleted file mode 100644 index e2468a009961..000000000000 --- a/internal/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,110 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. -type GraphNodeAttachResourceConfig interface { - GraphNodeConfigResource - - // Sets the configuration - AttachResourceConfig(*configs.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement -// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. 
-type AttachResourceConfigTransformer struct { - Config *configs.Config // Config is the root node in the config tree -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - - // Get the configuration. - config := t.Config.Descendent(addr.Module) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) - continue - } - - for _, r := range config.Module.ManagedResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - - // attach the provider_meta info - if gnapmc, ok := v.(GraphNodeAttachProviderMetaConfigs); ok { - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching provider meta configs to %s", dag.VertexName(v)) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no config set on the transformer for %s", dag.VertexName(v)) - continue - } - if config.Module == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no module in config for %s", dag.VertexName(v)) - continue - } - if config.Module.ProviderMetas == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no provider metas defined for %s", dag.VertexName(v)) - continue - } - gnapmc.AttachProviderMetaConfigs(config.Module.ProviderMetas) - } - } - for _, r := range config.Module.DataResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) 
config from %#v", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - - // attach the provider_meta info - if gnapmc, ok := v.(GraphNodeAttachProviderMetaConfigs); ok { - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching provider meta configs to %s", dag.VertexName(v)) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no config set on the transformer for %s", dag.VertexName(v)) - continue - } - if config.Module == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no module in config for %s", dag.VertexName(v)) - continue - } - if config.Module.ProviderMetas == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no provider metas defined for %s", dag.VertexName(v)) - continue - } - gnapmc.AttachProviderMetaConfigs(config.Module.ProviderMetas) - } - } - } - - return nil -} diff --git a/internal/terraform/transform_attach_schema.go b/internal/terraform/transform_attach_schema.go deleted file mode 100644 index 8f7a59083348..000000000000 --- a/internal/terraform/transform_attach_schema.go +++ /dev/null @@ -1,109 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/dag" -) - -// GraphNodeAttachResourceSchema is an interface implemented by node types -// that need a resource schema attached. -type GraphNodeAttachResourceSchema interface { - GraphNodeConfigResource - GraphNodeProviderConsumer - - AttachResourceSchema(schema *configschema.Block, version uint64) -} - -// GraphNodeAttachProviderConfigSchema is an interface implemented by node types -// that need a provider configuration schema attached. 
-type GraphNodeAttachProviderConfigSchema interface { - GraphNodeProvider - - AttachProviderConfigSchema(*configschema.Block) -} - -// GraphNodeAttachProvisionerSchema is an interface implemented by node types -// that need one or more provisioner schemas attached. -type GraphNodeAttachProvisionerSchema interface { - ProvisionedBy() []string - - // SetProvisionerSchema is called during transform for each provisioner - // type returned from ProvisionedBy, providing the configuration schema - // for each provisioner in turn. The implementer should save these for - // later use in evaluating provisioner configuration blocks. - AttachProvisionerSchema(name string, schema *configschema.Block) -} - -// AttachSchemaTransformer finds nodes that implement -// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or -// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each -// and then passes them to a method implemented by the node. -type AttachSchemaTransformer struct { - Plugins *contextPlugins - Config *configs.Config -} - -func (t *AttachSchemaTransformer) Transform(g *Graph) error { - if t.Plugins == nil { - // Should never happen with a reasonable caller, but we'll return a - // proper error here anyway so that we'll fail gracefully. 
- return fmt.Errorf("AttachSchemaTransformer used with nil Plugins") - } - - for _, v := range g.Vertices() { - - if tv, ok := v.(GraphNodeAttachResourceSchema); ok { - addr := tv.ResourceAddr() - mode := addr.Resource.Mode - typeName := addr.Resource.Type - providerFqn := tv.Provider() - - schema, version, err := t.Plugins.ResourceTypeSchema(providerFqn, mode, typeName) - if err != nil { - return fmt.Errorf("failed to read schema for %s in %s: %s", addr, providerFqn, err) - } - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) - tv.AttachResourceSchema(schema, version) - } - - if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { - providerAddr := tv.ProviderAddr() - schema, err := t.Plugins.ProviderConfigSchema(providerAddr.Provider) - if err != nil { - return fmt.Errorf("failed to read provider configuration schema for %s: %s", providerAddr.Provider, err) - } - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) - tv.AttachProviderConfigSchema(schema) - } - - if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { - names := tv.ProvisionedBy() - for _, name := range names { - schema, err := t.Plugins.ProvisionerSchema(name) - if err != nil { - return fmt.Errorf("failed to read provisioner configuration schema for %q: %s", name, err) - } - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) - tv.AttachProvisionerSchema(name, schema) - } - } - } - - return nil -} diff --git 
a/internal/terraform/transform_attach_state.go b/internal/terraform/transform_attach_state.go deleted file mode 100644 index 13694718c20f..000000000000 --- a/internal/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -// -// Due to a historical naming inconsistency, the type ResourceState actually -// represents the state for a particular _instance_, while InstanceState -// represents the values for that instance during a particular phase -// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState -// is supported only for nodes that represent resource instances, even though -// the name might suggest it is for containing resources. -type GraphNodeAttachResourceState interface { - GraphNodeResourceInstance - - // Sets the state - AttachResourceState(*states.Resource) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. -type AttachStateTransformer struct { - State *states.State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any node states: overall state is nil") - return nil - } - - for _, v := range g.Vertices() { - // Nodes implement this interface to request state attachment. 
- an, ok := v.(GraphNodeAttachResourceState) - if !ok { - continue - } - addr := an.ResourceInstanceAddr() - - rs := t.State.Resource(addr.ContainingResource()) - if rs == nil { - log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - is := rs.Instance(addr.Resource.Key) - if is == nil { - // We don't actually need this here, since we'll attach the whole - // resource state, but we still check because it'd be weird - // for the specific instance we're attaching to not to exist. - log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - // make sure to attach a copy of the state, so instances can modify the - // same ResourceState. - an.AttachResourceState(rs.DeepCopy()) - } - - return nil -} diff --git a/internal/terraform/transform_config.go b/internal/terraform/transform_config.go deleted file mode 100644 index 59fa1eeea9f8..000000000000 --- a/internal/terraform/transform_config.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" -) - -// ConfigTransformer is a GraphTransformer that adds all the resources -// from the configuration to the graph. -// -// The module used to configure this transformer must be the root module. -// -// Only resources are added to the graph. Variables, outputs, and -// providers must be added via other transforms. -// -// Unlike ConfigTransformerOld, this transformer creates a graph with -// all resources including module resources, rather than creating module -// nodes that are then "flattened". -type ConfigTransformer struct { - Concrete ConcreteResourceNodeFunc - - // Module is the module to add resources from. 
- Config *configs.Config - - // Mode will only add resources that match the given mode - ModeFilter bool - Mode addrs.ResourceMode - - // Do not apply this transformer. - skip bool - - // configuration resources that are to be imported - importTargets []*ImportTarget -} - -func (t *ConfigTransformer) Transform(g *Graph) error { - if t.skip { - return nil - } - - // If no configuration is available, we don't do anything - if t.Config == nil { - return nil - } - - // Start the transformation process - return t.transform(g, t.Config) -} - -func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { - // If no config, do nothing - if config == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, config); err != nil { - return err - } - - // Transform all the children. - for _, c := range config.Children { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { - path := config.Path - module := config.Module - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) - - allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) - for _, r := range module.ManagedResources { - allResources = append(allResources, r) - } - for _, r := range module.DataResources { - allResources = append(allResources, r) - } - - for _, r := range allResources { - relAddr := r.Addr() - - if t.ModeFilter && relAddr.Mode != t.Mode { - // Skip non-matching modes - continue - } - - // If any of the import targets can apply to this node's instances, - // filter them down to the applicable addresses. 
- var imports []*ImportTarget - configAddr := relAddr.InModule(path) - for _, i := range t.importTargets { - if target := i.Addr.ContainingResource().Config(); target.Equal(configAddr) { - imports = append(imports, i) - } - } - - abstract := &NodeAbstractResource{ - Addr: addrs.ConfigResource{ - Resource: relAddr, - Module: path, - }, - importTargets: imports, - } - - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/internal/terraform/transform_config_test.go b/internal/terraform/transform_config_test.go deleted file mode 100644 index ceed2bf35651..000000000000 --- a/internal/terraform/transform_config_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestConfigTransformer_nilModule(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - tf := &ConfigTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - if len(g.Vertices()) > 0 { - t.Fatalf("graph is not empty: %s", g.String()) - } -} - -func TestConfigTransformer(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - tf := &ConfigTransformer{Config: testModule(t, "graph-basic")} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testConfigTransformerGraphBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestConfigTransformer_mode(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - tf := &ConfigTransformer{ - Config: testModule(t, "transform-config-mode-data"), - ModeFilter: true, - Mode: addrs.DataResourceMode, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -data.aws_ami.foo -`) - if actual != expected { - 
t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestConfigTransformer_nonUnique(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(NewNodeAbstractResource( - addrs.RootModule.Resource( - addrs.ManagedResourceMode, "aws_instance", "web", - ), - )) - tf := &ConfigTransformer{Config: testModule(t, "graph-basic")} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -aws_instance.web -aws_instance.web -aws_load_balancer.weblb -aws_security_group.firewall -openstack_floating_ip.random -`) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -const testConfigTransformerGraphBasicStr = ` -aws_instance.web -aws_load_balancer.weblb -aws_security_group.firewall -openstack_floating_ip.random -` diff --git a/internal/terraform/transform_destroy_cbd.go b/internal/terraform/transform_destroy_cbd.go deleted file mode 100644 index 19dadbe5ae1a..000000000000 --- a/internal/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,150 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers, or might plan a create-before-destroy -// action. -type GraphNodeDestroyerCBD interface { - // CreateBeforeDestroy returns true if this node represents a node - // that is doing a CBD. - CreateBeforeDestroy() bool - - // ModifyCreateBeforeDestroy is called when the CBD state of a node - // is changed dynamically. This can return an error if this isn't - // allowed. - ModifyCreateBeforeDestroy(bool) error -} - -// ForcedCBDTransformer detects when a particular CBD-able graph node has -// dependencies with another that has create_before_destroy set that require -// it to be forced on, and forces it on. 
-// -// This must be used in the plan graph builder to ensure that -// create_before_destroy settings are properly propagated before constructing -// the planned changes. This requires that the plannable resource nodes -// implement GraphNodeDestroyerCBD. -type ForcedCBDTransformer struct { -} - -func (t *ForcedCBDTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - // If there are no CBD decendent (dependent nodes), then we - // do nothing here. - if !t.hasCBDDescendent(g, v) { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) - continue - } - - // If this isn't naturally a CBD node, this means that an descendent is - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } - } else { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) - } - } - return nil -} - -// hasCBDDescendent returns true if any descendent (node that depends on this) -// has CBD set. 
-func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { - s, _ := g.Descendents(v) - if s == nil { - return true - } - - for _, ov := range s { - dn, ok := ov.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some descendent is CreateBeforeDestroy, so we need to follow suit - log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) - return true - } - } - - return false -} - -// CBDEdgeTransformer modifies the edges of create-before-destroy ("CBD") nodes -// that went through the DestroyEdgeTransformer so that they will have the -// correct dependencies. There are two parts to this: -// -// 1. With CBD, the destroy edge is inverted: the destroy depends on -// the creation. -// -// 2. Destroy for A must depend on resources that depend on A. This is to -// allow the destroy to only happen once nodes that depend on A successfully -// update to A. Example: adding a web server updates the load balancer -// before deleting the old web server. -// -// This transformer requires that a previous transformer has already forced -// create_before_destroy on for nodes that are depended on by explicit CBD -// nodes. This is the logic in ForcedCBDTransformer, though in practice we -// will get here by recording the CBD-ness of each change in the plan during -// the plan walk and then forcing the nodes into the appropriate setting during -// DiffTransformer when building the apply graph. -type CBDEdgeTransformer struct { - // Module and State are only needed to look up dependencies in - // any way possible. Either can be nil if not availabile. 
- Config *configs.Config - State *states.State -} - -func (t *CBDEdgeTransformer) Transform(g *Graph) error { - // Go through and reverse any destroy edges - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - if _, ok = v.(GraphNodeDestroyer); !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - continue - } - - // Find the resource edges - for _, e := range g.EdgesTo(v) { - src := e.Source() - - // If source is a create node, invert the edge. - // This covers both the node's own creator, as well as reversing - // any dependants' edges. - if _, ok := src.(GraphNodeCreator); ok { - log.Printf("[TRACE] CBDEdgeTransformer: reversing edge %s -> %s", dag.VertexName(src), dag.VertexName(v)) - g.RemoveEdge(e) - g.Connect(dag.BasicEdge(v, src)) - } - } - } - return nil -} diff --git a/internal/terraform/transform_destroy_cbd_test.go b/internal/terraform/transform_destroy_cbd_test.go deleted file mode 100644 index 8f5712b57449..000000000000 --- a/internal/terraform/transform_destroy_cbd_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package terraform - -import ( - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" -) - -func cbdTestGraph(t *testing.T, mod string, changes *plans.Changes, state *states.State) *Graph { - module := testModule(t, mod) - - applyBuilder := &ApplyGraphBuilder{ - Config: module, - Changes: changes, - Plugins: simpleMockPluginLibrary(), - State: state, - } - g, err := (&BasicGraphBuilder{ - Steps: cbdTestSteps(applyBuilder.Steps()), - Name: "ApplyGraphBuilder", - }).Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - return filterInstances(g) -} - -// override the apply graph builder to halt the process after CBD -func cbdTestSteps(steps []GraphTransformer) []GraphTransformer { - found := false - var i int - var t GraphTransformer - for i, 
t = range steps { - if _, ok := t.(*CBDEdgeTransformer); ok { - found = true - break - } - } - - if !found { - panic("CBDEdgeTransformer not found") - } - - // re-add the root node so we have a valid graph for a walk, then reduce - // the graph for less output - steps = append(steps[:i+1], &CloseRootModuleTransformer{}) - steps = append(steps, &TransitiveReductionTransformer{}) - - return steps -} - -// remove extra nodes for easier test comparisons -func filterInstances(g *Graph) *Graph { - for _, v := range g.Vertices() { - if _, ok := v.(GraphNodeResourceInstance); !ok { - g.Remove(v) - } - - } - return g -} - -func TestCBDEdgeTransformer(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - g := cbdTestGraph(t, "transform-destroy-cbd-edge-basic", changes, state) - g = filterInstances(g) - - actual := strings.TrimSpace(g.String()) - expected := regexp.MustCompile(strings.TrimSpace(` -(?m)test_object.A -test_object.A \(destroy deposed \w+\) - 
test_object.B -test_object.B - test_object.A -`)) - - if !expected.MatchString(actual) { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestCBDEdgeTransformerMulti(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.C"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.C").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"C","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{ - mustConfigResourceAddr("test_object.A"), - mustConfigResourceAddr("test_object.B"), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - g := cbdTestGraph(t, "transform-destroy-cbd-edge-multi", changes, state) - g = filterInstances(g) - - actual := strings.TrimSpace(g.String()) - expected := regexp.MustCompile(strings.TrimSpace(` -(?m)test_object.A -test_object.A \(destroy deposed \w+\) - 
test_object.C -test_object.B -test_object.B \(destroy deposed \w+\) - test_object.C -test_object.C - test_object.A - test_object.B -`)) - - if !expected.MatchString(actual) { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestCBDEdgeTransformer_depNonCBDCount(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B[0]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B[1]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - g := cbdTestGraph(t, "transform-cbd-destroy-edge-count", changes, state) - - actual := 
strings.TrimSpace(g.String()) - expected := regexp.MustCompile(strings.TrimSpace(` -(?m)test_object.A -test_object.A \(destroy deposed \w+\) - test_object.B\[0\] - test_object.B\[1\] -test_object.B\[0\] - test_object.A -test_object.B\[1\] - test_object.A`)) - - if !expected.MatchString(actual) { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestCBDEdgeTransformer_depNonCBDCountBoth(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.A[0]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.A[1]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.CreateThenDelete, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B[0]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - { - Addr: mustResourceInstanceAddr("test_object.B[1]"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - }, - }, - }, - } - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - g := cbdTestGraph(t, "transform-cbd-destroy-edge-both-count", changes, state) - - actual := strings.TrimSpace(g.String()) - expected := regexp.MustCompile(strings.TrimSpace(` -test_object.A\[0\] -test_object.A\[0\] \(destroy deposed \w+\) - test_object.B\[0\] - test_object.B\[1\] -test_object.A\[1\] -test_object.A\[1\] \(destroy deposed \w+\) - test_object.B\[0\] - test_object.B\[1\] -test_object.B\[0\] - test_object.A\[0\] - test_object.A\[1\] -test_object.B\[1\] - test_object.A\[0\] - test_object.A\[1\] -`)) - - if !expected.MatchString(actual) { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} diff --git a/internal/terraform/transform_destroy_edge.go b/internal/terraform/transform_destroy_edge.go deleted file mode 100644 index 3f87e7edf375..000000000000 --- a/internal/terraform/transform_destroy_edge.go +++ /dev/null @@ -1,374 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/plans" -) - -// GraphNodeDestroyer must be implemented by nodes that destroy resources. -type GraphNodeDestroyer interface { - dag.Vertex - - // DestroyAddr is the address of the resource that is being - // destroyed by this node. If this returns nil, then this node - // is not destroying anything. - DestroyAddr() *addrs.AbsResourceInstance -} - -// GraphNodeCreator must be implemented by nodes that create OR update resources. 
-type GraphNodeCreator interface { - // CreateAddr is the address of the resource being created or updated - CreateAddr() *addrs.AbsResourceInstance -} - -// DestroyEdgeTransformer is a GraphTransformer that creates the proper -// references for destroy resources. Destroy resources are more complex -// in that they must be depend on the destruction of resources that -// in turn depend on the CREATION of the node being destroy. -// -// That is complicated. Visually: -// -// B_d -> A_d -> A -> B -// -// Notice that A destroy depends on B destroy, while B create depends on -// A create. They're inverted. This must be done for example because often -// dependent resources will block parent resources from deleting. Concrete -// example: VPC with subnets, the VPC can't be deleted while there are -// still subnets. -type DestroyEdgeTransformer struct { - // FIXME: GraphNodeCreators are not always applying changes, and should not - // participate in the destroy graph if there are no operations which could - // interract with destroy nodes. We need Changes for now to detect the - // action type, but perhaps this should be indicated somehow by the - // DiffTransformer which was intended to be the only transformer operating - // from the change set. - Changes *plans.Changes - - // FIXME: Operation will not be needed here one we can better track - // inter-provider dependencies and remove the cycle checks in - // tryInterProviderDestroyEdge. - Operation walkOperation -} - -// tryInterProviderDestroyEdge checks if we're inserting a destroy edge -// across a provider boundary, and only adds the edge if it results in no cycles. -// -// FIXME: The cycles can arise in valid configurations when a provider depends -// on resources from another provider. In the future we may want to inspect -// the dependencies of the providers themselves, to avoid needing to use the -// blunt hammer of checking for cycles. 
-// -// A reduced example of this dependency problem looks something like: -/* - -createA <- createB - | \ / | - | providerB <- | - v \ v -destroyA -------------> destroyB - -*/ -// -// The edge from destroyA to destroyB would be skipped in this case, but there -// are still other combinations of changes which could connect the A and B -// groups around providerB in various ways. -// -// The most difficult problem here happens during a full destroy operation. -// That creates a special case where resources on which a provider depends must -// exist for evaluation before they are destroyed. This means that any provider -// dependencies must wait until all that provider's resources have first been -// destroyed. This is where these cross-provider edges are still required to -// ensure the correct order. -func (t *DestroyEdgeTransformer) tryInterProviderDestroyEdge(g *Graph, from, to dag.Vertex) { - e := dag.BasicEdge(from, to) - g.Connect(e) - - // If this is a complete destroy operation, then there are no create/update - // nodes to worry about and we can accept the edge without deeper inspection. - if t.Operation == walkDestroy { - return - } - - // getComparableProvider inspects the node to try and get the most precise - // description of the provider being used to help determine if 2 nodes are - // from the same provider instance. - getComparableProvider := func(pc GraphNodeProviderConsumer) string { - ps := pc.Provider().String() - - // we don't care about `exact` here, since we're only looking for any - // clue that the providers may differ. 
- p, _ := pc.ProvidedBy() - switch p := p.(type) { - case addrs.AbsProviderConfig: - ps = p.String() - case addrs.LocalProviderConfig: - ps = p.String() - } - - return ps - } - - pc, ok := from.(GraphNodeProviderConsumer) - if !ok { - return - } - fromProvider := getComparableProvider(pc) - - pc, ok = to.(GraphNodeProviderConsumer) - if !ok { - return - } - toProvider := getComparableProvider(pc) - - // Check for cycles, and back out the edge if there are any. - // The cycles we are looking for only appears between providers, so don't - // waste time checking for cycles if both nodes use the same provider. - if fromProvider != toProvider && len(g.Cycles()) > 0 { - log.Printf("[DEBUG] DestroyEdgeTransformer: skipping inter-provider edge %s->%s which creates a cycle", - dag.VertexName(from), dag.VertexName(to)) - g.RemoveEdge(e) - } -} - -func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. - destroyers := make(map[string][]GraphNodeDestroyer) - - // Record the creators, which will need to depend on the destroyers if they - // are only being updated. - creators := make(map[string][]GraphNodeCreator) - - // destroyersByResource records each destroyer by the ConfigResource - // address. We use this because dependencies are only referenced as - // resources and have no index or module instance information, but we will - // want to connect all the individual instances for correct ordering. 
- destroyersByResource := make(map[string][]GraphNodeDestroyer) - for _, v := range g.Vertices() { - switch n := v.(type) { - case GraphNodeDestroyer: - addrP := n.DestroyAddr() - if addrP == nil { - log.Printf("[WARN] DestroyEdgeTransformer: %q (%T) has no destroy address", dag.VertexName(n), v) - continue - } - addr := *addrP - - key := addr.String() - log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(n), v, key) - destroyers[key] = append(destroyers[key], n) - - resAddr := addr.ContainingResource().Config().String() - destroyersByResource[resAddr] = append(destroyersByResource[resAddr], n) - case GraphNodeCreator: - addr := n.CreateAddr() - cfgAddr := addr.ContainingResource().Config().String() - - if t.Changes == nil { - // unit tests may not have changes - creators[cfgAddr] = append(creators[cfgAddr], n) - break - } - - // NoOp changes should not participate in the destroy dependencies. - rc := t.Changes.ResourceInstance(*addr) - if rc != nil && rc.Action != plans.NoOp { - creators[cfgAddr] = append(creators[cfgAddr], n) - } - } - } - - // If we aren't destroying anything, there will be no edges to make - // so just exit early and avoid future work. - if len(destroyers) == 0 { - return nil - } - - // Go through and connect creators to destroyers. 
Going along with - // our example, this makes: A_d => A - for _, v := range g.Vertices() { - cn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - addr := cn.CreateAddr() - if addr == nil { - continue - } - - for _, d := range destroyers[addr.String()] { - // For illustrating our example - a_d := d.(dag.Vertex) - a := v - - log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", - dag.VertexName(a), dag.VertexName(a_d)) - - g.Connect(dag.BasicEdge(a, a_d)) - } - } - - // connect creators to any destroyers on which they may depend - for _, cs := range creators { - for _, c := range cs { - ri, ok := c.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(c, desDep) { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(c), dag.VertexName(desDep)) - g.Connect(dag.BasicEdge(c, desDep)) - } else { - log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(c), dag.VertexName(desDep)) - } - } - } - } - } - - // Connect destroy dependencies as stored in the state - for _, ds := range destroyers { - for _, des := range ds { - ri, ok := des.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(desDep, des) { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des)) - t.tryInterProviderDestroyEdge(g, desDep, des) - } else { - log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(desDep), dag.VertexName(des)) - } - } - - // We can have some 
create or update nodes which were - // dependents of the destroy node. If they have no destroyer - // themselves, make the connection directly from the creator. - for _, createDep := range creators[resAddr.String()] { - if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(createDep, des) { - log.Printf("[DEBUG] DestroyEdgeTransformer2: %s has stored dependency of %s\n", dag.VertexName(createDep), dag.VertexName(des)) - t.tryInterProviderDestroyEdge(g, createDep, des) - } else { - log.Printf("[TRACE] DestroyEdgeTransformer2: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(createDep), dag.VertexName(des)) - } - } - } - } - } - - return nil -} - -// Remove any nodes that aren't needed when destroying modules. -// Variables, outputs, locals, and expanders may not be able to evaluate -// correctly, so we can remove these if nothing depends on them. The module -// closers also need to disable their use of expansion if the module itself is -// no longer present. -type pruneUnusedNodesTransformer struct { - // The plan graph builder will skip this transformer except during a full - // destroy. Planing normally involves all nodes, but during a destroy plan - // we may need to prune things which are in the configuration but do not - // exist in state to evaluate. - skip bool -} - -func (t *pruneUnusedNodesTransformer) Transform(g *Graph) error { - if t.skip { - return nil - } - - // We need a reverse depth first walk of modules, processing them in order - // from the leaf modules to the root. This allows us to remove unneeded - // dependencies from child modules, freeing up nodes in the parent module - // to also be removed. 
- - nodes := g.Vertices() - - for removed := true; removed; { - removed = false - - for i := 0; i < len(nodes); i++ { - // run this in a closure, so we can return early rather than - // dealing with complex looping and labels - func() { - n := nodes[i] - switch n := n.(type) { - case graphNodeTemporaryValue: - // root module outputs indicate they are not temporary by - // returning false here. - if !n.temporaryValue() { - return - } - - // temporary values, which consist of variables, locals, - // and outputs, must be kept if anything refers to them. - for _, v := range g.UpEdges(n) { - // keep any value which is connected through a - // reference - if _, ok := v.(GraphNodeReferencer); ok { - return - } - } - - case graphNodeExpandsInstances: - // Any nodes that expand instances are kept when their - // instances may need to be evaluated. - for _, v := range g.UpEdges(n) { - switch v.(type) { - case graphNodeExpandsInstances: - // Root module output values (which the following - // condition matches) are exempt because we know - // there is only ever exactly one instance of the - // root module, and so it's not actually important - // to expand it and so this lets us do a bit more - // pruning than we'd be able to do otherwise. - if tmp, ok := v.(graphNodeTemporaryValue); ok && !tmp.temporaryValue() { - continue - } - - // expanders can always depend on module expansion - // themselves - return - case GraphNodeResourceInstance: - // resource instances always depend on their - // resource node, which is an expander - return - } - } - - case GraphNodeProvider: - // Providers that may have been required by expansion nodes - // that we no longer need can also be removed. 
- if g.UpEdges(n).Len() > 0 { - return - } - - default: - return - } - - log.Printf("[DEBUG] pruneUnusedNodes: %s is no longer needed, removing", dag.VertexName(n)) - g.Remove(n) - removed = true - - // remove the node from our iteration as well - last := len(nodes) - 1 - nodes[i], nodes[last] = nodes[last], nodes[i] - nodes = nodes[:last] - }() - } - } - - return nil -} diff --git a/internal/terraform/transform_destroy_edge_test.go b/internal/terraform/transform_destroy_edge_test.go deleted file mode 100644 index c82d07e38544..000000000000 --- a/internal/terraform/transform_destroy_edge_test.go +++ /dev/null @@ -1,595 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" -) - -func TestDestroyEdgeTransformer_basic(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testDestroyNode("test_object.A")) - g.Add(testDestroyNode("test_object.B")) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf 
:= &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformDestroyEdgeBasicStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_multi(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testDestroyNode("test_object.A")) - g.Add(testDestroyNode("test_object.B")) - g.Add(testDestroyNode("test_object.C")) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.C").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"C","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{ - mustConfigResourceAddr("test_object.A"), - mustConfigResourceAddr("test_object.B"), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := 
strings.TrimSpace(testTransformDestroyEdgeMultiStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_selfRef(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testDestroyNode("test_object.A")) - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformDestroyEdgeSelfRefStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_module(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testDestroyNode("module.child.test_object.b")) - g.Add(testDestroyNode("test_object.a")) - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.test_object.b")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b","test_string":"x"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformDestroyEdgeModuleStr) - if actual != expected 
{ - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_moduleOnly(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - - state := states.NewState() - for moduleIdx := 0; moduleIdx < 2; moduleIdx++ { - g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.a", moduleIdx))) - g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.b", moduleIdx))) - g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.c", moduleIdx))) - - child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.IntKey(moduleIdx))) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.a").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"a"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.b").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"b","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{ - mustConfigResourceAddr("module.child.test_object.a"), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - child.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.c").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"c","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{ - mustConfigResourceAddr("module.child.test_object.a"), - mustConfigResourceAddr("module.child.test_object.b"), - }, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - } - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - // The analyses done in the destroy edge 
transformer are between - // not-yet-expanded objects, which is conservative and so it will generate - // edges that aren't strictly necessary. As a special case we filter out - // any edges that are between resources instances that are in different - // instances of the same module, because those edges are never needed - // (one instance of a module cannot depend on another instance of the - // same module) and including them can, in complex cases, cause cycles due - // to unnecessary interactions between destroyed and created module - // instances in the same plan. - // - // Therefore below we expect to see the dependencies within each instance - // of module.child reflected, but we should not see any dependencies - // _between_ instances of module.child. - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -module.child[0].test_object.a (destroy) - module.child[0].test_object.b (destroy) - module.child[0].test_object.c (destroy) -module.child[0].test_object.b (destroy) - module.child[0].test_object.c (destroy) -module.child[0].test_object.c (destroy) -module.child[1].test_object.a (destroy) - module.child[1].test_object.b (destroy) - module.child[1].test_object.c (destroy) -module.child[1].test_object.b (destroy) - module.child[1].test_object.c (destroy) -module.child[1].test_object.c (destroy) -`) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_destroyThenUpdate(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testUpdateNode("test_object.A")) - g.Add(testDestroyNode("test_object.B")) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A","test_string":"old"}`), - }, - 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - expected := strings.TrimSpace(` -test_object.A - test_object.B (destroy) -test_object.B (destroy) -`) - actual := strings.TrimSpace(g.String()) - - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestPruneUnusedNodesTransformer_rootModuleOutputValues(t *testing.T) { - // This is a kinda-weird test case covering the very narrow situation - // where a root module output value depends on a resource, where we - // need to make sure that the output value doesn't block pruning of - // the resource from the graph. This special case exists because although - // root module objects are "expanders", they in practice always expand - // to exactly one instance and so don't have the usual requirement of - // needing to stick around in order to support downstream expanders - // when there are e.g. nested expanding modules. 
- - // In order to keep this test focused on the pruneUnusedNodesTransformer - // as much as possible we're using a minimal graph construction here which - // is just enough to get the nodes we need, but this does mean that this - // test might be invalidated by future changes to the apply graph builder, - // and so if something seems off here it might help to compare the - // following with the real apply graph transformer and verify whether - // this smaller construction is still realistic enough to be a valid test. - // It might be valid to change or remove this test to "make it work", as - // long as you verify that there is still _something_ upholding the - // invariant that a root module output value should not block a resource - // node from being pruned from the graph. - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandApplyableResource{ - NodeAbstractResource: a, - } - } - - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeApplyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - resourceInstAddr := mustResourceInstanceAddr("test.a") - providerCfgAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: addrs.MustParseProviderSourceString("foo/test"), - } - emptyObjDynamicVal, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) - if err != nil { - t.Fatal(err) - } - nullObjDynamicVal, err := plans.NewDynamicValue(cty.NullVal(cty.EmptyObject), cty.EmptyObject) - if err != nil { - t.Fatal(err) - } - - config := testModuleInline(t, map[string]string{ - "main.tf": ` - resource "test" "a" { - } - - output "test" { - value = test.a.foo - } - `, - }) - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - resourceInstAddr, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{}`), - }, - providerCfgAddr, - ) - }) - changes := plans.NewChanges() - 
changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ - Addr: resourceInstAddr, - PrevRunAddr: resourceInstAddr, - ProviderAddr: providerCfgAddr, - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - Before: emptyObjDynamicVal, - After: nullObjDynamicVal, - }, - }) - - builder := &BasicGraphBuilder{ - Steps: []GraphTransformer{ - &ConfigTransformer{ - Concrete: concreteResource, - Config: config, - }, - &OutputTransformer{ - Config: config, - }, - &DiffTransformer{ - Concrete: concreteResourceInstance, - State: state, - Changes: changes, - }, - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - &pruneUnusedNodesTransformer{}, - &CloseRootModuleTransformer{}, - }, - } - graph, diags := builder.Build(addrs.RootModuleInstance) - assertNoDiagnostics(t, diags) - - // At this point, thanks to pruneUnusedNodesTransformer, we should still - // have the node for the output value, but the "test.a (expand)" node - // should've been pruned in recognition of the fact that we're performing - // a destroy and therefore we only need the "test.a (destroy)" node. - - nodesByName := make(map[string]dag.Vertex) - nodesByResourceExpand := make(map[string]dag.Vertex) - for _, n := range graph.Vertices() { - name := dag.VertexName(n) - if _, exists := nodesByName[name]; exists { - t.Fatalf("multiple nodes have name %q", name) - } - nodesByName[name] = n - - if exp, ok := n.(*nodeExpandApplyableResource); ok { - addr := exp.Addr - if _, exists := nodesByResourceExpand[addr.String()]; exists { - t.Fatalf("multiple nodes are expanders for %s", addr) - } - nodesByResourceExpand[addr.String()] = exp - } - } - - // NOTE: The following is sensitive to the current name string formats we - // use for these particular node types. 
These names are not contractual - // so if this breaks in future it is fine to update these names to the new - // names as long as you verify first that the new names correspond to - // the same meaning as what we're assuming below. - if _, exists := nodesByName["test.a (destroy)"]; !exists { - t.Errorf("missing destroy node for resource instance test.a") - } - if _, exists := nodesByName["output.test (expand)"]; !exists { - t.Errorf("missing expand for output value 'test'") - } - - // We _must not_ have any node that expands a resource. - if len(nodesByResourceExpand) != 0 { - t.Errorf("resource expand nodes remain the graph after transform; should've been pruned\n%s", spew.Sdump(nodesByResourceExpand)) - } -} - -// NoOp changes should not be participating in the destroy sequence -func TestDestroyEdgeTransformer_noOp(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(testDestroyNode("test_object.A")) - g.Add(testUpdateNode("test_object.B")) - g.Add(testDestroyNode("test_object.C")) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.B").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), - Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.C").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"C","test_string":"x"}`), - Dependencies: 
[]addrs.ConfigResource{mustConfigResourceAddr("test_object.A"), - mustConfigResourceAddr("test_object.B")}, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{ - // We only need a minimal object to indicate GraphNodeCreator change is - // a NoOp here. - Changes: &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.B"), - ChangeSrc: plans.ChangeSrc{Action: plans.NoOp}, - }, - }, - }, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - expected := strings.TrimSpace(` -test_object.A (destroy) - test_object.C (destroy) -test_object.B -test_object.C (destroy)`) - - actual := strings.TrimSpace(g.String()) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestDestroyEdgeTransformer_dataDependsOn(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - - addrA := mustResourceInstanceAddr("test_object.A") - instA := NewNodeAbstractResourceInstance(addrA) - a := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: instA} - g.Add(a) - - // B here represents a data sources, which is effectively an update during - // apply, but won't have dependencies stored in the state. 
- addrB := mustResourceInstanceAddr("test_object.B") - instB := NewNodeAbstractResourceInstance(addrB) - instB.Dependencies = append(instB.Dependencies, addrA.ConfigResource()) - b := &NodeApplyableResourceInstance{NodeAbstractResourceInstance: instB} - - g.Add(b) - - state := states.NewState() - root := state.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("test_object.A").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"A"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), - ) - - if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { - t.Fatal(err) - } - - tf := &DestroyEdgeTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -test_object.A (destroy) -test_object.B - test_object.A (destroy) -`) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func testDestroyNode(addrString string) GraphNodeDestroyer { - instAddr := mustResourceInstanceAddr(addrString) - inst := NewNodeAbstractResourceInstance(instAddr) - return &NodeDestroyResourceInstance{NodeAbstractResourceInstance: inst} -} - -func testUpdateNode(addrString string) GraphNodeCreator { - instAddr := mustResourceInstanceAddr(addrString) - inst := NewNodeAbstractResourceInstance(instAddr) - return &NodeApplyableResourceInstance{NodeAbstractResourceInstance: inst} -} - -const testTransformDestroyEdgeBasicStr = ` -test_object.A (destroy) - test_object.B (destroy) -test_object.B (destroy) -` - -const testTransformDestroyEdgeMultiStr = ` -test_object.A (destroy) - test_object.B (destroy) - test_object.C (destroy) -test_object.B (destroy) - test_object.C (destroy) -test_object.C (destroy) -` - -const testTransformDestroyEdgeSelfRefStr = ` -test_object.A (destroy) -` - -const 
testTransformDestroyEdgeModuleStr = ` -module.child.test_object.b (destroy) - test_object.a (destroy) -test_object.a (destroy) -` diff --git a/internal/terraform/transform_diff.go b/internal/terraform/transform_diff.go deleted file mode 100644 index 3bce1131f5a5..000000000000 --- a/internal/terraform/transform_diff.go +++ /dev/null @@ -1,214 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// DiffTransformer is a GraphTransformer that adds graph nodes representing -// each of the resource changes described in the given Changes object. -type DiffTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - State *states.State - Changes *plans.Changes - Config *configs.Config -} - -// return true if the given resource instance has either Preconditions or -// Postconditions defined in the configuration. -func (t *DiffTransformer) hasConfigConditions(addr addrs.AbsResourceInstance) bool { - // unit tests may have no config - if t.Config == nil { - return false - } - - cfg := t.Config.DescendentForInstance(addr.Module) - if cfg == nil { - return false - } - - res := cfg.Module.ResourceByAddr(addr.ConfigResource().Resource) - if res == nil { - return false - } - - return len(res.Preconditions) > 0 || len(res.Postconditions) > 0 -} - -func (t *DiffTransformer) Transform(g *Graph) error { - if t.Changes == nil || len(t.Changes.Resources) == 0 { - // Nothing to do! - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer starting") - - var diags tfdiags.Diagnostics - state := t.State - changes := t.Changes - - // DiffTransformer creates resource _instance_ nodes. 
If there are any - // whole-resource nodes already in the graph, we must ensure that they - // get evaluated before any of the corresponding instances by creating - // dependency edges, so we'll do some prep work here to ensure we'll only - // create connections to nodes that existed before we started here. - resourceNodes := map[string][]GraphNodeConfigResource{} - for _, node := range g.Vertices() { - rn, ok := node.(GraphNodeConfigResource) - if !ok { - continue - } - // We ignore any instances that _also_ implement - // GraphNodeResourceInstance, since in the unlikely event that they - // do exist we'd probably end up creating cycles by connecting them. - if _, ok := node.(GraphNodeResourceInstance); ok { - continue - } - - addr := rn.ResourceAddr().String() - resourceNodes[addr] = append(resourceNodes[addr], rn) - } - - for _, rc := range changes.Resources { - addr := rc.Addr - dk := rc.DeposedKey - - log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) - - // Depending on the action we'll need some different combinations of - // nodes, because destroying uses a special node type separate from - // other actions. - var update, delete, createBeforeDestroy bool - switch rc.Action { - case plans.NoOp: - // For a no-op change we don't take any action but we still - // run any condition checks associated with the object, to - // make sure that they still hold when considering the - // results of other changes. - update = t.hasConfigConditions(addr) - case plans.Delete: - delete = true - case plans.DeleteThenCreate, plans.CreateThenDelete: - update = true - delete = true - createBeforeDestroy = (rc.Action == plans.CreateThenDelete) - default: - update = true - } - - // A deposed instance may only have a change of Delete or NoOp. A NoOp - // can happen if the provider shows it no longer exists during the most - // recent ReadResource operation. 
- if dk != states.NotDeposed && !(rc.Action == plans.Delete || rc.Action == plans.NoOp) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change for deposed object", - fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), - )) - continue - } - - // If we're going to do a create_before_destroy Replace operation then - // we need to allocate a DeposedKey to use to retain the - // not-yet-destroyed prior object, so that the delete node can destroy - // _that_ rather than the newly-created node, which will be current - // by the time the delete node is visited. - if update && delete && createBeforeDestroy { - // In this case, variable dk will be the _pre-assigned_ DeposedKey - // that must be used if the update graph node deposes the current - // instance, which will then align with the same key we pass - // into the destroy node to ensure we destroy exactly the deposed - // object we expect. - if state != nil { - ris := state.ResourceInstance(addr) - if ris == nil { - // Should never happen, since we don't plan to replace an - // instance that doesn't exist yet. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change", - fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), - )) - continue - } - - // Allocating a deposed key separately from using it can be racy - // in general, but we assume here that nothing except the apply - // node we instantiate below will actually make new deposed objects - // in practice, and so the set of already-used keys will not change - // between now and then. - dk = ris.FindUnusedDeposedKey() - } else { - // If we have no state at all yet then we can use _any_ - // DeposedKey. 
- dk = states.NewDeposedKey() - } - } - - if update { - // All actions except destroying the node type chosen by t.Concrete - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - if createBeforeDestroy { - // We'll attach our pre-allocated DeposedKey to the node if - // it supports that. NodeApplyableResourceInstance is the - // specific concrete node type we are looking for here really, - // since that's the only node type that might depose objects. - if dn, ok := node.(GraphNodeDeposer); ok { - dn.SetPreallocatedDeposedKey(dk) - } - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) - } else { - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) - } - - g.Add(node) - rsrcAddr := addr.ContainingResource().String() - for _, rsrcNode := range resourceNodes[rsrcAddr] { - g.Connect(dag.BasicEdge(node, rsrcNode)) - } - } - - if delete { - // Destroying always uses a destroy-specific node type, though - // which one depends on whether we're destroying a current object - // or a deposed object. 
- var node GraphNodeResourceInstance - abstract := NewNodeAbstractResourceInstance(addr) - if dk == states.NotDeposed { - node = &NodeDestroyResourceInstance{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } else { - node = &NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } - if dk == states.NotDeposed { - log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) - } else { - log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) - } - g.Add(node) - } - - } - - log.Printf("[TRACE] DiffTransformer complete") - - return diags.Err() -} diff --git a/internal/terraform/transform_diff_test.go b/internal/terraform/transform_diff_test.go deleted file mode 100644 index fa1feae34d61..000000000000 --- a/internal/terraform/transform_diff_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plans" -) - -func TestDiffTransformer_nilDiff(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - tf := &DiffTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - if len(g.Vertices()) > 0 { - t.Fatal("graph should be empty") - } -} - -func TestDiffTransformer(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - - beforeVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) - if err != nil { - t.Fatal(err) - } - afterVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) - if err != nil { - t.Fatal(err) - } - - tf := &DiffTransformer{ - Changes: &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - Action: plans.Update, - Before: beforeVal, - After: afterVal, - }, - }, - }, - }, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformDiffBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestDiffTransformer_noOpChange(t *testing.T) { - // "No-op" changes are how we record explicitly in a plan that we did - // indeed visit a particular resource instance during the planning phase - // and concluded that no changes were needed, as opposed to the resource - // instance not existing at all or having been excluded from planning - // entirely. - // - // We must include nodes for resource instances with no-op changes in the - // apply graph, even though they won't take any external actions, because - // there are some secondary effects such as precondition/postcondition - // checks that can refer to objects elsewhere and so might have their - // results changed even if the resource instance they are attached to - // didn't actually change directly itself. - - // aws_instance.foo has a precondition, so should be included in the final - // graph. aws_instance.bar has no conditions, so there is nothing to - // execute during apply and it should not be included in the graph. 
- m := testModuleInline(t, map[string]string{ - "main.tf": ` -resource "aws_instance" "bar" { -} - -resource "aws_instance" "foo" { - test_string = "ok" - - lifecycle { - precondition { - condition = self.test_string != "" - error_message = "resource error" - } - } -} -`}) - - g := Graph{Path: addrs.RootModuleInstance} - - beforeVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) - if err != nil { - t.Fatal(err) - } - - tf := &DiffTransformer{ - Config: m, - Changes: &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - // A "no-op" change has the no-op action and has the - // same object as both Before and After. - Action: plans.NoOp, - Before: beforeVal, - After: beforeVal, - }, - }, - { - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProviderAddr: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ChangeSrc: plans.ChangeSrc{ - // A "no-op" change has the no-op action and has the - // same object as both Before and After. 
- Action: plans.NoOp, - Before: beforeVal, - After: beforeVal, - }, - }, - }, - }, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformDiffBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -const testTransformDiffBasicStr = ` -aws_instance.foo -` diff --git a/internal/terraform/transform_expand.go b/internal/terraform/transform_expand.go deleted file mode 100644 index 6d9b92aeeedc..000000000000 --- a/internal/terraform/transform_expand.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -// GraphNodeDynamicExpandable is an interface that nodes can implement -// to signal that they can be expanded at eval-time (hence dynamic). -// These nodes are given the eval context and are expected to return -// a new subgraph. -type GraphNodeDynamicExpandable interface { - // DynamicExpand returns a new graph which will be treated as the dynamic - // subgraph of the receiving node. - // - // The second return value is of type error for historical reasons; - // it's valid (and most ideal) for DynamicExpand to return the result - // of calling ErrWithWarnings on a tfdiags.Diagnostics value instead, - // in which case the caller will unwrap it and gather the individual - // diagnostics. 
- DynamicExpand(EvalContext) (*Graph, error) -} diff --git a/internal/terraform/transform_import_state_test.go b/internal/terraform/transform_import_state_test.go deleted file mode 100644 index 919f09d84b8a..000000000000 --- a/internal/terraform/transform_import_state_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/zclconf/go-cty/cty" -) - -func TestGraphNodeImportStateExecute(t *testing.T) { - state := states.NewState() - provider := testProvider("aws") - provider.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ - ImportedResources: []providers.ImportedResource{ - { - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - }), - }, - }, - } - provider.ConfigureProvider(providers.ConfigureProviderRequest{}) - - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: provider, - } - - // Import a new aws_instance.foo, this time with ID=bar. The original - // aws_instance.foo object should be removed from state and replaced with - // the new. - node := graphNodeImportState{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ID: "bar", - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - } - - diags := node.Execute(ctx, walkImport) - if diags.HasErrors() { - t.Fatalf("Unexpected error: %s", diags.Err()) - } - - if len(node.states) != 1 { - t.Fatalf("Wrong result! 
Expected one imported resource, got %d", len(node.states)) - } - // Verify the ID for good measure - id := node.states[0].State.GetAttr("id") - if !id.RawEquals(cty.StringVal("bar")) { - t.Fatalf("Wrong result! Expected id \"bar\", got %q", id.AsString()) - } -} - -func TestGraphNodeImportStateSubExecute(t *testing.T) { - state := states.NewState() - provider := testProvider("aws") - provider.ConfigureProvider(providers.ConfigureProviderRequest{}) - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: provider, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }, - } - - importedResource := providers.ImportedResource{ - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("bar")}), - } - - node := graphNodeImportStateSub{ - TargetAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - State: importedResource, - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - } - diags := node.Execute(ctx, walkImport) - if diags.HasErrors() { - t.Fatalf("Unexpected error: %s", diags.Err()) - } - - // check for resource in state - actual := strings.TrimSpace(state.String()) - expected := `aws_instance.foo: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"]` - if actual != expected { - t.Fatalf("bad state after import: \n%s", actual) - } -} - -func TestGraphNodeImportStateSubExecuteNull(t *testing.T) { - state := states.NewState() - provider := testProvider("aws") - provider.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - // return null indicating that the requested resource does not exist - 
resp.NewState = cty.NullVal(cty.Object(map[string]cty.Type{ - "id": cty.String, - })) - return resp - } - - ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: provider, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }, - } - - importedResource := providers.ImportedResource{ - TypeName: "aws_instance", - State: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("bar")}), - } - - node := graphNodeImportStateSub{ - TargetAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - State: importedResource, - ResolvedProvider: addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - } - diags := node.Execute(ctx, walkImport) - if !diags.HasErrors() { - t.Fatal("expected error for non-existent resource") - } -} diff --git a/internal/terraform/transform_local.go b/internal/terraform/transform_local.go deleted file mode 100644 index 667d6f917e00..000000000000 --- a/internal/terraform/transform_local.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// LocalTransformer is a GraphTransformer that adds all the local values -// from the configuration to the graph. 
-type LocalTransformer struct { - Config *configs.Config -} - -func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Config) -} - -func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { - if c == nil { - // Can't have any locals if there's no config - return nil - } - - for _, local := range c.Module.Locals { - addr := addrs.LocalValue{Name: local.Name} - node := &nodeExpandLocal{ - Addr: addr, - Module: c.Path, - Config: local, - } - g.Add(node) - } - - // Also populate locals for child modules - for _, cc := range c.Children { - if err := t.transformModule(g, cc); err != nil { - return err - } - } - - return nil -} diff --git a/internal/terraform/transform_module_expansion.go b/internal/terraform/transform_module_expansion.go deleted file mode 100644 index a4d45d963cd7..000000000000 --- a/internal/terraform/transform_module_expansion.go +++ /dev/null @@ -1,146 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" -) - -// ModuleExpansionTransformer is a GraphTransformer that adds graph nodes -// representing the possible expansion of each module call in the configuration, -// and ensures that any nodes representing objects declared within a module -// are dependent on the expansion node so that they will be visited only -// after the module expansion has been decided. -// -// This transform must be applied only after all nodes representing objects -// that can be contained within modules have already been added. -type ModuleExpansionTransformer struct { - Config *configs.Config - - // Concrete allows injection of a wrapped module node by the graph builder - // to alter the evaluation behavior. 
- Concrete ConcreteModuleNodeFunc - - closers map[string]*nodeCloseModule -} - -func (t *ModuleExpansionTransformer) Transform(g *Graph) error { - t.closers = make(map[string]*nodeCloseModule) - // The root module is always a singleton and so does not need expansion - // processing, but any descendent modules do. We'll process them - // recursively using t.transform. - for _, cfg := range t.Config.Children { - err := t.transform(g, cfg, nil) - if err != nil { - return err - } - } - - // Now go through and connect all nodes to their respective module closers. - // This is done all at once here, because orphaned modules were already - // handled by the RemovedModuleTransformer, and those module closers are in - // the graph already, and need to be connected to their parent closers. - for _, v := range g.Vertices() { - switch v.(type) { - case GraphNodeDestroyer: - // Destroy nodes can only be ordered relative to other resource - // instances. - continue - case *nodeCloseModule: - // a module closer cannot connect to itself - continue - } - - // any node that executes within the scope of a module should be a - // GraphNodeModulePath - pather, ok := v.(GraphNodeModulePath) - if !ok { - continue - } - if closer, ok := t.closers[pather.ModulePath().String()]; ok { - // The module closer depends on each child resource instance, since - // during apply the module expansion will complete before the - // individual instances are applied. - g.Connect(dag.BasicEdge(closer, v)) - } - } - - // Modules implicitly depend on their child modules, so connect closers to - // other which contain their path. 
- for _, c := range t.closers { - for _, d := range t.closers { - if len(d.Addr) > len(c.Addr) && c.Addr.Equal(d.Addr[:len(c.Addr)]) { - g.Connect(dag.BasicEdge(c, d)) - } - } - } - - return nil -} - -func (t *ModuleExpansionTransformer) transform(g *Graph, c *configs.Config, parentNode dag.Vertex) error { - _, call := c.Path.Call() - modCall := c.Parent.Module.ModuleCalls[call.Name] - - n := &nodeExpandModule{ - Addr: c.Path, - Config: c.Module, - ModuleCall: modCall, - } - var expander dag.Vertex = n - if t.Concrete != nil { - expander = t.Concrete(n) - } - - g.Add(expander) - log.Printf("[TRACE] ModuleExpansionTransformer: Added %s as %T", c.Path, expander) - - if parentNode != nil { - log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(expander), dag.VertexName(parentNode)) - g.Connect(dag.BasicEdge(expander, parentNode)) - } - - // Add the closer (which acts as the root module node) to provide a - // single exit point for the expanded module. - closer := &nodeCloseModule{ - Addr: c.Path, - } - g.Add(closer) - g.Connect(dag.BasicEdge(closer, expander)) - t.closers[c.Path.String()] = closer - - for _, childV := range g.Vertices() { - // don't connect a node to itself - if childV == expander { - continue - } - - var path addrs.Module - switch t := childV.(type) { - case GraphNodeDestroyer: - // skip destroyers, as they can only depend on other resources. - continue - - case GraphNodeModulePath: - path = t.ModulePath() - default: - continue - } - - if path.Equal(c.Path) { - log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(childV), c.Path) - g.Connect(dag.BasicEdge(childV, expander)) - } - } - - // Also visit child modules, recursively. 
- for _, cc := range c.Children { - if err := t.transform(g, cc, expander); err != nil { - return err - } - } - - return nil -} diff --git a/internal/terraform/transform_module_variable.go b/internal/terraform/transform_module_variable.go deleted file mode 100644 index a9fa02c4e10d..000000000000 --- a/internal/terraform/transform_module_variable.go +++ /dev/null @@ -1,112 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/configs" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// Any "variable" block present in any non-root module is included here, even -// if a particular variable is not referenced from anywhere. -// -// The transform will produce errors if a call to a module does not conform -// to the expected set of arguments, but this transformer is not in a good -// position to return errors and so the validate walk should include specific -// steps for validating module blocks, separate from this transform. -type ModuleVariableTransformer struct { - Config *configs.Config -} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Config) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { - // We can have no variables if we have no configuration. - if c == nil { - return nil - } - - // Transform all the children first. - for _, cc := range c.Children { - if err := t.transform(g, c, cc); err != nil { - return err - } - } - - // If we're processing anything other than the root module then we'll - // add graph nodes for variables defined inside. (Variables for the root - // module are dealt with in RootVariableTransformer). 
- // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, c); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { - _, call := c.Path.Call() - - // Find the call in the parent module configuration, so we can get the - // expressions given for each input variable at the call site. - callConfig, exists := parent.Module.ModuleCalls[call.Name] - if !exists { - // This should never happen, since it indicates an improperly-constructed - // configuration tree. - panic(fmt.Errorf("no module call block found for %s", c.Path)) - } - - // We need to construct a schema for the expected call arguments based on - // the configured variables in our config, which we can then use to - // decode the content of the call block. - schema := &hcl.BodySchema{} - for _, v := range c.Module.Variables { - schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ - Name: v.Name, - Required: v.Default == cty.NilVal, - }) - } - - content, contentDiags := callConfig.Config.Content(schema) - if contentDiags.HasErrors() { - // Validation code elsewhere should deal with any errors before we - // get in here, but we'll report them out here just in case, to - // avoid crashes. 
- var diags tfdiags.Diagnostics - diags = diags.Append(contentDiags) - return diags.Err() - } - - for _, v := range c.Module.Variables { - var expr hcl.Expression - if attr := content.Attributes[v.Name]; attr != nil { - expr = attr.Expr - } - - // Add a plannable node, as the variable may expand - // during module expansion - node := &nodeExpandModuleVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Module: c.Path, - Config: v, - Expr: expr, - } - g.Add(node) - } - - return nil -} diff --git a/internal/terraform/transform_module_variable_test.go b/internal/terraform/transform_module_variable_test.go deleted file mode 100644 index 363d141ae7cc..000000000000 --- a/internal/terraform/transform_module_variable_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestModuleVariableTransformer(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - module := testModule(t, "transform-module-var-basic") - - { - tf := &RootVariableTransformer{Config: module} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &ModuleVariableTransformer{Config: module} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformModuleVarBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestModuleVariableTransformer_nested(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - module := testModule(t, "transform-module-var-nested") - - { - tf := &RootVariableTransformer{Config: module} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &ModuleVariableTransformer{Config: module} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := 
strings.TrimSpace(testTransformModuleVarNestedStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -const testTransformModuleVarBasicStr = ` -module.child.var.value (expand) -` - -const testTransformModuleVarNestedStr = ` -module.child.module.child.var.value (expand) -module.child.var.value (expand) -` diff --git a/internal/terraform/transform_orphan_count.go b/internal/terraform/transform_orphan_count.go deleted file mode 100644 index bdeebf6d18a3..000000000000 --- a/internal/terraform/transform_orphan_count.go +++ /dev/null @@ -1,61 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" -) - -// OrphanResourceInstanceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. -type OrphanResourceInstanceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - Addr addrs.AbsResource // Addr of the resource to look for orphans - InstanceAddrs []addrs.AbsResourceInstance // Addresses that currently exist in config - State *states.State // Full global state -} - -func (t *OrphanResourceInstanceCountTransformer) Transform(g *Graph) error { - rs := t.State.Resource(t.Addr) - if rs == nil { - return nil // Resource doesn't exist in state, so nothing to do! - } - - // This is an O(n*m) analysis, which we accept for now because the - // number of instances of a single resource ought to always be small in any - // reasonable Terraform configuration. 
-Have: - for key, inst := range rs.Instances { - // Instances which have no current objects (only one or more - // deposed objects) will be taken care of separately - if inst.Current == nil { - continue - } - - thisAddr := rs.Addr.Instance(key) - for _, wantAddr := range t.InstanceAddrs { - if wantAddr.Equal(thisAddr) { - continue Have - } - } - // If thisAddr is not in t.InstanceAddrs then we've found an "orphan" - - abstract := NewNodeAbstractResourceInstance(thisAddr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceCountTransformer: adding %s as %T", thisAddr, node) - g.Add(node) - } - - return nil -} diff --git a/internal/terraform/transform_orphan_count_test.go b/internal/terraform/transform_orphan_count_test.go deleted file mode 100644 index 024189b9a40d..000000000000 --- a/internal/terraform/transform_orphan_count_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" -) - -func TestOrphanResourceCountTransformer(t *testing.T) { - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.web").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - 
}, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceInstanceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceCountTransformer_zero(t *testing.T) { - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.web").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceInstanceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - InstanceAddrs: []addrs.AbsResourceInstance{}, - State: state, - } - if err := 
tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountZeroStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceCountTransformer_oneIndex(t *testing.T) { - state := states.NewState() - root := state.RootModule() - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.web").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceInstanceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountOneIndexStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceCountTransformer_deposed(t *testing.T) { - state := states.NewState() - root := state.RootModule() - 
root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.web").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[0]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceCurrent( - mustResourceInstanceAddr("aws_instance.foo[1]").Resource, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - root.SetResourceInstanceDeposed( - mustResourceInstanceAddr("aws_instance.foo[2]").Resource, - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id":"foo"}`), - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceInstanceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountDeposedStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -// When converting from a NoEach mode to an EachMap via a switch to for_each, -// an edge is necessary to ensure that the map-key'd instances -// are evaluated after the NoKey resource, because the final instance evaluated -// 
sets the whole resource's EachMode. -func TestOrphanResourceCountTransformer_ForEachEdgesAdded(t *testing.T) { - state := states.BuildState(func(s *states.SyncState) { - // "bar" key'd resource - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.StringKey("bar")).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - - // NoKey'd resource - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), - ) - }) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceInstanceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - InstanceAddrs: []addrs.AbsResourceInstance{}, - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceForEachStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -const testTransformOrphanResourceCountBasicStr = ` -aws_instance.foo[2] (orphan) -` - -const testTransformOrphanResourceCountZeroStr = ` -aws_instance.foo[0] (orphan) -aws_instance.foo[2] (orphan) -` - -const testTransformOrphanResourceCountOneIndexStr = ` -aws_instance.foo[1] (orphan) -` - -const testTransformOrphanResourceCountDeposedStr = ` -aws_instance.foo[1] (orphan) -` - -const 
testTransformOrphanResourceForEachStr = ` -aws_instance.foo (orphan) -aws_instance.foo["bar"] (orphan) -` diff --git a/internal/terraform/transform_orphan_output.go b/internal/terraform/transform_orphan_output.go deleted file mode 100644 index fe3e9ce4192d..000000000000 --- a/internal/terraform/transform_orphan_output.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. -type OrphanOutputTransformer struct { - Config *configs.Config // Root of config tree - State *states.State // State is the root state - Planning bool -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - return nil -} - -func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the config for this path, which is nil if the entire module has been - // removed. - var outputs map[string]*configs.Output - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - outputs = c.Module.Outputs - } - - // An output is "orphaned" if it's present in the state but not declared - // in the configuration. 
- for name := range ms.OutputValues { - if _, exists := outputs[name]; exists { - continue - } - - g.Add(&NodeDestroyableOutput{ - Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), - Planning: t.Planning, - }) - } - - return nil -} diff --git a/internal/terraform/transform_orphan_resource.go b/internal/terraform/transform_orphan_resource.go deleted file mode 100644 index 348ae905205d..000000000000 --- a/internal/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,108 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" -) - -// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned -// resource instances to the graph. An "orphan" is an instance that is present -// in the state but belongs to a resource that is no longer present in the -// configuration. -// -// This is not the transformer that deals with "count orphans" (instances that -// are no longer covered by a resource's "count" or "for_each" setting); that's -// handled instead by OrphanResourceCountTransformer. -type OrphanResourceInstanceTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - // State is the global state. We require the global state to - // properly find module orphans at our path. - State *states.State - - // Config is the root node in the configuration tree. We'll look up - // the appropriate note in this tree using the path in each node. - Config *configs.Config - - // Do not apply this transformer - skip bool -} - -func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { - if t.skip { - return nil - } - - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. 
- panic("OrphanResourceInstanceTransformer used without setting Config") - } - - // Go through the modules and for each module transform in order - // to add the orphan. - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - - return nil -} - -func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the configuration for this module. The configuration might be - // nil if the module was removed from the configuration. This is okay, - // this just means that every resource is an orphan. - var m *configs.Module - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - m = c.Module - } - - // An "orphan" is a resource that is in the state but not the configuration, - // so we'll walk the state resources and try to correlate each of them - // with a configuration block. Each orphan gets a node in the graph whose - // type is decided by t.Concrete. - // - // We don't handle orphans related to changes in the "count" and "for_each" - // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. 
- for _, rs := range ms.Resources { - if m != nil { - if r := m.ResourceByAddr(rs.Addr.Resource); r != nil { - continue - } - } - - for key, inst := range rs.Instances { - // Instances which have no current objects (only one or more - // deposed objects) will be taken care of separately - if inst.Current == nil { - continue - } - - addr := rs.Addr.Instance(key) - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) - g.Add(node) - } - } - - return nil -} diff --git a/internal/terraform/transform_orphan_resource_test.go b/internal/terraform/transform_orphan_resource_test.go deleted file mode 100644 index f44f081525f0..000000000000 --- a/internal/terraform/transform_orphan_resource_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/states" -) - -func TestOrphanResourceInstanceTransformer(t *testing.T) { - mod := testModule(t, "transform-orphan-basic") - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - // The orphan - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "db", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: 
map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - - // A deposed orphan should not be handled by this transformer - s.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_instance", - Name: "deposed", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - states.NewDeposedKey(), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - }) - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &OrphanResourceInstanceTransformer{ - Concrete: testOrphanResourceConcreteFunc, - State: state, - Config: mod, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceInstanceTransformer_countGood(t *testing.T) { - mod := testModule(t, "transform-orphan-count") - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - 
}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &OrphanResourceInstanceTransformer{ - Concrete: testOrphanResourceConcreteFunc, - State: state, - Config: mod, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceInstanceTransformer_countBad(t *testing.T) { - mod := testModule(t, "transform-orphan-count-empty") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := 
tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &OrphanResourceInstanceTransformer{ - Concrete: testOrphanResourceConcreteFunc, - State: state, - Config: mod, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountBadStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestOrphanResourceInstanceTransformer_modules(t *testing.T) { - mod := testModule(t, "transform-orphan-modules") - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "web", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - tf := &OrphanResourceInstanceTransformer{ - Concrete: testOrphanResourceConcreteFunc, - State: state, - Config: mod, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(testTransformOrphanResourceModulesStr) - if got != 
want { - t.Fatalf("wrong state result\ngot:\n%s\n\nwant:\n%s", got, want) - } -} - -const testTransformOrphanResourceBasicStr = ` -aws_instance.db (orphan) -aws_instance.web -` - -const testTransformOrphanResourceCountStr = ` -aws_instance.foo -` - -const testTransformOrphanResourceCountBadStr = ` -aws_instance.foo[0] (orphan) -aws_instance.foo[1] (orphan) -` - -const testTransformOrphanResourceModulesStr = ` -aws_instance.foo -module.child.aws_instance.web (orphan) -` - -func testOrphanResourceConcreteFunc(a *NodeAbstractResourceInstance) dag.Vertex { - return &testOrphanResourceInstanceConcrete{a} -} - -type testOrphanResourceInstanceConcrete struct { - *NodeAbstractResourceInstance -} - -func (n *testOrphanResourceInstanceConcrete) Name() string { - return fmt.Sprintf("%s (orphan)", n.NodeAbstractResourceInstance.Name()) -} diff --git a/internal/terraform/transform_output.go b/internal/terraform/transform_output.go deleted file mode 100644 index 214bc3ff10ec..000000000000 --- a/internal/terraform/transform_output.go +++ /dev/null @@ -1,73 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Config *configs.Config - - // Refresh-only mode means that any failing output preconditions are - // reported as warnings rather than errors - RefreshOnly bool - - // Planning must be set to true only when we're building a planning graph. - // It must be set to false whenever we're building an apply graph. 
- Planning bool - - // If this is a planned destroy, root outputs are still in the configuration - // so we need to record that we wish to remove them - PlanDestroy bool - - // ApplyDestroy indicates that this is being added to an apply graph, which - // is the result of a destroy plan. - ApplyDestroy bool -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Config) -} - -func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { - // If we have no config then there can be no outputs. - if c == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - - for _, o := range c.Module.Outputs { - addr := addrs.OutputValue{Name: o.Name} - - node := &nodeExpandOutput{ - Addr: addr, - Module: c.Path, - Config: o, - PlanDestroy: t.PlanDestroy, - ApplyDestroy: t.ApplyDestroy, - RefreshOnly: t.RefreshOnly, - Planning: t.Planning, - } - - log.Printf("[TRACE] OutputTransformer: adding %s as %T", o.Name, node) - g.Add(node) - } - - return nil -} diff --git a/internal/terraform/transform_provider.go b/internal/terraform/transform_provider.go deleted file mode 100644 index 2e1f8bd5f3f8..000000000000 --- a/internal/terraform/transform_provider.go +++ /dev/null @@ -1,730 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -func transformProviders(concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { - return GraphTransformMulti( - // Add providers from the config - &ProviderConfigTransformer{ - Config: config, - Concrete: concrete, - }, - // Add any 
remaining missing providers - &MissingProviderTransformer{ - Config: config, - Concrete: concrete, - }, - // Connect the providers - &ProviderTransformer{ - Config: config, - }, - // Remove unused providers and proxies - &PruneProviderTransformer{}, - ) -} - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. -// -// ProviderAddr returns the address of the provider configuration this -// satisfies, which is relative to the path returned by method Path(). -// -// Name returns the full name of the provider in the config. -type GraphNodeProvider interface { - GraphNodeModulePath - ProviderAddr() addrs.AbsProviderConfig - Name() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. The CloseProviderName returned is the name of -// the provider they satisfy. -type GraphNodeCloseProvider interface { - GraphNodeModulePath - CloseProviderAddr() addrs.AbsProviderConfig -} - -// GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the address of the provider -// to use, which will be resolved to a configuration either in the same module -// or in an ancestor module, with the resulting absolute address passed to -// SetProvider. -type GraphNodeProviderConsumer interface { - GraphNodeModulePath - // ProvidedBy returns the address of the provider configuration the node - // refers to, if available. The following value types may be returned: - // - // nil + exact true: the node does not require a provider - // * addrs.LocalProviderConfig: the provider was set in the resource config - // * addrs.AbsProviderConfig + exact true: the provider configuration was - // taken from the instance state. 
- // * addrs.AbsProviderConfig + exact false: no config or state; the returned - // value is a default provider configuration address for the resource's - // Provider - ProvidedBy() (addr addrs.ProviderConfig, exact bool) - - // Provider() returns the Provider FQN for the node. - Provider() (provider addrs.Provider) - - // Set the resolved provider address for this resource. - SetProvider(addrs.AbsProviderConfig) -} - -// ProviderTransformer is a GraphTransformer that maps resources to providers -// within the graph. This will error if there are any resources that don't map -// to proper resources. -type ProviderTransformer struct { - Config *configs.Config -} - -func (t *ProviderTransformer) Transform(g *Graph) error { - // We need to find a provider configuration address for each resource - // either directly represented by a node or referenced by a node in - // the graph, and then create graph edges from provider to provider user - // so that the providers will get initialized first. - - var diags tfdiags.Diagnostics - - // To start, we'll collect the _requested_ provider addresses for each - // node, which we'll then resolve (handling provider inheritence, etc) in - // the next step. - // Our "requested" map is from graph vertices to string representations of - // provider config addresses (for deduping) to requests. - type ProviderRequest struct { - Addr addrs.AbsProviderConfig - Exact bool // If true, inheritence from parent modules is not attempted - } - requested := map[dag.Vertex]map[string]ProviderRequest{} - needConfigured := map[string]addrs.AbsProviderConfig{} - for _, v := range g.Vertices() { - // Does the vertex _directly_ use a provider? 
- if pv, ok := v.(GraphNodeProviderConsumer); ok { - providerAddr, exact := pv.ProvidedBy() - if providerAddr == nil && exact { - // no provider is required - continue - } - - requested[v] = make(map[string]ProviderRequest) - - var absPc addrs.AbsProviderConfig - - switch p := providerAddr.(type) { - case addrs.AbsProviderConfig: - // ProvidedBy() returns an AbsProviderConfig when the provider - // configuration is set in state, so we do not need to verify - // the FQN matches. - absPc = p - - if exact { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), absPc) - } - - case addrs.LocalProviderConfig: - // ProvidedBy() return a LocalProviderConfig when the resource - // contains a `provider` attribute - absPc.Provider = pv.Provider() - modPath := pv.ModulePath() - if t.Config == nil { - absPc.Module = modPath - absPc.Alias = p.Alias - break - } - - absPc.Module = modPath - absPc.Alias = p.Alias - - default: - // This should never happen; the case statements are meant to be exhaustive - panic(fmt.Sprintf("%s: provider for %s couldn't be determined", dag.VertexName(v), absPc)) - } - - requested[v][absPc.String()] = ProviderRequest{ - Addr: absPc, - Exact: exact, - } - - // Direct references need the provider configured as well as initialized - needConfigured[absPc.String()] = absPc - } - } - - // Now we'll go through all the requested addresses we just collected and - // figure out which _actual_ config address each belongs to, after resolving - // for provider inheritance and passing. 
- m := providerVertexMap(g) - for v, reqs := range requested { - for key, req := range reqs { - p := req.Addr - target := m[key] - - _, ok := v.(GraphNodeModulePath) - if !ok && target == nil { - // No target and no path to traverse up from - diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) - continue - } - - if target != nil { - log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) - } - - // if we don't have a provider at this level, walk up the path looking for one, - // unless we were told to be exact. - if target == nil && !req.Exact { - for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { - key := pp.String() - target = m[key] - if target != nil { - log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) - break - } - log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) - } - } - - // If this provider doesn't need to be configured then we can just - // stub it out with an init-only provider node, which will just - // start up the provider and fetch its schema. - if _, exists := needConfigured[key]; target == nil && !exists { - stubAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: p.Provider, - } - stub := &NodeEvalableProvider{ - &NodeAbstractProvider{ - Addr: stubAddr, - }, - } - m[stubAddr.String()] = stub - log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) - target = stub - g.Add(target) - } - - if target == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider configuration not present", - fmt.Sprintf( - "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. 
Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", - dag.VertexName(v), p, dag.VertexName(v), - ), - )) - break - } - - // see if this is a proxy provider pointing to another concrete config - if p, ok := target.(*graphNodeProxyProvider); ok { - g.Remove(p) - target = p.Target() - } - - log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) - if pv, ok := v.(GraphNodeProviderConsumer); ok { - pv.SetProvider(target.ProviderAddr()) - } - g.Connect(dag.BasicEdge(v, target)) - } - } - - return diags.Err() -} - -// CloseProviderTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provider connections that aren't needed anymore. -// A provider connection is not needed anymore once all depended resources -// in the graph are evaluated. -type CloseProviderTransformer struct{} - -func (t *CloseProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - cpm := make(map[string]*graphNodeCloseProvider) - var err error - - for _, p := range pm { - key := p.ProviderAddr().String() - - // get the close provider of this type if we alread created it - closer := cpm[key] - - if closer == nil { - // create a closer for this provider type - closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} - g.Add(closer) - cpm[key] = closer - } - - // Close node depends on the provider itself - // this is added unconditionally, so it will connect to all instances - // of the provider. Extra edges will be removed by transitive - // reduction. 
- g.Connect(dag.BasicEdge(closer, p)) - - // connect all the provider's resources to the close node - for _, s := range g.UpEdges(p) { - if _, ok := s.(GraphNodeProviderConsumer); ok { - g.Connect(dag.BasicEdge(closer, s)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds to the graph -// a node for each default provider configuration that is referenced by another -// node but not already present in the graph. -// -// These "default" nodes are always added to the root module, regardless of -// where they are requested. This is important because our inheritance -// resolution behavior in ProviderTransformer will then treat these as a -// last-ditch fallback after walking up the tree, rather than preferring them -// as it would if they were placed in the same module as the requester. -// -// This transformer may create extra nodes that are not needed in practice, -// due to overriding provider configurations in child modules. -// PruneProviderTransformer can then remove these once ProviderTransformer -// has resolved all of the inheritence, etc. -type MissingProviderTransformer struct { - // MissingProviderTransformer needs the config to rule out _implied_ default providers - Config *configs.Config - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // For our work here we actually care only about the provider type and - // we plan to place all default providers in the root module. 
- providerFqn := pv.Provider() - - // We're going to create an implicit _default_ configuration for the - // referenced provider type in the _root_ module, ignoring all other - // aspects of the resource's declared provider address. - defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(providerFqn) - key := defaultAddr.String() - provider := m[key] - - if provider != nil { - // There's already an explicit default configuration for this - // provider type in the root module, so we have nothing to do. - continue - } - - log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - - // create the missing top-level provider - provider = t.Concrete(&NodeAbstractProvider{ - Addr: defaultAddr, - }).(GraphNodeProvider) - - g.Add(provider) - m[key] = provider - } - - return err -} - -// PruneProviderTransformer removes any providers that are not actually used by -// anything, and provider proxies. This avoids the provider being initialized -// and configured. This both saves resources but also avoids errors since -// configuration may imply initialization which may require auth. -type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - _, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // ProxyProviders will have up edges, but we're now done with them in the graph - if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) - g.Remove(v) - } - - // Remove providers with no dependencies. 
- if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) - g.Remove(v) - } - } - - return nil -} - -func providerVertexMap(g *Graph) map[string]GraphNodeProvider { - m := make(map[string]GraphNodeProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - addr := pv.ProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -type graphNodeCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -var ( - _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) - _ GraphNodeExecutable = (*graphNodeCloseProvider)(nil) -) - -func (n *graphNodeCloseProvider) Name() string { - return n.Addr.String() + " (close)" -} - -// GraphNodeModulePath -func (n *graphNodeCloseProvider) ModulePath() addrs.Module { - return n.Addr.Module -} - -// GraphNodeExecutable impl. -func (n *graphNodeCloseProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { - return diags.Append(ctx.CloseProvider(n.Addr)) -} - -func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to -// store the name and value of a provider node for inheritance between modules. -// These nodes are only used to store the data while loading the provider -// configurations, and are removed after all the resources have been connected -// to their providers. 
-type graphNodeProxyProvider struct { - addr addrs.AbsProviderConfig - target GraphNodeProvider -} - -var ( - _ GraphNodeModulePath = (*graphNodeProxyProvider)(nil) - _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) -) - -func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.addr -} - -func (n *graphNodeProxyProvider) ModulePath() addrs.Module { - return n.addr.Module -} - -func (n *graphNodeProxyProvider) Name() string { - return n.addr.String() + " (proxy)" -} - -// find the concrete provider instance -func (n *graphNodeProxyProvider) Target() GraphNodeProvider { - switch t := n.target.(type) { - case *graphNodeProxyProvider: - return t.Target() - default: - return n.target - } -} - -// ProviderConfigTransformer adds all provider nodes from the configuration and -// attaches the configs. -type ProviderConfigTransformer struct { - Concrete ConcreteProviderNodeFunc - - // each provider node is stored here so that the proxy nodes can look up - // their targets by name. - providers map[string]GraphNodeProvider - // record providers that can be overriden with a proxy - proxiable map[string]bool - - // Config is the root node of the configuration tree to add providers from. - Config *configs.Config -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no configuration is given, we don't do anything - if t.Config == nil { - return nil - } - - t.providers = make(map[string]GraphNodeProvider) - t.proxiable = make(map[string]bool) - - // Start the transformation process - if err := t.transform(g, t.Config); err != nil { - return err - } - - // finally attach the configs to the new nodes - return t.attachProviderConfigs(g) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { - // If no config, do nothing - if c == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, c); err != nil { - return err - } - - // Transform all the children. 
- for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { - // Get the module associated with this configuration tree node - mod := c.Module - path := c.Path - - // If this is the root module, we can add nodes for required providers that - // have no configuration, equivalent to having an empty configuration - // block. This will ensure that a provider node exists for modules to - // access when passing around configuration and inheritance. - if path.IsRoot() && c.Module.ProviderRequirements != nil { - for name, p := range c.Module.ProviderRequirements.RequiredProviders { - if _, configured := mod.ProviderConfigs[name]; configured { - continue - } - - addr := addrs.AbsProviderConfig{ - Provider: p.Type, - Module: path, - } - - if _, ok := t.providers[addr.String()]; ok { - // The config validation warns about this too, but we can't - // completely prevent it in v1. - log.Printf("[WARN] ProviderConfigTransformer: duplicate required_providers entry for %s", addr) - continue - } - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - g.Add(v) - t.providers[addr.String()] = v.(GraphNodeProvider) - } - } - - // add all providers from the configuration - for _, p := range mod.ProviderConfigs { - fqn := mod.ProviderForLocalConfig(p.Addr()) - addr := addrs.AbsProviderConfig{ - Provider: fqn, - Alias: p.Alias, - Module: path, - } - - if _, ok := t.providers[addr.String()]; ok { - // The abstract provider node may already have been added from the - // provider requirements. 
- log.Printf("[WARN] ProviderConfigTransformer: provider node %s already added", addr) - continue - } - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - // Add it to the graph - g.Add(v) - key := addr.String() - t.providers[key] = v.(GraphNodeProvider) - - // While deprecated, we still accept empty configuration blocks within - // modules as being a possible proxy for passed configuration. - if !path.IsRoot() { - // A provider configuration is "proxyable" if its configuration is - // entirely empty. This means it's standing in for a provider - // configuration that must be passed in from the parent module. - // We decide this by evaluating the config with an empty schema; - // if this succeeds, then we know there's nothing in the body. - _, diags := p.Config.Content(&hcl.BodySchema{}) - t.proxiable[key] = !diags.HasErrors() - } - } - - // Now replace the provider nodes with proxy nodes if a provider was being - // passed in, and create implicit proxies if there was no config. Any extra - // proxies will be removed in the prune step. - return t.addProxyProviders(g, c) -} - -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { - path := c.Path - - // can't add proxies at the root - if path.IsRoot() { - return nil - } - - parentPath, callAddr := path.Call() - parent := c.Parent - if parent == nil { - return nil - } - - callName := callAddr.Name - var parentCfg *configs.ModuleCall - for name, mod := range parent.Module.ModuleCalls { - if name == callName { - parentCfg = mod - break - } - } - - if parentCfg == nil { - // this can't really happen during normal execution. - return fmt.Errorf("parent module config not found for %s", c.Path.String()) - } - - // Go through all the providers the parent is passing in, and add proxies to - // the parent provider nodes. 
- for _, pair := range parentCfg.Providers { - fqn := c.Module.ProviderForLocalConfig(pair.InChild.Addr()) - fullAddr := addrs.AbsProviderConfig{ - Provider: fqn, - Module: path, - Alias: pair.InChild.Addr().Alias, - } - - fullParentAddr := addrs.AbsProviderConfig{ - Provider: fqn, - Module: parentPath, - Alias: pair.InParent.Addr().Alias, - } - - fullName := fullAddr.String() - fullParentName := fullParentAddr.String() - - parentProvider := t.providers[fullParentName] - - if parentProvider == nil { - return fmt.Errorf("missing provider %s", fullParentName) - } - - proxy := &graphNodeProxyProvider{ - addr: fullAddr, - target: parentProvider, - } - - concreteProvider := t.providers[fullName] - - // replace the concrete node with the provider passed in only if it is - // proxyable - if concreteProvider != nil { - if t.proxiable[fullName] { - g.Replace(concreteProvider, proxy) - t.providers[fullName] = proxy - } - continue - } - - // There was no concrete provider, so add this as an implicit provider. - // The extra proxy will be pruned later if it's unused. - g.Add(proxy) - t.providers[fullName] = proxy - } - - return nil -} - -func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - addr := apn.ProviderAddr() - - // Get the configuration. 
- mc := t.Config.Descendent(addr.Module) - if mc == nil { - log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) - continue - } - - // Find the localName for the provider fqn - localName := mc.Module.LocalNameForProvider(addr.Provider) - - // Go through the provider configs to find the matching config - for _, p := range mc.Module.ProviderConfigs { - if p.Name == localName && p.Alias == addr.Alias { - log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/internal/terraform/transform_provider_test.go b/internal/terraform/transform_provider_test.go deleted file mode 100644 index ff21685710d1..000000000000 --- a/internal/terraform/transform_provider_test.go +++ /dev/null @@ -1,491 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" -) - -func testProviderTransformerGraph(t *testing.T, cfg *configs.Config) *Graph { - t.Helper() - - g := &Graph{Path: addrs.RootModuleInstance} - ct := &ConfigTransformer{Config: cfg} - if err := ct.Transform(g); err != nil { - t.Fatal(err) - } - arct := &AttachResourceConfigTransformer{Config: cfg} - if err := arct.Transform(g); err != nil { - t.Fatal(err) - } - - return g -} - -func TestProviderTransformer(t *testing.T) { - mod := testModule(t, "transform-provider-basic") - - g := testProviderTransformerGraph(t, mod) - { - transform := &MissingProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - transform := &ProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformProviderBasicStr) - if 
actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -// Test providers with FQNs that do not match the typeName -func TestProviderTransformer_fqns(t *testing.T) { - for _, mod := range []string{"fqns", "fqns-module"} { - mod := testModule(t, fmt.Sprintf("transform-provider-%s", mod)) - - g := testProviderTransformerGraph(t, mod) - { - transform := &MissingProviderTransformer{Config: mod} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - transform := &ProviderTransformer{Config: mod} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformProviderBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } - } -} - -func TestCloseProviderTransformer(t *testing.T) { - mod := testModule(t, "transform-provider-basic") - g := testProviderTransformerGraph(t, mod) - - { - transform := &MissingProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &CloseProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformCloseProviderBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestCloseProviderTransformer_withTargets(t *testing.T) { - mod := testModule(t, "transform-provider-basic") - - g := testProviderTransformerGraph(t, mod) - transforms := []GraphTransformer{ - &MissingProviderTransformer{}, - &ProviderTransformer{}, - &CloseProviderTransformer{}, - &TargetsTransformer{ - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "something", "else", - ), - }, - }, - } - - for _, tr := range transforms { 
- if err := tr.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(``) - if actual != expected { - t.Fatalf("expected:%s\n\ngot:\n\n%s", expected, actual) - } -} - -func TestMissingProviderTransformer(t *testing.T) { - mod := testModule(t, "transform-provider-missing") - - g := testProviderTransformerGraph(t, mod) - { - transform := &MissingProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &CloseProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformMissingProviderBasicStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestMissingProviderTransformer_grandchildMissing(t *testing.T) { - mod := testModule(t, "transform-provider-missing-grandchild") - - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - { - transform := transformProviders(concrete, mod) - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - { - transform := &TransitiveReductionTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformMissingGrandchildProviderStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestPruneProviderTransformer(t *testing.T) { - mod := testModule(t, "transform-provider-prune") - - g := testProviderTransformerGraph(t, mod) - { - transform := &MissingProviderTransformer{} - if err := transform.Transform(g); err != nil { - 
t.Fatalf("err: %s", err) - } - } - - { - transform := &ProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &CloseProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &PruneProviderTransformer{} - if err := transform.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformPruneProviderBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -// the child module resource is attached to the configured parent provider -func TestProviderConfigTransformer_parentProviders(t *testing.T) { - mod := testModule(t, "transform-provider-inherit") - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - { - tf := transformProviders(concrete, mod) - if err := tf.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformModuleProviderConfigStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -// the child module resource is attached to the configured grand-parent provider -func TestProviderConfigTransformer_grandparentProviders(t *testing.T) { - mod := testModule(t, "transform-provider-grandchild-inherit") - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - { - tf := transformProviders(concrete, mod) - if err := tf.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformModuleProviderGrandparentStr) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestProviderConfigTransformer_inheritOldSkool(t *testing.T) { - mod := 
testModuleInline(t, map[string]string{ - "main.tf": ` -provider "test" { - test_string = "config" -} - -module "moda" { - source = "./moda" -} -`, - - "moda/main.tf": ` -resource "test_object" "a" { -} -`, - }) - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - { - tf := transformProviders(concrete, mod) - if err := tf.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - expected := `module.moda.test_object.a - provider["registry.terraform.io/hashicorp/test"] -provider["registry.terraform.io/hashicorp/test"]` - - actual := strings.TrimSpace(g.String()) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -// Verify that configurations which are not recommended yet supported still work -func TestProviderConfigTransformer_nestedModuleProviders(t *testing.T) { - mod := testModuleInline(t, map[string]string{ - "main.tf": ` -terraform { - required_providers { - test = { - source = "registry.terraform.io/hashicorp/test" - } - } -} - -provider "test" { - alias = "z" - test_string = "config" -} - -module "moda" { - source = "./moda" - providers = { - test.x = test.z - } -} -`, - - "moda/main.tf": ` -terraform { - required_providers { - test = { - source = "registry.terraform.io/hashicorp/test" - configuration_aliases = [ test.x ] - } - } -} - -provider "test" { - test_string = "config" -} - -// this should connect to this module's provider -resource "test_object" "a" { -} - -resource "test_object" "x" { - provider = test.x -} - -module "modb" { - source = "./modb" -} -`, - - "moda/modb/main.tf": ` -# this should end up with the provider from the parent module -resource "test_object" "a" { -} -`, - }) - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - { - tf := transformProviders(concrete, mod) - if err := tf.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - } - - expected := 
`module.moda.module.modb.test_object.a - module.moda.provider["registry.terraform.io/hashicorp/test"] -module.moda.provider["registry.terraform.io/hashicorp/test"] -module.moda.test_object.a - module.moda.provider["registry.terraform.io/hashicorp/test"] -module.moda.test_object.x - provider["registry.terraform.io/hashicorp/test"].z -provider["registry.terraform.io/hashicorp/test"].z` - - actual := strings.TrimSpace(g.String()) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -func TestProviderConfigTransformer_duplicateLocalName(t *testing.T) { - mod := testModuleInline(t, map[string]string{ - "main.tf": ` -terraform { - required_providers { - # We have to allow this since it wasn't previously prevented. If the - # default config is equivalent to the provider config, the user may never - # see an error. - dupe = { - source = "registry.terraform.io/hashicorp/test" - } - } -} - -provider "test" { -} -`}) - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := testProviderTransformerGraph(t, mod) - tf := ProviderConfigTransformer{ - Config: mod, - Concrete: concrete, - } - if err := tf.Transform(g); err != nil { - t.Fatalf("err: %s", err) - } - - expected := `provider["registry.terraform.io/hashicorp/test"]` - - actual := strings.TrimSpace(g.String()) - if actual != expected { - t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -const testTransformProviderBasicStr = ` -aws_instance.web - provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] -` - -const testTransformCloseProviderBasicStr = ` -aws_instance.web - provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] (close) - aws_instance.web - provider["registry.terraform.io/hashicorp/aws"] -` - -const testTransformMissingProviderBasicStr = ` -aws_instance.web - provider["registry.terraform.io/hashicorp/aws"] 
-foo_instance.web - provider["registry.terraform.io/hashicorp/foo"] -provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] (close) - aws_instance.web - provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/foo"] -provider["registry.terraform.io/hashicorp/foo"] (close) - foo_instance.web - provider["registry.terraform.io/hashicorp/foo"] -` - -const testTransformMissingGrandchildProviderStr = ` -module.sub.module.subsub.bar_instance.two - provider["registry.terraform.io/hashicorp/bar"] -module.sub.module.subsub.foo_instance.one - module.sub.provider["registry.terraform.io/hashicorp/foo"] -module.sub.provider["registry.terraform.io/hashicorp/foo"] -provider["registry.terraform.io/hashicorp/bar"] -` - -const testTransformPruneProviderBasicStr = ` -foo_instance.web - provider["registry.terraform.io/hashicorp/foo"] -provider["registry.terraform.io/hashicorp/foo"] -provider["registry.terraform.io/hashicorp/foo"] (close) - foo_instance.web - provider["registry.terraform.io/hashicorp/foo"] -` - -const testTransformModuleProviderConfigStr = ` -module.child.aws_instance.thing - provider["registry.terraform.io/hashicorp/aws"].foo -provider["registry.terraform.io/hashicorp/aws"].foo -` - -const testTransformModuleProviderGrandparentStr = ` -module.child.module.grandchild.aws_instance.baz - provider["registry.terraform.io/hashicorp/aws"].foo -provider["registry.terraform.io/hashicorp/aws"].foo -` diff --git a/internal/terraform/transform_provisioner.go b/internal/terraform/transform_provisioner.go deleted file mode 100644 index 38e3a8ed714e..000000000000 --- a/internal/terraform/transform_provisioner.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the names of the -// provisioners to use. 
-type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} diff --git a/internal/terraform/transform_reference.go b/internal/terraform/transform_reference.go deleted file mode 100644 index fb8feb2f2a7f..000000000000 --- a/internal/terraform/transform_reference.go +++ /dev/null @@ -1,557 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/lang" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - GraphNodeModulePath - - // ReferenceableAddrs returns a list of addresses through which this can be - // referenced. - ReferenceableAddrs() []addrs.Referenceable -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - GraphNodeModulePath - - // References returns a list of references made by this node, which - // include both a referenced address and source location information for - // the reference. - References() []*addrs.Reference -} - -type GraphNodeAttachDependencies interface { - GraphNodeConfigResource - AttachDependencies([]addrs.ConfigResource) -} - -// graphNodeDependsOn is implemented by resources that need to expose any -// references set via DependsOn in their configuration. 
-type graphNodeDependsOn interface { - GraphNodeReferencer - DependsOn() []*addrs.Reference -} - -// graphNodeAttachDataResourceDependsOn records all resources that are transitively -// referenced through depends_on in the configuration. This is used by data -// resources to determine if they can be read during the plan, or if they need -// to be further delayed until apply. -// We can only use an addrs.ConfigResource address here, because modules are -// not yet expended in the graph. While this will cause some extra data -// resources to show in the plan when their depends_on references may be in -// unrelated module instances, the fact that it only happens when there are any -// resource updates pending means we can still avoid the problem of the -// "perpetual diff" -type graphNodeAttachDataResourceDependsOn interface { - GraphNodeConfigResource - graphNodeDependsOn - - // AttachDataResourceDependsOn stores the discovered dependencies in the - // resource node for evaluation later. - // - // The force parameter indicates that even if there are no dependencies, - // force the data source to act as though there are for refresh purposes. - // This is needed because yet-to-be-created resources won't be in the - // initial refresh graph, but may still be referenced through depends_on. - AttachDataResourceDependsOn(deps []addrs.ConfigResource, force bool) -} - -// GraphNodeReferenceOutside is an interface that can optionally be implemented. -// A node that implements it can specify that its own referenceable addresses -// and/or the addresses it references are in a different module than the -// node itself. -// -// Any referenceable addresses returned by ReferenceableAddrs are interpreted -// relative to the returned selfPath. -// -// Any references returned by References are interpreted relative to the -// returned referencePath. 
-// -// It is valid but not required for either of these paths to match what is -// returned by method Path, though if both match the main Path then there -// is no reason to implement this method. -// -// The primary use-case for this is the nodes representing module input -// variables, since their expressions are resolved in terms of their calling -// module, but they are still referenced from their own module. -type GraphNodeReferenceOutside interface { - // ReferenceOutside returns a path in which any references from this node - // are resolved. - ReferenceOutside() (selfPath, referencePath addrs.Module) -} - -// ReferenceTransformer is a GraphTransformer that connects all the -// nodes that reference each other in order to form the proper ordering. -type ReferenceTransformer struct{} - -func (t *ReferenceTransformer) Transform(g *Graph) error { - // Build a reference map so we can efficiently look up the references - vs := g.Vertices() - m := NewReferenceMap(vs) - - // Find the things that reference things and connect them - for _, v := range vs { - if _, ok := v.(GraphNodeDestroyer); ok { - // destroy nodes references are not connected, since they can only - // use their own state. - continue - } - - parents := m.References(v) - parentsDbg := make([]string, len(parents)) - for i, v := range parents { - parentsDbg[i] = dag.VertexName(v) - } - log.Printf( - "[DEBUG] ReferenceTransformer: %q references: %v", - dag.VertexName(v), parentsDbg) - - for _, parent := range parents { - // A destroy plan relies solely on the state, so we only need to - // ensure that temporary values are connected to get the evaluation - // order correct. Any references to destroy nodes will cause - // cycles, because they are connected in reverse order. 
- if _, ok := parent.(GraphNodeDestroyer); ok { - continue - } - - if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(v, parent) { - g.Connect(dag.BasicEdge(v, parent)) - } else { - log.Printf("[TRACE] ReferenceTransformer: skipping %s => %s inter-module-instance dependency", dag.VertexName(v), dag.VertexName(parent)) - } - } - - if len(parents) > 0 { - continue - } - } - - return nil -} - -type depMap map[string]addrs.ConfigResource - -// add stores the vertex if it represents a resource in the -// graph. -func (m depMap) add(v dag.Vertex) { - // we're only concerned with resources which may have changes that - // need to be applied. - switch v := v.(type) { - case GraphNodeResourceInstance: - instAddr := v.ResourceInstanceAddr() - addr := instAddr.ContainingResource().Config() - m[addr.String()] = addr - case GraphNodeConfigResource: - addr := v.ResourceAddr() - m[addr.String()] = addr - } -} - -// attachDataResourceDependsOnTransformer records all resources transitively -// referenced through a configuration depends_on. -type attachDataResourceDependsOnTransformer struct { -} - -func (t attachDataResourceDependsOnTransformer) Transform(g *Graph) error { - // First we need to make a map of referenceable addresses to their vertices. - // This is very similar to what's done in ReferenceTransformer, but we keep - // implementation separate as they may need to change independently. - vertices := g.Vertices() - refMap := NewReferenceMap(vertices) - - for _, v := range vertices { - depender, ok := v.(graphNodeAttachDataResourceDependsOn) - if !ok { - continue - } - - // Only data need to attach depends_on, so they can determine if they - // are eligible to be read during plan. 
- if depender.ResourceAddr().Resource.Mode != addrs.DataResourceMode { - continue - } - - // depMap will only add resource references then dedupe - deps := make(depMap) - dependsOnDeps, fromModule := refMap.dependsOn(g, depender) - for _, dep := range dependsOnDeps { - // any the dependency - deps.add(dep) - } - - res := make([]addrs.ConfigResource, 0, len(deps)) - for _, d := range deps { - res = append(res, d) - } - - log.Printf("[TRACE] attachDataDependenciesTransformer: %s depends on %s", depender.ResourceAddr(), res) - depender.AttachDataResourceDependsOn(res, fromModule) - } - - return nil -} - -// AttachDependenciesTransformer records all resource dependencies for each -// instance, and attaches the addresses to the node itself. Managed resource -// will record these in the state for proper ordering of destroy operations. -type AttachDependenciesTransformer struct { -} - -func (t AttachDependenciesTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - attacher, ok := v.(GraphNodeAttachDependencies) - if !ok { - continue - } - selfAddr := attacher.ResourceAddr() - - ans, err := g.Ancestors(v) - if err != nil { - return err - } - - // dedupe addrs when there's multiple instances involved, or - // multiple paths in the un-reduced graph - depMap := map[string]addrs.ConfigResource{} - for _, d := range ans { - var addr addrs.ConfigResource - - switch d := d.(type) { - case GraphNodeResourceInstance: - instAddr := d.ResourceInstanceAddr() - addr = instAddr.ContainingResource().Config() - case GraphNodeConfigResource: - addr = d.ResourceAddr() - default: - continue - } - - if addr.Equal(selfAddr) { - continue - } - depMap[addr.String()] = addr - } - - deps := make([]addrs.ConfigResource, 0, len(depMap)) - for _, d := range depMap { - deps = append(deps, d) - } - sort.Slice(deps, func(i, j int) bool { - return deps[i].String() < deps[j].String() - }) - - log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", 
attacher.ResourceAddr(), deps) - attacher.AttachDependencies(deps) - } - - return nil -} - -func isDependableResource(v dag.Vertex) bool { - switch v.(type) { - case GraphNodeResourceInstance: - return true - case GraphNodeConfigResource: - return true - } - return false -} - -// ReferenceMap is a structure that can be used to efficiently check -// for references on a graph, mapping internal reference keys (as produced by -// the mapKey method) to one or more vertices that are identified by each key. -type ReferenceMap map[string][]dag.Vertex - -// References returns the set of vertices that the given vertex refers to, -// and any referenced addresses that do not have corresponding vertices. -func (m ReferenceMap) References(v dag.Vertex) []dag.Vertex { - rn, ok := v.(GraphNodeReferencer) - if !ok { - return nil - } - - var matches []dag.Vertex - - for _, ref := range rn.References() { - subject := ref.Subject - - key := m.referenceMapKey(v, subject) - if _, exists := m[key]; !exists { - // If what we were looking for was a ResourceInstance then we - // might be in a resource-oriented graph rather than an - // instance-oriented graph, and so we'll see if we have the - // resource itself instead. - switch ri := subject.(type) { - case addrs.ResourceInstance: - subject = ri.ContainingResource() - case addrs.ResourceInstancePhase: - subject = ri.ContainingResource() - case addrs.ModuleCallInstanceOutput: - subject = ri.ModuleCallOutput() - case addrs.ModuleCallInstance: - subject = ri.Call - default: - log.Printf("[INFO] ReferenceTransformer: reference not found: %q", subject) - continue - } - key = m.referenceMapKey(v, subject) - } - vertices := m[key] - for _, rv := range vertices { - // don't include self-references - if rv == v { - continue - } - matches = append(matches, rv) - } - } - - return matches -} - -// dependsOn returns the set of vertices that the given vertex refers to from -// the configured depends_on. 
The bool return value indicates if depends_on was -// found in a parent module configuration. -func (m ReferenceMap) dependsOn(g *Graph, depender graphNodeDependsOn) ([]dag.Vertex, bool) { - var res []dag.Vertex - fromModule := false - - refs := depender.DependsOn() - - // get any implied dependencies for data sources - refs = append(refs, m.dataDependsOn(depender)...) - - // This is where we record that a module has depends_on configured. - if _, ok := depender.(*nodeExpandModule); ok && len(refs) > 0 { - fromModule = true - } - - for _, ref := range refs { - subject := ref.Subject - - key := m.referenceMapKey(depender, subject) - vertices, ok := m[key] - if !ok { - // the ReferenceMap generates all possible keys, so any warning - // here is probably not useful for this implementation. - continue - } - for _, rv := range vertices { - // don't include self-references - if rv == depender { - continue - } - res = append(res, rv) - - // Check any ancestors for transitive dependencies when we're - // not pointed directly at a resource. We can't be much more - // precise here, since in order to maintain our guarantee that data - // sources will wait for explicit dependencies, if those dependencies - // happen to be a module, output, or variable, we have to find some - // upstream managed resource in order to check for a planned - // change. - if _, ok := rv.(GraphNodeConfigResource); !ok { - ans, _ := g.Ancestors(rv) - for _, v := range ans { - if isDependableResource(v) { - res = append(res, v) - } - } - } - } - } - - parentDeps, fromParentModule := m.parentModuleDependsOn(g, depender) - res = append(res, parentDeps...) - - return res, fromModule || fromParentModule -} - -// Return extra depends_on references if this is a data source. -// For data sources we implicitly treat references to managed resources as -// depends_on entries. 
If a data source references a managed resource, even if -// that reference is resolvable, it stands to reason that the user intends for -// the data source to require that resource in some way. -func (m ReferenceMap) dataDependsOn(depender graphNodeDependsOn) []*addrs.Reference { - var refs []*addrs.Reference - if n, ok := depender.(GraphNodeConfigResource); ok && - n.ResourceAddr().Resource.Mode == addrs.DataResourceMode { - for _, r := range depender.References() { - - var resAddr addrs.Resource - switch s := r.Subject.(type) { - case addrs.Resource: - resAddr = s - case addrs.ResourceInstance: - resAddr = s.Resource - r.Subject = resAddr - } - - if resAddr.Mode != addrs.ManagedResourceMode { - // We only want to wait on directly referenced managed resources. - // Data sources have no external side effects, so normal - // references to them in the config will suffice for proper - // ordering. - continue - } - - refs = append(refs, r) - } - } - return refs -} - -// parentModuleDependsOn returns the set of vertices that a data sources parent -// module references through the module call's depends_on. The bool return -// value indicates if depends_on was found in a parent module configuration. -func (m ReferenceMap) parentModuleDependsOn(g *Graph, depender graphNodeDependsOn) ([]dag.Vertex, bool) { - var res []dag.Vertex - fromModule := false - - // Look for containing modules with DependsOn. - // This should be connected directly to the module node, so we only need to - // look one step away. - for _, v := range g.DownEdges(depender) { - // we're only concerned with module expansion nodes here. 
- mod, ok := v.(*nodeExpandModule) - if !ok { - continue - } - - deps, fromParentModule := m.dependsOn(g, mod) - for _, dep := range deps { - // add the dependency - res = append(res, dep) - - // and check any transitive resource dependencies for more resources - ans, _ := g.Ancestors(dep) - for _, v := range ans { - if isDependableResource(v) { - res = append(res, v) - } - } - } - fromModule = fromModule || fromParentModule - } - - return res, fromModule -} - -func (m *ReferenceMap) mapKey(path addrs.Module, addr addrs.Referenceable) string { - return fmt.Sprintf("%s|%s", path.String(), addr.String()) -} - -// vertexReferenceablePath returns the path in which the given vertex can be -// referenced. This is the path that its results from ReferenceableAddrs -// are considered to be relative to. -// -// Only GraphNodeModulePath implementations can be referenced, so this method will -// panic if the given vertex does not implement that interface. -func vertexReferenceablePath(v dag.Vertex) addrs.Module { - sp, ok := v.(GraphNodeModulePath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeModulePath", sp)) - } - - if outside, ok := v.(GraphNodeReferenceOutside); ok { - // Vertex is referenced from a different module than where it was - // declared. - path, _ := outside.ReferenceOutside() - return path - } - - // Vertex is referenced from the same module as where it was declared. - return sp.ModulePath() -} - -// vertexReferencePath returns the path in which references _from_ the given -// vertex must be interpreted. -// -// Only GraphNodeModulePath implementations can have references, so this method -// will panic if the given vertex does not implement that interface. -func vertexReferencePath(v dag.Vertex) addrs.Module { - sp, ok := v.(GraphNodeModulePath) - if !ok { - // Only nodes with paths can participate in a reference map. 
- panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeModulePath", v)) - } - - if outside, ok := v.(GraphNodeReferenceOutside); ok { - // Vertex makes references to objects in a different module than where - // it was declared. - _, path := outside.ReferenceOutside() - return path - } - - // Vertex makes references to objects in the same module as where it - // was declared. - return sp.ModulePath() -} - -// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex -// that the reference is from, and "addr" is the address of the object being -// referenced. -// -// The result is an opaque string that includes both the address of the given -// object and the address of the module instance that object belongs to. -// -// Only GraphNodeModulePath implementations can be referrers, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { - path := vertexReferencePath(referrer) - return m.mapKey(path, addr) -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. -func NewReferenceMap(vs []dag.Vertex) ReferenceMap { - // Build the lookup table - m := make(ReferenceMap) - for _, v := range vs { - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - path := vertexReferenceablePath(v) - - // Go through and cache them - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(path, addr) - m[key] = append(m[key], v) - } - } - - return m -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. 
-func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { - if body == nil { - return nil - } - refs, _ := lang.ReferencesInBlock(body, schema) - return refs -} diff --git a/internal/terraform/transform_reference_test.go b/internal/terraform/transform_reference_test.go deleted file mode 100644 index 50e47e19b860..000000000000 --- a/internal/terraform/transform_reference_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package terraform - -import ( - "reflect" - "sort" - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" -) - -func TestReferenceTransformer_simple(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(&graphNodeRefParentTest{ - NameValue: "A", - Names: []string{"A"}, - }) - g.Add(&graphNodeRefChildTest{ - NameValue: "B", - Refs: []string{"A"}, - }) - - tf := &ReferenceTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformRefBasicStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestReferenceTransformer_self(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(&graphNodeRefParentTest{ - NameValue: "A", - Names: []string{"A"}, - }) - g.Add(&graphNodeRefChildTest{ - NameValue: "B", - Refs: []string{"A", "B"}, - }) - - tf := &ReferenceTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformRefBasicStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestReferenceTransformer_path(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add(&graphNodeRefParentTest{ - NameValue: "A", - Names: []string{"A"}, - }) - g.Add(&graphNodeRefChildTest{ - NameValue: 
"B", - Refs: []string{"A"}, - }) - g.Add(&graphNodeRefParentTest{ - NameValue: "child.A", - PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, - Names: []string{"A"}, - }) - g.Add(&graphNodeRefChildTest{ - NameValue: "child.B", - PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, - Refs: []string{"A"}, - }) - - tf := &ReferenceTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformRefPathStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestReferenceTransformer_resourceInstances(t *testing.T) { - // Our reference analyses are all done based on unexpanded addresses - // so that we can use this transformer both in the plan graph (where things - // are not expanded yet) and the apply graph (where resource instances are - // pre-expanded but nothing else is.) - // However, that would make the result too conservative about instances - // of the same resource in different instances of the same module, so we - // make an exception for that situation in particular, keeping references - // between resource instances segregated by their containing module - // instance. 
- g := Graph{Path: addrs.RootModuleInstance} - moduleInsts := []addrs.ModuleInstance{ - { - { - Name: "foo", InstanceKey: addrs.IntKey(0), - }, - }, - { - { - Name: "foo", InstanceKey: addrs.IntKey(1), - }, - }, - } - resourceAs := make([]addrs.AbsResourceInstance, len(moduleInsts)) - for i, moduleInst := range moduleInsts { - resourceAs[i] = addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "thing", - Name: "a", - }.Instance(addrs.NoKey).Absolute(moduleInst) - } - resourceBs := make([]addrs.AbsResourceInstance, len(moduleInsts)) - for i, moduleInst := range moduleInsts { - resourceBs[i] = addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "thing", - Name: "b", - }.Instance(addrs.NoKey).Absolute(moduleInst) - } - g.Add(&graphNodeFakeResourceInstance{ - Addr: resourceAs[0], - }) - g.Add(&graphNodeFakeResourceInstance{ - Addr: resourceBs[0], - Refs: []*addrs.Reference{ - { - Subject: resourceAs[0].Resource, - }, - }, - }) - g.Add(&graphNodeFakeResourceInstance{ - Addr: resourceAs[1], - }) - g.Add(&graphNodeFakeResourceInstance{ - Addr: resourceBs[1], - Refs: []*addrs.Reference{ - { - Subject: resourceAs[1].Resource, - }, - }, - }) - - tf := &ReferenceTransformer{} - if err := tf.Transform(&g); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Resource B should be connected to resource A in each module instance, - // but there should be no connections between the two module instances. 
- actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -module.foo[0].thing.a -module.foo[0].thing.b - module.foo[0].thing.a -module.foo[1].thing.a -module.foo[1].thing.b - module.foo[1].thing.a -`) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestReferenceMapReferences(t *testing.T) { - cases := map[string]struct { - Nodes []dag.Vertex - Check dag.Vertex - Result []string - }{ - "simple": { - Nodes: []dag.Vertex{ - &graphNodeRefParentTest{ - NameValue: "A", - Names: []string{"A"}, - }, - }, - Check: &graphNodeRefChildTest{ - NameValue: "foo", - Refs: []string{"A"}, - }, - Result: []string{"A"}, - }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - rm := NewReferenceMap(tc.Nodes) - result := rm.References(tc.Check) - - var resultStr []string - for _, v := range result { - resultStr = append(resultStr, dag.VertexName(v)) - } - - sort.Strings(resultStr) - sort.Strings(tc.Result) - if !reflect.DeepEqual(resultStr, tc.Result) { - t.Fatalf("bad: %#v", resultStr) - } - }) - } -} - -type graphNodeRefParentTest struct { - NameValue string - PathValue addrs.ModuleInstance - Names []string -} - -var _ GraphNodeReferenceable = (*graphNodeRefParentTest)(nil) - -func (n *graphNodeRefParentTest) Name() string { - return n.NameValue -} - -func (n *graphNodeRefParentTest) ReferenceableAddrs() []addrs.Referenceable { - ret := make([]addrs.Referenceable, len(n.Names)) - for i, name := range n.Names { - ret[i] = addrs.LocalValue{Name: name} - } - return ret -} - -func (n *graphNodeRefParentTest) Path() addrs.ModuleInstance { - return n.PathValue -} - -func (n *graphNodeRefParentTest) ModulePath() addrs.Module { - return n.PathValue.Module() -} - -type graphNodeRefChildTest struct { - NameValue string - PathValue addrs.ModuleInstance - Refs []string -} - -var _ GraphNodeReferencer = (*graphNodeRefChildTest)(nil) - -func (n *graphNodeRefChildTest) Name() string { - return 
n.NameValue -} - -func (n *graphNodeRefChildTest) References() []*addrs.Reference { - ret := make([]*addrs.Reference, len(n.Refs)) - for i, name := range n.Refs { - ret[i] = &addrs.Reference{ - Subject: addrs.LocalValue{Name: name}, - } - } - return ret -} - -func (n *graphNodeRefChildTest) Path() addrs.ModuleInstance { - return n.PathValue -} - -func (n *graphNodeRefChildTest) ModulePath() addrs.Module { - return n.PathValue.Module() -} - -type graphNodeFakeResourceInstance struct { - Addr addrs.AbsResourceInstance - Refs []*addrs.Reference -} - -var _ GraphNodeResourceInstance = (*graphNodeFakeResourceInstance)(nil) -var _ GraphNodeReferenceable = (*graphNodeFakeResourceInstance)(nil) -var _ GraphNodeReferencer = (*graphNodeFakeResourceInstance)(nil) - -func (n *graphNodeFakeResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { - return n.Addr -} - -func (n *graphNodeFakeResourceInstance) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -func (n *graphNodeFakeResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Resource} -} - -func (n *graphNodeFakeResourceInstance) References() []*addrs.Reference { - return n.Refs -} - -func (n *graphNodeFakeResourceInstance) StateDependencies() []addrs.ConfigResource { - return nil -} - -func (n *graphNodeFakeResourceInstance) String() string { - return n.Addr.String() -} - -const testTransformRefBasicStr = ` -A -B - A -` - -const testTransformRefPathStr = ` -A -B - A -child.A -child.B - child.A -` diff --git a/internal/terraform/transform_removed_modules.go b/internal/terraform/transform_removed_modules.go deleted file mode 100644 index 090582ce2081..000000000000 --- a/internal/terraform/transform_removed_modules.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" -) - -// 
RemovedModuleTransformer implements GraphTransformer to add nodes indicating -// when a module was removed from the configuration. -type RemovedModuleTransformer struct { - Config *configs.Config // root node in the config tree - State *states.State -} - -func (t *RemovedModuleTransformer) Transform(g *Graph) error { - // nothing to remove if there's no state! - if t.State == nil { - return nil - } - - removed := map[string]addrs.Module{} - - for _, m := range t.State.Modules { - cc := t.Config.DescendentForInstance(m.Addr) - if cc != nil { - continue - } - removed[m.Addr.Module().String()] = m.Addr.Module() - log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) - } - - // add closers to collect any module instances we're removing - for _, modAddr := range removed { - closer := &nodeCloseModule{ - Addr: modAddr, - } - g.Add(closer) - } - - return nil -} diff --git a/internal/terraform/transform_resource_count.go b/internal/terraform/transform_resource_count.go deleted file mode 100644 index 4d853593eb71..000000000000 --- a/internal/terraform/transform_resource_count.go +++ /dev/null @@ -1,36 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/dag" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. 
-type ResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - Schema *configschema.Block - - Addr addrs.ConfigResource - InstanceAddrs []addrs.AbsResourceInstance -} - -func (t *ResourceCountTransformer) Transform(g *Graph) error { - for _, addr := range t.InstanceAddrs { - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - log.Printf("[TRACE] ResourceCountTransformer: adding %s as %T", addr, node) - g.Add(node) - } - return nil -} diff --git a/internal/terraform/transform_root.go b/internal/terraform/transform_root.go deleted file mode 100644 index e06ef5b414cf..000000000000 --- a/internal/terraform/transform_root.go +++ /dev/null @@ -1,82 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/dag" -) - -const rootNodeName = "root" - -// RootTransformer is a GraphTransformer that adds a root to the graph. -type RootTransformer struct{} - -func (t *RootTransformer) Transform(g *Graph) error { - addRootNodeToGraph(g) - return nil -} - -// addRootNodeToGraph modifies the given graph in-place so that it has a root -// node if it didn't already have one and so that any other node which doesn't -// already depend on something will depend on that root node. -// -// After this function returns, the graph will have only one node that doesn't -// depend on any other nodes. -func addRootNodeToGraph(g *Graph) { - // We always add the root node. This is a singleton so if it's already - // in the graph this will do nothing and just retain the existing root node. - // - // Note that rootNode is intentionally added by value and not by pointer - // so that all root nodes will be equal to one another and therefore - // coalesce when two valid graphs get merged together into a single graph. 
- g.Add(rootNode) - - // Everything that doesn't already depend on at least one other node will - // depend on the root node, except the root node itself. - for _, v := range g.Vertices() { - if v == dag.Vertex(rootNode) { - continue - } - - if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(rootNode, v)) - } - } -} - -type graphNodeRoot struct{} - -// rootNode is the singleton value representing all root graph nodes. -// -// The root node for all graphs should be this value directly, and in particular -// _not_ a pointer to this value. Using the value directly here means that -// multiple root nodes will always coalesce together when subsuming one graph -// into another. -var rootNode graphNodeRoot - -func (n graphNodeRoot) Name() string { - return rootNodeName -} - -// CloseRootModuleTransformer is a GraphTransformer that adds a root to the graph. -type CloseRootModuleTransformer struct{} - -func (t *CloseRootModuleTransformer) Transform(g *Graph) error { - // close the root module - closeRoot := &nodeCloseModule{} - g.Add(closeRoot) - - // since this is closing the root module, make it depend on everything in - // the root module. - for _, v := range g.Vertices() { - if v == closeRoot { - continue - } - - // since this is closing the root module, and must be last, we can - // connect to anything that doesn't have any up edges. 
- if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(closeRoot, v)) - } - } - - return nil -} diff --git a/internal/terraform/transform_root_test.go b/internal/terraform/transform_root_test.go deleted file mode 100644 index 61f24a5f764a..000000000000 --- a/internal/terraform/transform_root_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestRootTransformer(t *testing.T) { - t.Run("many nodes", func(t *testing.T) { - mod := testModule(t, "transform-root-basic") - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &MissingProviderTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ProviderTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &RootTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformRootBasicStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } - - root, err := g.Root() - if err != nil { - t.Fatalf("err: %s", err) - } - if _, ok := root.(graphNodeRoot); !ok { - t.Fatalf("bad: %#v", root) - } - }) - - t.Run("only one initial node", func(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - g.Add("foo") - addRootNodeToGraph(&g) - got := strings.TrimSpace(g.String()) - want := strings.TrimSpace(` -foo -root - foo -`) - if got != want { - t.Errorf("wrong final graph\ngot:\n%s\nwant:\n%s", got, want) - } - }) - - t.Run("graph initially empty", func(t *testing.T) { - g := Graph{Path: addrs.RootModuleInstance} - addRootNodeToGraph(&g) - got := strings.TrimSpace(g.String()) - want := `root` 
- if got != want { - t.Errorf("wrong final graph\ngot:\n%s\nwant:\n%s", got, want) - } - }) - -} - -const testTransformRootBasicStr = ` -aws_instance.foo - provider["registry.terraform.io/hashicorp/aws"] -do_droplet.bar - provider["registry.terraform.io/hashicorp/do"] -provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/do"] -root - aws_instance.foo - do_droplet.bar -` diff --git a/internal/terraform/transform_state.go b/internal/terraform/transform_state.go deleted file mode 100644 index 1ca060a88aad..000000000000 --- a/internal/terraform/transform_state.go +++ /dev/null @@ -1,72 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/states" -) - -// StateTransformer is a GraphTransformer that adds the elements of -// the state to the graph. -// -// This transform is used for example by the DestroyPlanGraphBuilder to ensure -// that only resources that are in the state are represented in the graph. -type StateTransformer struct { - // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract - // resource instance nodes that this transformer will create. - // - // If either of these is nil, the objects of that type will be skipped and - // not added to the graph at all. It doesn't make sense to use this - // transformer without setting at least one of these, since that would - // skip everything and thus be a no-op. 
- ConcreteCurrent ConcreteResourceInstanceNodeFunc - ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - - State *states.State -} - -func (t *StateTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[TRACE] StateTransformer: state is nil, so nothing to do") - return nil - } - - switch { - case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") - case t.ConcreteCurrent != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") - case t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") - default: - log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") - } - - for _, ms := range t.State.Modules { - for _, rs := range ms.Resources { - resourceAddr := rs.Addr - - for key, is := range rs.Instances { - addr := resourceAddr.Instance(key) - - if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteCurrent(abstract) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) - } - - if t.ConcreteDeposed != nil { - for dk := range is.Deposed { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteDeposed(abstract, dk) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) - } - } - } - } - } - - return nil -} diff --git a/internal/terraform/transform_targets.go b/internal/terraform/transform_targets.go deleted file mode 100644 index e603bcedb417..000000000000 --- a/internal/terraform/transform_targets.go +++ /dev/null @@ -1,159 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" -) - -// GraphNodeTargetable is an interface for 
graph nodes to implement when they -// need to be told about incoming targets. This is useful for nodes that need -// to respect targets as they dynamically expand. Note that the list of targets -// provided will contain every target provided, and each implementing graph -// node must filter this list to targets considered relevant. -type GraphNodeTargetable interface { - SetTargets([]addrs.Targetable) -} - -// TargetsTransformer is a GraphTransformer that, when the user specifies a -// list of resources to target, limits the graph to only those resources and -// their dependencies. -type TargetsTransformer struct { - // List of targeted resource names specified by the user - Targets []addrs.Targetable -} - -func (t *TargetsTransformer) Transform(g *Graph) error { - if len(t.Targets) > 0 { - targetedNodes, err := t.selectTargetedNodes(g, t.Targets) - if err != nil { - return err - } - - for _, v := range g.Vertices() { - if !targetedNodes.Include(v) { - log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) - g.Remove(v) - } - } - } - - return nil -} - -// Returns a set of targeted nodes. A targeted node is either addressed -// directly, address indirectly via its container, or it's a dependency of a -// targeted node. -func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (dag.Set, error) { - targetedNodes := make(dag.Set) - - vertices := g.Vertices() - - for _, v := range vertices { - if t.nodeIsTarget(v, addrs) { - targetedNodes.Add(v) - - // We inform nodes that ask about the list of targets - helps for nodes - // that need to dynamically expand. Note that this only occurs for nodes - // that are already directly targeted. - if tn, ok := v.(GraphNodeTargetable); ok { - tn.SetTargets(addrs) - } - - deps, _ := g.Ancestors(v) - for _, d := range deps { - targetedNodes.Add(d) - } - } - } - - // It is expected that outputs which are only derived from targeted - // resources are also updated. 
While we don't include any other possible - // side effects from the targeted nodes, these are added because outputs - // cannot be targeted on their own. - // Start by finding the root module output nodes themselves - for _, v := range vertices { - // outputs are all temporary value types - tv, ok := v.(graphNodeTemporaryValue) - if !ok { - continue - } - - // root module outputs indicate that while they are an output type, - // they not temporary and will return false here. - if tv.temporaryValue() { - continue - } - - // If this output is descended only from targeted resources, then we - // will keep it - deps, _ := g.Ancestors(v) - found := 0 - for _, d := range deps { - switch d.(type) { - case GraphNodeResourceInstance: - case GraphNodeConfigResource: - default: - continue - } - - if !targetedNodes.Include(d) { - // this dependency isn't being targeted, so we can't process this - // output - found = 0 - break - } - - found++ - } - - if found > 0 { - // we found an output we can keep; add it, and all it's dependencies - targetedNodes.Add(v) - for _, d := range deps { - targetedNodes.Add(d) - } - } - } - - return targetedNodes, nil -} - -func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { - var vertexAddr addrs.Targetable - switch r := v.(type) { - case GraphNodeResourceInstance: - vertexAddr = r.ResourceInstanceAddr() - case GraphNodeConfigResource: - vertexAddr = r.ResourceAddr() - - default: - // Only resource and resource instance nodes can be targeted. - return false - } - - for _, targetAddr := range targets { - switch vertexAddr.(type) { - case addrs.ConfigResource: - // Before expansion happens, we only have nodes that know their - // ConfigResource address. We need to take the more specific - // target addresses and generalize them in order to compare with a - // ConfigResource. 
- switch target := targetAddr.(type) { - case addrs.AbsResourceInstance: - targetAddr = target.ContainingResource().Config() - case addrs.AbsResource: - targetAddr = target.Config() - case addrs.ModuleInstance: - targetAddr = target.Module() - } - } - - if targetAddr.TargetContains(vertexAddr) { - return true - } - } - - return false -} diff --git a/internal/terraform/transform_targets_test.go b/internal/terraform/transform_targets_test.go deleted file mode 100644 index 0ed95c080978..000000000000 --- a/internal/terraform/transform_targets_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestTargetsTransformer(t *testing.T) { - mod := testModule(t, "transform-targets-basic") - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ReferenceTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &TargetsTransformer{ - Targets: []addrs.Targetable{ - addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "me", - ), - }, - } - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(` -aws_instance.me - aws_subnet.me -aws_subnet.me - aws_vpc.me -aws_vpc.me - `) - if actual != expected { - t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) - } -} - -func TestTargetsTransformer_downstream(t *testing.T) { - mod := testModule(t, "transform-targets-downstream") - - g := Graph{Path: addrs.RootModuleInstance} - { - transform := &ConfigTransformer{Config: mod} - if err := 
transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &OutputTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &ReferenceTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &TargetsTransformer{ - Targets: []addrs.Targetable{ - addrs.RootModuleInstance. - Child("child", addrs.NoKey). - Child("grandchild", addrs.NoKey). - Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - }, - } - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - actual := strings.TrimSpace(g.String()) - // Even though we only asked to target the grandchild resource, all of the - // outputs that descend from it are also targeted. - expected := strings.TrimSpace(` -module.child.module.grandchild.aws_instance.foo -module.child.module.grandchild.output.id (expand) - module.child.module.grandchild.aws_instance.foo -module.child.output.grandchild_id (expand) - module.child.module.grandchild.output.id (expand) -output.grandchild_id (expand) - module.child.output.grandchild_id (expand) - `) - if actual != expected { - t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) - } -} - -// This tests the TargetsTransformer targeting a whole module, -// rather than a resource within a module instance. 
-func TestTargetsTransformer_wholeModule(t *testing.T) { - mod := testModule(t, "transform-targets-downstream") - - g := Graph{Path: addrs.RootModuleInstance} - { - transform := &ConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &OutputTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - { - transform := &ReferenceTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &TargetsTransformer{ - Targets: []addrs.Targetable{ - addrs.RootModule. - Child("child"). - Child("grandchild"), - }, - } - if err := transform.Transform(&g); err != nil { - t.Fatalf("%T failed: %s", transform, err) - } - } - - actual := strings.TrimSpace(g.String()) - // Even though we only asked to target the grandchild module, all of the - // outputs that descend from it are also targeted. 
- expected := strings.TrimSpace(` -module.child.module.grandchild.aws_instance.foo -module.child.module.grandchild.output.id (expand) - module.child.module.grandchild.aws_instance.foo -module.child.output.grandchild_id (expand) - module.child.module.grandchild.output.id (expand) -output.grandchild_id (expand) - module.child.output.grandchild_id (expand) - `) - if actual != expected { - t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) - } -} diff --git a/internal/terraform/transform_transitive_reduction.go b/internal/terraform/transform_transitive_reduction.go deleted file mode 100644 index 0bb6cb377336..000000000000 --- a/internal/terraform/transform_transitive_reduction.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// TransitiveReductionTransformer is a GraphTransformer that -// finds the transitive reduction of the graph. For a definition of -// transitive reduction, see [Wikipedia](https://en.wikipedia.org/wiki/Transitive_reduction). -type TransitiveReductionTransformer struct{} - -func (t *TransitiveReductionTransformer) Transform(g *Graph) error { - // If the graph isn't valid, skip the transitive reduction. - // We don't error here because Terraform itself handles graph - // validation in a better way, or we assume it does. 
- if err := g.Validate(); err != nil { - return nil - } - - // Do it - g.TransitiveReduction() - - return nil -} diff --git a/internal/terraform/transform_transitive_reduction_test.go b/internal/terraform/transform_transitive_reduction_test.go deleted file mode 100644 index 1339d071fec2..000000000000 --- a/internal/terraform/transform_transitive_reduction_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func TestTransitiveReductionTransformer(t *testing.T) { - mod := testModule(t, "transform-trans-reduce-basic") - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after ConfigTransformer:\n%s", g.String()) - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &AttachSchemaTransformer{ - Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("aws"): { - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "A": { - Type: cty.String, - Optional: true, - }, - "B": { - Type: cty.String, - Optional: true, - }, - }, - }, - }, - }, - }), - } - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ReferenceTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after ReferenceTransformer:\n%s", g.String()) - } - - { - transform := &TransitiveReductionTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after TransitiveReductionTransformer:\n%s", g.String()) 
- } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformTransReduceBasicStr) - if actual != expected { - t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -const testTransformTransReduceBasicStr = ` -aws_instance.A -aws_instance.B - aws_instance.A -aws_instance.C - aws_instance.B -` diff --git a/internal/terraform/transform_variable.go b/internal/terraform/transform_variable.go deleted file mode 100644 index 4262ea3d6db0..000000000000 --- a/internal/terraform/transform_variable.go +++ /dev/null @@ -1,43 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" -) - -// RootVariableTransformer is a GraphTransformer that adds all the root -// variables to the graph. -// -// Root variables are currently no-ops but they must be added to the -// graph since downstream things that depend on them must be able to -// reach them. -type RootVariableTransformer struct { - Config *configs.Config - - RawValues InputValues -} - -func (t *RootVariableTransformer) Transform(g *Graph) error { - // We can have no variables if we have no config. - if t.Config == nil { - return nil - } - - // We're only considering root module variables here, since child - // module variables are handled by ModuleVariableTransformer. 
- vars := t.Config.Module.Variables - - // Add all variables here - for _, v := range vars { - node := &NodeRootVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Config: v, - RawValue: t.RawValues[v.Name], - } - g.Add(node) - } - - return nil -} diff --git a/internal/terraform/transform_vertex.go b/internal/terraform/transform_vertex.go deleted file mode 100644 index 6dd2f98dce31..000000000000 --- a/internal/terraform/transform_vertex.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/internal/dag" -) - -// VertexTransformer is a GraphTransformer that transforms vertices -// using the GraphVertexTransformers. The Transforms are run in sequential -// order. If a transform replaces a vertex then the next transform will see -// the new vertex. -type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/internal/terraform/transform_vertex_test.go b/internal/terraform/transform_vertex_test.go deleted file mode 100644 index 21d5d914a728..000000000000 --- a/internal/terraform/transform_vertex_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/internal/dag" -) - -func TestVertexTransformer_impl(t *testing.T) { - var _ GraphTransformer = 
new(VertexTransformer) -} - -func TestVertexTransformer(t *testing.T) { - var g Graph - g.Add(1) - g.Add(2) - g.Add(3) - g.Connect(dag.BasicEdge(1, 2)) - g.Connect(dag.BasicEdge(2, 3)) - - { - tf := &VertexTransformer{ - Transforms: []GraphVertexTransformer{ - &testVertexTransform{Source: 2, Target: 42}, - }, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testVertexTransformerStr) - if actual != expected { - t.Fatalf("bad: %s", actual) - } -} - -type testVertexTransform struct { - Source, Target dag.Vertex -} - -func (t *testVertexTransform) Transform(v dag.Vertex) (dag.Vertex, error) { - if t.Source == v { - v = t.Target - } - - return v, nil -} - -const testVertexTransformerStr = ` -1 - 42 -3 -42 - 3 -` diff --git a/internal/terraform/ui_input.go b/internal/terraform/ui_input.go deleted file mode 100644 index 688bcf71e43c..000000000000 --- a/internal/terraform/ui_input.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -import "context" - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. -type UIInput interface { - Input(context.Context, *InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string - - // Secret should be true if we are asking for sensitive input. 
- // If attached to a TTY, Terraform will disable echo. - Secret bool -} diff --git a/internal/terraform/ui_input_mock.go b/internal/terraform/ui_input_mock.go deleted file mode 100644 index e2d9c3848193..000000000000 --- a/internal/terraform/ui_input_mock.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import "context" - -// MockUIInput is an implementation of UIInput that can be used for tests. -type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/internal/terraform/ui_input_prefix.go b/internal/terraform/ui_input_prefix.go deleted file mode 100644 index b5d32b1e85d5..000000000000 --- a/internal/terraform/ui_input_prefix.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -import ( - "context" - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. 
-type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(ctx, opts) -} diff --git a/internal/terraform/ui_input_prefix_test.go b/internal/terraform/ui_input_prefix_test.go deleted file mode 100644 index dff42c39c5f8..000000000000 --- a/internal/terraform/ui_input_prefix_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "context" - "testing" -) - -func TestPrefixUIInput_impl(t *testing.T) { - var _ UIInput = new(PrefixUIInput) -} - -func TestPrefixUIInput(t *testing.T) { - input := new(MockUIInput) - prefix := &PrefixUIInput{ - IdPrefix: "foo", - UIInput: input, - } - - _, err := prefix.Input(context.Background(), &InputOpts{Id: "bar"}) - if err != nil { - t.Fatalf("err: %s", err) - } - - if input.InputOpts.Id != "foo.bar" { - t.Fatalf("bad: %#v", input.InputOpts) - } -} diff --git a/internal/terraform/ui_output.go b/internal/terraform/ui_output.go deleted file mode 100644 index 84427c63de1f..000000000000 --- a/internal/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. 
-type UIOutput interface { - Output(string) -} diff --git a/internal/terraform/ui_output_callback.go b/internal/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c5f0a6..000000000000 --- a/internal/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/internal/terraform/ui_output_callback_test.go b/internal/terraform/ui_output_callback_test.go deleted file mode 100644 index 1dd5ccddf9e8..000000000000 --- a/internal/terraform/ui_output_callback_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import ( - "testing" -) - -func TestCallbackUIOutput_impl(t *testing.T) { - var _ UIOutput = new(CallbackUIOutput) -} diff --git a/internal/terraform/ui_output_mock.go b/internal/terraform/ui_output_mock.go deleted file mode 100644 index d828c921ca3f..000000000000 --- a/internal/terraform/ui_output_mock.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import "sync" - -// MockUIOutput is an implementation of UIOutput that can be used for tests. 
-type MockUIOutput struct { - sync.Mutex - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.Lock() - defer o.Unlock() - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/internal/terraform/ui_output_mock_test.go b/internal/terraform/ui_output_mock_test.go deleted file mode 100644 index 0a23c2e2349a..000000000000 --- a/internal/terraform/ui_output_mock_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import ( - "testing" -) - -func TestMockUIOutput(t *testing.T) { - var _ UIOutput = new(MockUIOutput) -} diff --git a/internal/terraform/ui_output_provisioner.go b/internal/terraform/ui_output_provisioner.go deleted file mode 100644 index 22e5670cbd44..000000000000 --- a/internal/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/internal/addrs" -) - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. 
-type ProvisionerUIOutput struct { - InstanceAddr addrs.AbsResourceInstance - ProvisionerType string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) - } -} diff --git a/internal/terraform/ui_output_provisioner_test.go b/internal/terraform/ui_output_provisioner_test.go deleted file mode 100644 index baadd31817fd..000000000000 --- a/internal/terraform/ui_output_provisioner_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/addrs" -) - -func TestProvisionerUIOutput_impl(t *testing.T) { - var _ UIOutput = new(ProvisionerUIOutput) -} - -func TestProvisionerUIOutputOutput(t *testing.T) { - hook := new(MockHook) - output := &ProvisionerUIOutput{ - InstanceAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "test", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - ProvisionerType: "foo", - Hooks: []Hook{hook}, - } - - output.Output("bar") - - if !hook.ProvisionOutputCalled { - t.Fatal("hook.ProvisionOutput was not called, and should've been") - } - if got, want := hook.ProvisionOutputProvisionerType, "foo"; got != want { - t.Fatalf("wrong provisioner type\ngot: %q\nwant: %q", got, want) - } - if got, want := hook.ProvisionOutputMessage, "bar"; got != want { - t.Fatalf("wrong output message\ngot: %q\nwant: %q", got, want) - } -} diff --git a/internal/terraform/update_state_hook.go b/internal/terraform/update_state_hook.go deleted file mode 100644 index c2ed76e8ece3..000000000000 --- a/internal/terraform/update_state_hook.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -// updateStateHook calls the PostStateUpdate hook with the current state. 
-func updateStateHook(ctx EvalContext) error { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. - stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - return err -} diff --git a/internal/terraform/update_state_hook_test.go b/internal/terraform/update_state_hook_test.go deleted file mode 100644 index ac3e33f55dc1..000000000000 --- a/internal/terraform/update_state_hook_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" -) - -func TestUpdateStateHook(t *testing.T) { - mockHook := new(MockHook) - - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetLocalValue("foo", cty.StringVal("hello")) - - ctx := new(MockEvalContext) - ctx.HookHook = mockHook - ctx.StateState = state.SyncWrapper() - - if err := updateStateHook(ctx); err != nil { - t.Fatalf("err: %s", err) - } - - if !mockHook.PostStateUpdateCalled { - t.Fatal("should call PostStateUpdate") - } - if mockHook.PostStateUpdateState.LocalValue(addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) != cty.StringVal("hello") { - t.Fatalf("wrong state passed to hook: %s", spew.Sdump(mockHook.PostStateUpdateState)) - } -} diff --git a/internal/terraform/upgrade_resource_state.go b/internal/terraform/upgrade_resource_state.go deleted file mode 100644 index 906898e281a0..000000000000 --- a/internal/terraform/upgrade_resource_state.go +++ /dev/null @@ -1,206 +0,0 @@ -package 
terraform - -import ( - "encoding/json" - "fmt" - "log" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// upgradeResourceState will, if necessary, run the provider-defined upgrade -// logic against the given state object to make it compliant with the -// current schema version. This is a no-op if the given state object is -// already at the latest version. -// -// If any errors occur during upgrade, error diagnostics are returned. In that -// case it is not safe to proceed with using the original state object. -func upgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { - if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { - // We only do state upgrading for managed resources. - // This was a part of the normal workflow in older versions and - // returned early, so we are only going to log the error for now. - log.Printf("[ERROR] data resource %s should not require state upgrade", addr) - return src, nil - } - - // Remove any attributes from state that are not present in the schema. - // This was previously taken care of by the provider, but data sources do - // not go through the UpgradeResourceState process. - // - // Legacy flatmap state is already taken care of during conversion. - // If the schema version is be changed, then allow the provider to handle - // removed attributes. 
- if len(src.AttrsJSON) > 0 && src.SchemaVersion == currentVersion { - src.AttrsJSON = stripRemovedStateAttributes(src.AttrsJSON, currentSchema.ImpliedType()) - } - - stateIsFlatmap := len(src.AttrsJSON) == 0 - - // TODO: This should eventually use a proper FQN. - providerType := addr.Resource.Resource.ImpliedProvider() - if src.SchemaVersion > currentVersion { - log.Printf("[TRACE] upgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource instance managed by newer provider version", - // This is not a very good error message, but we don't retain enough - // information in state to give good feedback on what provider - // version might be required here. :( - fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), - )) - return nil, diags - } - - // If we get down here then we need to upgrade the state, with the - // provider's help. - // If this state was originally created by a version of Terraform prior to - // v0.12, this also includes translating from legacy flatmap to new-style - // representation, since only the provider has enough information to - // understand a flatmap built against an older schema. 
- if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] upgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) - } else { - log.Printf("[TRACE] upgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) - } - - req := providers.UpgradeResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - - // TODO: The internal schema version representations are all using - // uint64 instead of int64, but unsigned integers aren't friendly - // to all protobuf target languages so in practice we use int64 - // on the wire. In future we will change all of our internal - // representations to int64 too. - Version: int64(src.SchemaVersion), - } - - if stateIsFlatmap { - req.RawStateFlatmap = src.AttrsFlat - } else { - req.RawStateJSON = src.AttrsJSON - } - - resp := provider.UpgradeResourceState(req) - diags := resp.Diagnostics - if diags.HasErrors() { - return nil, diags - } - - // After upgrading, the new value must conform to the current schema. When - // going over RPC this is actually already ensured by the - // marshaling/unmarshaling of the new value, but we'll check it here - // anyway for robustness, e.g. for in-process providers. 
- newValue := resp.UpgradedState - if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource state upgrade", - fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), - )) - } - return nil, diags - } - - new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) - if err != nil { - // We already checked for type conformance above, so getting into this - // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to encode result of resource state upgrade", - fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), - )) - } - return new, diags -} - -// stripRemovedStateAttributes deletes any attributes no longer present in the -// schema, so that the json can be correctly decoded. -func stripRemovedStateAttributes(state []byte, ty cty.Type) []byte { - jsonMap := map[string]interface{}{} - err := json.Unmarshal(state, &jsonMap) - if err != nil { - // we just log any errors here, and let the normal decode process catch - // invalid JSON. - log.Printf("[ERROR] UpgradeResourceState: stripRemovedStateAttributes: %s", err) - return state - } - - // if no changes were made, we return the original state to ensure nothing - // was altered in the marshaling process. - if !removeRemovedAttrs(jsonMap, ty) { - return state - } - - js, err := json.Marshal(jsonMap) - if err != nil { - // if the json map was somehow mangled enough to not marhsal, something - // went horribly wrong - panic(err) - } - - return js -} - -// strip out the actual missing attributes, and return a bool indicating if any -// changes were made. 
-func removeRemovedAttrs(v interface{}, ty cty.Type) bool { - modified := false - // we're only concerned with finding maps that correspond to object - // attributes - switch v := v.(type) { - case []interface{}: - switch { - // If these aren't blocks the next call will be a noop - case ty.IsListType() || ty.IsSetType(): - eTy := ty.ElementType() - for _, eV := range v { - modified = removeRemovedAttrs(eV, eTy) || modified - } - } - return modified - case map[string]interface{}: - switch { - case ty.IsMapType(): - // map blocks aren't yet supported, but handle this just in case - eTy := ty.ElementType() - for _, eV := range v { - modified = removeRemovedAttrs(eV, eTy) || modified - } - return modified - - case ty == cty.DynamicPseudoType: - log.Printf("[DEBUG] UpgradeResourceState: ignoring dynamic block: %#v\n", v) - return false - - case ty.IsObjectType(): - attrTypes := ty.AttributeTypes() - for attr, attrV := range v { - attrTy, ok := attrTypes[attr] - if !ok { - log.Printf("[DEBUG] UpgradeResourceState: attribute %q no longer present in schema", attr) - delete(v, attr) - modified = true - continue - } - - modified = removeRemovedAttrs(attrV, attrTy) || modified - } - return modified - default: - // This shouldn't happen, and will fail to decode further on, so - // there's no need to handle it here. 
- log.Printf("[WARN] UpgradeResourceState: unexpected type %#v for map in json state", ty) - return false - } - } - return modified -} diff --git a/internal/terraform/upgrade_resource_state_test.go b/internal/terraform/upgrade_resource_state_test.go deleted file mode 100644 index 11ef77b5f374..000000000000 --- a/internal/terraform/upgrade_resource_state_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package terraform - -import ( - "reflect" - "testing" - - "github.com/zclconf/go-cty/cty" -) - -func TestStripRemovedStateAttributes(t *testing.T) { - cases := []struct { - name string - state map[string]interface{} - expect map[string]interface{} - ty cty.Type - modified bool - }{ - { - "removed string", - map[string]interface{}{ - "a": "ok", - "b": "gone", - }, - map[string]interface{}{ - "a": "ok", - }, - cty.Object(map[string]cty.Type{ - "a": cty.String, - }), - true, - }, - { - "removed null", - map[string]interface{}{ - "a": "ok", - "b": nil, - }, - map[string]interface{}{ - "a": "ok", - }, - cty.Object(map[string]cty.Type{ - "a": cty.String, - }), - true, - }, - { - "removed nested string", - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - "b": "removed", - }, - }, - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - }, - }, - cty.Object(map[string]cty.Type{ - "a": cty.String, - "b": cty.Object(map[string]cty.Type{ - "a": cty.String, - }), - }), - true, - }, - { - "removed nested list", - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - "b": []interface{}{"removed"}, - }, - }, - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - }, - }, - cty.Object(map[string]cty.Type{ - "a": cty.String, - "b": cty.Object(map[string]cty.Type{ - "a": cty.String, - }), - }), - true, - }, - { - "removed keys in set of objs", - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - "set": []interface{}{ - map[string]interface{}{ - 
"x": "ok", - "y": "removed", - }, - map[string]interface{}{ - "x": "ok", - "y": "removed", - }, - }, - }, - }, - map[string]interface{}{ - "a": "ok", - "b": map[string]interface{}{ - "a": "ok", - "set": []interface{}{ - map[string]interface{}{ - "x": "ok", - }, - map[string]interface{}{ - "x": "ok", - }, - }, - }, - }, - cty.Object(map[string]cty.Type{ - "a": cty.String, - "b": cty.Object(map[string]cty.Type{ - "a": cty.String, - "set": cty.Set(cty.Object(map[string]cty.Type{ - "x": cty.String, - })), - }), - }), - true, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - modified := removeRemovedAttrs(tc.state, tc.ty) - if !reflect.DeepEqual(tc.state, tc.expect) { - t.Fatalf("expected: %#v\n got: %#v\n", tc.expect, tc.state) - } - if modified != tc.modified { - t.Fatal("incorrect return value") - } - }) - } -} diff --git a/internal/terraform/util.go b/internal/terraform/util.go deleted file mode 100644 index 7966b58dd2fe..000000000000 --- a/internal/terraform/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package terraform - -import ( - "sort" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n <= 0 { - panic("semaphore with limit <=0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. 
-func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/internal/terraform/util_test.go b/internal/terraform/util_test.go deleted file mode 100644 index 8b3907e2366c..000000000000 --- a/internal/terraform/util_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "testing" - "time" -) - -func TestSemaphore(t *testing.T) { - s := NewSemaphore(2) - timer := time.AfterFunc(time.Second, func() { - panic("deadlock") - }) - defer timer.Stop() - - s.Acquire() - if !s.TryAcquire() { - t.Fatalf("should acquire") - } - if s.TryAcquire() { - t.Fatalf("should not acquire") - } - s.Release() - s.Release() - - // This release should panic - defer func() { - r := recover() - if r == nil { - t.Fatalf("should panic") - } - }() - s.Release() -} - -func TestStrSliceContains(t *testing.T) { - if strSliceContains(nil, "foo") { - t.Fatalf("Bad") - } - if strSliceContains([]string{}, "foo") { - t.Fatalf("Bad") - } - if strSliceContains([]string{"bar"}, "foo") { - t.Fatalf("Bad") - } - if !strSliceContains([]string{"bar", "foo"}, "foo") { - t.Fatalf("Bad") - } -} - -func TestUniqueStrings(t *testing.T) { - cases := []struct { - Input []string - Expected []string - }{ - { - []string{}, - []string{}, - }, - { - []string{"x"}, - []string{"x"}, - }, - { - []string{"a", "b", "c"}, - []string{"a", 
"b", "c"}, - }, - { - []string{"a", "a", "a"}, - []string{"a"}, - }, - { - []string{"a", "b", "a", "b", "a", "a"}, - []string{"a", "b"}, - }, - { - []string{"c", "b", "a", "c", "b"}, - []string{"a", "b", "c"}, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("unique-%d", i), func(t *testing.T) { - actual := uniqueStrings(tc.Input) - if !reflect.DeepEqual(tc.Expected, actual) { - t.Fatalf("Expected: %q\nGot: %q", tc.Expected, actual) - } - }) - } -} diff --git a/internal/terraform/validate_selfref.go b/internal/terraform/validate_selfref.go deleted file mode 100644 index ff00cded7514..000000000000 --- a/internal/terraform/validate_selfref.go +++ /dev/null @@ -1,60 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// validateSelfRef checks to ensure that expressions within a particular -// referencable block do not reference that same block. -func validateSelfRef(addr addrs.Referenceable, config hcl.Body, providerSchema *ProviderSchema) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - addrStrs := make([]string, 0, 1) - addrStrs = append(addrStrs, addr.String()) - switch tAddr := addr.(type) { - case addrs.ResourceInstance: - // A resource instance may not refer to its containing resource either. 
- addrStrs = append(addrStrs, tAddr.ContainingResource().String()) - } - - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr)) - return diags - } - - var schema *configschema.Block - switch tAddr := addr.(type) { - case addrs.Resource: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr) - case addrs.ResourceInstance: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) - } - - if schema == nil { - diags = diags.Append(fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr)) - return diags - } - - refs, _ := lang.ReferencesInBlock(config, schema) - for _, ref := range refs { - for _, addrStr := range addrStrs { - if ref.Subject.String() == addrStr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referential block", - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return diags -} diff --git a/internal/terraform/validate_selfref_test.go b/internal/terraform/validate_selfref_test.go deleted file mode 100644 index 73fda25d0a01..000000000000 --- a/internal/terraform/validate_selfref_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/internal/configs/configschema" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcltest" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -func TestValidateSelfRef(t *testing.T) { - rAddr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - } - - tests := []struct { - Name string - Addr addrs.Referenceable - Expr hcl.Expression - Err bool - }{ - { - "no references at all", - rAddr, - 
hcltest.MockExprLiteral(cty.StringVal("bar")), - false, - }, - - { - "non self reference", - rAddr, - hcltest.MockExprTraversalSrc("aws_instance.bar.id"), - false, - }, - - { - "self reference", - rAddr, - hcltest.MockExprTraversalSrc("aws_instance.foo.id"), - true, - }, - - { - "self reference other index", - rAddr, - hcltest.MockExprTraversalSrc("aws_instance.foo[4].id"), - false, - }, - - { - "self reference same index", - rAddr.Instance(addrs.IntKey(4)), - hcltest.MockExprTraversalSrc("aws_instance.foo[4].id"), - true, - }, - - { - "self reference whole", - rAddr.Instance(addrs.IntKey(4)), - hcltest.MockExprTraversalSrc("aws_instance.foo"), - true, - }, - } - - for i, test := range tests { - t.Run(fmt.Sprintf("%d-%s", i, test.Name), func(t *testing.T) { - body := hcltest.MockBody(&hcl.BodyContent{ - Attributes: hcl.Attributes{ - "foo": { - Name: "foo", - Expr: test.Expr, - }, - }, - }) - - ps := &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Required: true, - }, - }, - }, - }, - } - - diags := validateSelfRef(test.Addr, body, ps) - if diags.HasErrors() != test.Err { - if test.Err { - t.Errorf("unexpected success; want error") - } else { - t.Errorf("unexpected error\n\n%s", diags.Err()) - } - } - }) - } -} diff --git a/internal/terraform/valuesourcetype_string.go b/internal/terraform/valuesourcetype_string.go deleted file mode 100644 index 627593d762b5..000000000000 --- a/internal/terraform/valuesourcetype_string.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ValueFromUnknown-0] - _ = x[ValueFromConfig-67] - _ = x[ValueFromAutoFile-70] - _ = x[ValueFromNamedFile-78] - _ = x[ValueFromCLIArg-65] - _ = x[ValueFromEnvVar-69] - _ = x[ValueFromInput-73] - _ = x[ValueFromPlan-80] - _ = x[ValueFromCaller-83] -} - -const ( - _ValueSourceType_name_0 = "ValueFromUnknown" - _ValueSourceType_name_1 = "ValueFromCLIArg" - _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" - _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromNamedFile" - _ValueSourceType_name_6 = "ValueFromPlan" - _ValueSourceType_name_7 = "ValueFromCaller" -) - -var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 32} -) - -func (i ValueSourceType) String() string { - switch { - case i == 0: - return _ValueSourceType_name_0 - case i == 65: - return _ValueSourceType_name_1 - case i == 67: - return _ValueSourceType_name_2 - case 69 <= i && i <= 70: - i -= 69 - return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] - case i == 73: - return _ValueSourceType_name_4 - case i == 78: - return _ValueSourceType_name_5 - case i == 80: - return _ValueSourceType_name_6 - case i == 83: - return _ValueSourceType_name_7 - default: - return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/internal/terraform/variables.go b/internal/terraform/variables.go deleted file mode 100644 index a60f187003dd..000000000000 --- a/internal/terraform/variables.go +++ /dev/null @@ -1,315 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/tfdiags" -) - -// InputValue represents a raw value for a root module input variable as -// provided by the external caller into a function like terraform.Context.Plan. 
-// -// InputValue should represent as directly as possible what the user set the -// variable to, without any attempt to convert the value to the variable's -// type constraint or substitute the configured default values for variables -// that wasn't set. Those adjustments will be handled by Terraform Core itself -// as part of performing the requested operation. -// -// A Terraform Core caller must provide an InputValue object for each of the -// variables declared in the root module, even if the end user didn't provide -// an explicit value for some of them. See the Value field documentation for -// how to handle that situation. -// -// Terraform Core also internally uses InputValue to represent the raw value -// provided for a variable in a child module call, following the same -// conventions. However, that's an implementation detail not visible to -// outside callers. -type InputValue struct { - // Value is the raw value as provided by the user as part of the plan - // options, or a corresponding similar data structure for non-plan - // operations. - // - // If a particular variable declared in the root module is _not_ set by - // the user then the caller must still provide an InputValue for it but - // must set Value to cty.NilVal to represent the absense of a value. - // This requirement is to help detect situations where the caller isn't - // correctly detecting and handling all of the declared variables. - // - // For historical reasons it's important that callers distinguish the - // situation of the value not being set at all (cty.NilVal) from the - // situation of it being explicitly set to null (a cty.NullVal result): - // for "nullable" input variables that distinction unfortunately decides - // whether the final value will be the variable's default or will be - // explicitly null. 
- Value cty.Value - - // SourceType is a high-level category for where the value of Value - // came from, which Terraform Core uses to tailor some of its error - // messages to be more helpful to the user. - // - // Some SourceType values should be accompanied by a populated SourceRange - // value. See that field's documentation below for more information. - SourceType ValueSourceType - - // SourceRange provides source location information for values whose - // SourceType is either ValueFromConfig, ValueFromNamedFile, or - // ValueForNormalFile. It is not populated for other source types, and so - // should not be used. - SourceRange tfdiags.SourceRange -} - -// ValueSourceType describes what broad category of source location provided -// a particular value. -type ValueSourceType rune - -const ( - // ValueFromUnknown is the zero value of ValueSourceType and is not valid. - ValueFromUnknown ValueSourceType = 0 - - // ValueFromConfig indicates that a value came from a .tf or .tf.json file, - // e.g. the default value defined for a variable. - ValueFromConfig ValueSourceType = 'C' - - // ValueFromAutoFile indicates that a value came from a "values file", like - // a .tfvars file, that was implicitly loaded by naming convention. - ValueFromAutoFile ValueSourceType = 'F' - - // ValueFromNamedFile indicates that a value came from a named "values file", - // like a .tfvars file, that was passed explicitly on the command line (e.g. - // -var-file=foo.tfvars). - ValueFromNamedFile ValueSourceType = 'N' - - // ValueFromCLIArg indicates that the value was provided directly in - // a CLI argument. The name of this argument is not recorded and so it must - // be inferred from context. - ValueFromCLIArg ValueSourceType = 'A' - - // ValueFromEnvVar indicates that the value was provided via an environment - // variable. The name of the variable is not recorded and so it must be - // inferred from context. 
- ValueFromEnvVar ValueSourceType = 'E' - - // ValueFromInput indicates that the value was provided at an interactive - // input prompt. - ValueFromInput ValueSourceType = 'I' - - // ValueFromPlan indicates that the value was retrieved from a stored plan. - ValueFromPlan ValueSourceType = 'P' - - // ValueFromCaller indicates that the value was explicitly overridden by - // a caller to Context.SetVariable after the context was constructed. - ValueFromCaller ValueSourceType = 'S' -) - -func (v *InputValue) GoString() string { - if (v.SourceRange != tfdiags.SourceRange{}) { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) - } else { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) - } -} - -// HasSourceRange returns true if the reciever has a source type for which -// we expect the SourceRange field to be populated with a valid range. -func (v *InputValue) HasSourceRange() bool { - return v.SourceType.HasSourceRange() -} - -// HasSourceRange returns true if the reciever is one of the source types -// that is used along with a valid SourceRange field when appearing inside an -// InputValue object. -func (v ValueSourceType) HasSourceRange() bool { - switch v { - case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: - return true - default: - return false - } -} - -func (v ValueSourceType) GoString() string { - return fmt.Sprintf("terraform.%s", v) -} - -//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType - -// InputValues is a map of InputValue instances. -type InputValues map[string]*InputValue - -// InputValuesFromCaller turns the given map of naked values into an -// InputValues that attributes each value to "a caller", using the source -// type ValueFromCaller. This is primarily useful for testing purposes. 
-// -// This should not be used as a general way to convert map[string]cty.Value -// into InputValues, since in most real cases we want to set a suitable -// other SourceType and possibly SourceRange value. -func InputValuesFromCaller(vals map[string]cty.Value) InputValues { - ret := make(InputValues, len(vals)) - for k, v := range vals { - ret[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } - } - return ret -} - -// Override merges the given value maps with the receiver, overriding any -// conflicting keys so that the latest definition wins. -func (vv InputValues) Override(others ...InputValues) InputValues { - // FIXME: This should check to see if any of the values are maps and - // merge them if so, in order to preserve the behavior from prior to - // Terraform 0.12. - ret := make(InputValues) - for k, v := range vv { - ret[k] = v - } - for _, other := range others { - for k, v := range other { - ret[k] = v - } - } - return ret -} - -// JustValues returns a map that just includes the values, discarding the -// source information. -func (vv InputValues) JustValues() map[string]cty.Value { - ret := make(map[string]cty.Value, len(vv)) - for k, v := range vv { - ret[k] = v.Value - } - return ret -} - -// SameValues returns true if the given InputValues has the same values as -// the receiever, disregarding the source types and source ranges. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -func (vv InputValues) SameValues(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - } - - return true -} - -// HasValues returns true if the reciever has the same values as in the given -// map, disregarding the source types and source ranges. 
-// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -func (vv InputValues) HasValues(vals map[string]cty.Value) bool { - if len(vv) != len(vals) { - return false - } - - for k, v := range vv { - oVal, exists := vals[k] - if !exists { - return false - } - if !v.Value.RawEquals(oVal) { - return false - } - } - - return true -} - -// Identical returns true if the given InputValues has the same values, -// source types, and source ranges as the receiver. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -// -// This method is primarily for testing. For most practical purposes, it's -// better to use SameValues or HasValues. -func (vv InputValues) Identical(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - if v.SourceType != ov.SourceType { - return false - } - if v.SourceRange != ov.SourceRange { - return false - } - } - - return true -} - -// checkInputVariables ensures that the caller provided an InputValue -// definition for each root module variable declared in the configuration. -// The caller must provide an InputVariables with keys exactly matching -// the declared variables, though some of them may be marked explicitly -// unset by their values being cty.NilVal. -// -// This doesn't perform any type checking, default value substitution, or -// validation checks. Those are all handled during a graph walk when we -// visit the graph nodes representing each root variable. -// -// The set of values is considered valid only if the returned diagnostics -// does not contain errors. 
A valid set of values may still produce warnings, -// which should be returned to the user. -func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for name := range vcs { - _, isSet := vs[name] - if !isSet { - // Always an error, since the caller should have produced an - // item with Value: cty.NilVal to be explicit that it offered - // an opportunity to set this variable. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unassigned variable", - fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), - )) - continue - } - } - - // Check for any variables that are assigned without being configured. - // This is always an implementation error in the caller, because we - // expect undefined variables to be caught during context construction - // where there is better context to report it well. - for name := range vs { - if _, defined := vcs[name]; !defined { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Value assigned to undeclared variable", - fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), - )) - } - } - - return diags -} diff --git a/internal/terraform/variables_test.go b/internal/terraform/variables_test.go deleted file mode 100644 index 6e53a95750a2..000000000000 --- a/internal/terraform/variables_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/internal/configs" - "github.com/zclconf/go-cty/cty" -) - -func TestCheckInputVariables(t *testing.T) { - c := testModule(t, "input-variables") - - t.Run("No variables set", func(t *testing.T) { - // No variables set - diags := checkInputVariables(c.Module.Variables, nil) - if !diags.HasErrors() { - t.Fatal("check succeeded, but want errors") - } - - // Required variables set, optional variables unset - // This is still an error at 
this layer, since it's the caller's - // responsibility to have already merged in any default values. - diags = checkInputVariables(c.Module.Variables, InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCLIArg, - }, - }) - if !diags.HasErrors() { - t.Fatal("check succeeded, but want errors") - } - }) - - t.Run("All variables set", func(t *testing.T) { - diags := checkInputVariables(c.Module.Variables, InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCLIArg, - }, - "bar": &InputValue{ - Value: cty.StringVal("baz"), - SourceType: ValueFromCLIArg, - }, - "map": &InputValue{ - Value: cty.StringVal("baz"), // okay because config has no type constraint - SourceType: ValueFromCLIArg, - }, - "object_map": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "uno": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("baz"), - "bar": cty.NumberIntVal(2), // type = any - }), - "dos": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bat"), - "bar": cty.NumberIntVal(99), // type = any - }), - }), - SourceType: ValueFromCLIArg, - }, - "object_list": &InputValue{ - Value: cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("baz"), - "bar": cty.NumberIntVal(2), // type = any - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bang"), - "bar": cty.NumberIntVal(42), // type = any - }), - }), - SourceType: ValueFromCLIArg, - }, - }) - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - }) - - t.Run("Invalid Complex Types", func(t *testing.T) { - diags := checkInputVariables(c.Module.Variables, InputValues{ - "foo": &InputValue{ - Value: cty.StringVal("bar"), - SourceType: ValueFromCLIArg, - }, - "bar": &InputValue{ - Value: cty.StringVal("baz"), - SourceType: ValueFromCLIArg, - }, - "map": &InputValue{ - Value: cty.StringVal("baz"), // okay because config has no type constraint - SourceType: ValueFromCLIArg, - 
}, - "object_map": &InputValue{ - Value: cty.MapVal(map[string]cty.Value{ - "uno": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("baz"), - "bar": cty.NumberIntVal(2), // type = any - }), - "dos": cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bat"), - "bar": cty.NumberIntVal(99), // type = any - }), - }), - SourceType: ValueFromCLIArg, - }, - "object_list": &InputValue{ - Value: cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("baz"), - "bar": cty.NumberIntVal(2), // type = any - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bang"), - "bar": cty.StringVal("42"), // type = any, but mismatch with the first list item - }), - }), - SourceType: ValueFromCLIArg, - }, - }) - - if diags.HasErrors() { - t.Fatalf("unexpected errors: %s", diags.Err()) - } - }) -} - -// testInputValuesUnset is a helper for constructing InputValues values for -// situations where all of the root module variables are optional and a -// test case intends to just use those default values and not override them -// at all. -// -// In other words, this constructs an InputValues with one entry per given -// input variable declaration where all of them are declared as unset. 
-func testInputValuesUnset(decls map[string]*configs.Variable) InputValues { - if len(decls) == 0 { - return nil - } - - ret := make(InputValues, len(decls)) - for name := range decls { - ret[name] = &InputValue{ - Value: cty.NilVal, - SourceType: ValueFromUnknown, - } - } - return ret -} diff --git a/internal/terraform/version_required.go b/internal/terraform/version_required.go deleted file mode 100644 index 1861050b9573..000000000000 --- a/internal/terraform/version_required.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/tfdiags" - - "github.com/hashicorp/terraform/internal/configs" - - tfversion "github.com/hashicorp/terraform/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. -func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - // Before checking if the constraints are met, check that we are not using any prerelease fields as these - // are not currently supported. - var prereleaseDiags tfdiags.Diagnostics - for _, required := range constraint.Required { - if required.Prerelease() { - prereleaseDiags = prereleaseDiags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid required_version constraint", - Detail: fmt.Sprintf( - "Prerelease version constraints are not supported: %s. Remove the prerelease information from the constraint. 
Prerelease versions of terraform will match constraints using their version core only.", - required.String()), - Subject: constraint.DeclRange.Ptr(), - }) - } - } - - if len(prereleaseDiags) > 0 { - // There were some prerelease fields in the constraints. Don't check the constraints as they will - // fail, and populate the diagnostics for these constraints with the prerelease diagnostics. - diags = diags.Append(prereleaseDiags) - continue - } - - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: constraint.DeclRange.Ptr(), - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. 
Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: constraint.DeclRange.Ptr(), - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/internal/terraform/walkoperation_string.go b/internal/terraform/walkoperation_string.go deleted file mode 100644 index 799d4dae27c7..000000000000 --- a/internal/terraform/walkoperation_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[walkInvalid-0] - _ = x[walkApply-1] - _ = x[walkPlan-2] - _ = x[walkPlanDestroy-3] - _ = x[walkValidate-4] - _ = x[walkDestroy-5] - _ = x[walkImport-6] - _ = x[walkEval-7] -} - -const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkValidatewalkDestroywalkImportwalkEval" - -var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 55, 66, 76, 84} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/internal/tfdiags/rpc_friendly.go b/internal/tfdiags/rpc_friendly.go deleted file mode 100644 index 4c627bf98aac..000000000000 --- a/internal/tfdiags/rpc_friendly.go +++ /dev/null @@ -1,64 +0,0 @@ -package tfdiags - -import ( - "encoding/gob" -) - -type rpcFriendlyDiag struct { - Severity_ Severity - Summary_ string - Detail_ string - Subject_ *SourceRange - Context_ *SourceRange -} - -// rpcFriendlyDiag 
transforms a given diagnostic so that is more friendly to -// RPC. -// -// In particular, it currently returns an object that can be serialized and -// later re-inflated using gob. This definition may grow to include other -// serializations later. -func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { - desc := diag.Description() - source := diag.Source() - return &rpcFriendlyDiag{ - Severity_: diag.Severity(), - Summary_: desc.Summary, - Detail_: desc.Detail, - Subject_: source.Subject, - Context_: source.Context, - } -} - -func (d *rpcFriendlyDiag) Severity() Severity { - return d.Severity_ -} - -func (d *rpcFriendlyDiag) Description() Description { - return Description{ - Summary: d.Summary_, - Detail: d.Detail_, - } -} - -func (d *rpcFriendlyDiag) Source() Source { - return Source{ - Subject: d.Subject_, - Context: d.Context_, - } -} - -func (d rpcFriendlyDiag) FromExpr() *FromExpr { - // RPC-friendly diagnostics cannot preserve expression information because - // expressions themselves are not RPC-friendly. - return nil -} - -func (d rpcFriendlyDiag) ExtraInfo() interface{} { - // RPC-friendly diagnostics always discard any "extra information". 
- return nil -} - -func init() { - gob.Register((*rpcFriendlyDiag)(nil)) -} diff --git a/internal/ipaddr/LICENSE b/ipaddr/LICENSE similarity index 100% rename from internal/ipaddr/LICENSE rename to ipaddr/LICENSE diff --git a/internal/ipaddr/PATENTS b/ipaddr/PATENTS similarity index 100% rename from internal/ipaddr/PATENTS rename to ipaddr/PATENTS diff --git a/internal/ipaddr/README.md b/ipaddr/README.md similarity index 100% rename from internal/ipaddr/README.md rename to ipaddr/README.md diff --git a/internal/ipaddr/doc.go b/ipaddr/doc.go similarity index 100% rename from internal/ipaddr/doc.go rename to ipaddr/doc.go diff --git a/internal/ipaddr/ip.go b/ipaddr/ip.go similarity index 100% rename from internal/ipaddr/ip.go rename to ipaddr/ip.go diff --git a/internal/ipaddr/ip_test.go b/ipaddr/ip_test.go similarity index 100% rename from internal/ipaddr/ip_test.go rename to ipaddr/ip_test.go diff --git a/internal/ipaddr/parse.go b/ipaddr/parse.go similarity index 100% rename from internal/ipaddr/parse.go rename to ipaddr/parse.go diff --git a/internal/lang/blocktoattr/doc.go b/lang/blocktoattr/doc.go similarity index 100% rename from internal/lang/blocktoattr/doc.go rename to lang/blocktoattr/doc.go diff --git a/internal/lang/blocktoattr/fixup.go b/lang/blocktoattr/fixup.go similarity index 99% rename from internal/lang/blocktoattr/fixup.go rename to lang/blocktoattr/fixup.go index 5d05a86f2f5f..38bfda3e88dd 100644 --- a/internal/lang/blocktoattr/fixup.go +++ b/lang/blocktoattr/fixup.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/blocktoattr/fixup_bench_test.go b/lang/blocktoattr/fixup_bench_test.go similarity index 97% rename from internal/lang/blocktoattr/fixup_bench_test.go rename to lang/blocktoattr/fixup_bench_test.go index 
518fcfd0fb81..1515d2effdd4 100644 --- a/internal/lang/blocktoattr/fixup_bench_test.go +++ b/lang/blocktoattr/fixup_bench_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/blocktoattr/fixup_test.go b/lang/blocktoattr/fixup_test.go similarity index 99% rename from internal/lang/blocktoattr/fixup_test.go rename to lang/blocktoattr/fixup_test.go index 36ab48041c9a..6ab799fc3e51 100644 --- a/internal/lang/blocktoattr/fixup_test.go +++ b/lang/blocktoattr/fixup_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/hcl/v2/hclsyntax" hcljson "github.com/hashicorp/hcl/v2/json" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/lang/blocktoattr/schema.go b/lang/blocktoattr/schema.go new file mode 100644 index 000000000000..31e010cc79c5 --- /dev/null +++ b/lang/blocktoattr/schema.go @@ -0,0 +1,146 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct 
some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. 
+ probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. + ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. 
+func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. +func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/lang/blocktoattr/variables.go b/lang/blocktoattr/variables.go new file mode 100644 index 000000000000..ae5c609dfe4b --- /dev/null +++ b/lang/blocktoattr/variables.go @@ -0,0 +1,45 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. 
+func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
+ } + } + + return vars +} diff --git a/lang/blocktoattr/variables_test.go b/lang/blocktoattr/variables_test.go new file mode 100644 index 000000000000..0fa417142585 --- /dev/null +++ b/lang/blocktoattr/variables_test.go @@ -0,0 +1,200 @@ +package blocktoattr + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestExpandedVariables(t *testing.T) { + fooSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + "bar": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + } + + tests := map[string]struct { + src string + json bool + schema *configschema.Block + want []hcl.Traversal + }{ + "empty": { + src: ``, + schema: &configschema.Block{}, + want: nil, + }, + "attribute syntax": { + src: ` +foo = [ + { + bar = baz + }, +] +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 11, Byte: 23}, + End: hcl.Pos{Line: 4, Column: 14, Byte: 26}, + }, + }, + }, + }, + }, + "block syntax": { + src: ` +foo { + bar = baz +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 9, Byte: 15}, + End: hcl.Pos{Line: 3, Column: 12, Byte: 18}, + }, + }, + }, + }, + }, + "block syntax with nested blocks": { + src: ` +foo { + bar { + boop = baz + } +} +`, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.List(cty.Object(map[string]cty.Type{ + "boop": 
cty.String, + })), + })), + Optional: true, + }, + }, + }, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 12, Byte: 26}, + End: hcl.Pos{Line: 4, Column: 15, Byte: 29}, + }, + }, + }, + }, + }, + "dynamic block syntax": { + src: ` +dynamic "foo" { + for_each = beep + content { + bar = baz + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 5, Column: 11, Byte: 57}, + End: hcl.Pos{Line: 5, Column: 14, Byte: 60}, + }, + }, + }, + }, + }, + "misplaced dynamic block": { + src: ` +dynamic "bar" { + for_each = beep + content { + key = val + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var f *hcl.File + var diags hcl.Diagnostics + if test.json { + f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") + } else { + f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) + } + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + t.FailNow() + } + + got := ExpandedVariables(f.Body, test.schema) + + co := cmpopts.IgnoreUnexported(hcl.TraverseRoot{}) + if !cmp.Equal(got, test.want, co) { + t.Errorf("wrong result\n%s", cmp.Diff(test.want, got, co)) + } + }) + } + +} diff --git a/internal/lang/data.go b/lang/data.go similarity index 94% rename from internal/lang/data.go rename to lang/data.go index 
710fccedc8c5..a47a2a32de20 100644 --- a/internal/lang/data.go +++ b/lang/data.go @@ -1,8 +1,8 @@ package lang import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/data_test.go b/lang/data_test.go similarity index 95% rename from internal/lang/data_test.go rename to lang/data_test.go index e86a8561839b..4215bdae0f1f 100644 --- a/internal/lang/data_test.go +++ b/lang/data_test.go @@ -1,8 +1,8 @@ package lang import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/doc.go b/lang/doc.go similarity index 100% rename from internal/lang/doc.go rename to lang/doc.go diff --git a/internal/lang/eval.go b/lang/eval.go similarity index 98% rename from internal/lang/eval.go rename to lang/eval.go index 5c82392bcc44..41e7a144f91f 100644 --- a/internal/lang/eval.go +++ b/lang/eval.go @@ -6,11 +6,11 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/ext/dynblock" "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/lang/blocktoattr" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/instances" + "github.com/hashicorp/terraform/lang/blocktoattr" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" ) diff --git a/internal/lang/eval_test.go b/lang/eval_test.go similarity index 99% rename 
from internal/lang/eval_test.go rename to lang/eval_test.go index 37e9e54a0c08..4b812a7edac3 100644 --- a/internal/lang/eval_test.go +++ b/lang/eval_test.go @@ -5,9 +5,9 @@ import ( "encoding/json" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/instances" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/instances" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" diff --git a/internal/lang/funcs/cidr.go b/lang/funcs/cidr.go similarity index 99% rename from internal/lang/funcs/cidr.go rename to lang/funcs/cidr.go index bf878b50cb7a..e3b007cede58 100644 --- a/internal/lang/funcs/cidr.go +++ b/lang/funcs/cidr.go @@ -5,7 +5,7 @@ import ( "math/big" "github.com/apparentlymart/go-cidr/cidr" - "github.com/hashicorp/terraform/internal/ipaddr" + "github.com/hashicorp/terraform/ipaddr" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/gocty" diff --git a/internal/lang/funcs/cidr_test.go b/lang/funcs/cidr_test.go similarity index 100% rename from internal/lang/funcs/cidr_test.go rename to lang/funcs/cidr_test.go diff --git a/internal/lang/funcs/collection.go b/lang/funcs/collection.go similarity index 100% rename from internal/lang/funcs/collection.go rename to lang/funcs/collection.go diff --git a/internal/lang/funcs/collection_test.go b/lang/funcs/collection_test.go similarity index 99% rename from internal/lang/funcs/collection_test.go rename to lang/funcs/collection_test.go index d470f357edbe..ca1f3cc0d4ea 100644 --- a/internal/lang/funcs/collection_test.go +++ b/lang/funcs/collection_test.go @@ -5,7 +5,7 @@ import ( "math" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git 
a/internal/lang/funcs/conversion.go b/lang/funcs/conversion.go similarity index 97% rename from internal/lang/funcs/conversion.go rename to lang/funcs/conversion.go index 721606226e3f..3f1ed7a32130 100644 --- a/internal/lang/funcs/conversion.go +++ b/lang/funcs/conversion.go @@ -3,8 +3,8 @@ package funcs import ( "strconv" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/lang/types" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/lang/types" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" "github.com/zclconf/go-cty/cty/function" diff --git a/internal/lang/funcs/conversion_test.go b/lang/funcs/conversion_test.go similarity index 98% rename from internal/lang/funcs/conversion_test.go rename to lang/funcs/conversion_test.go index 9c3e7e9f74ae..1cbc41c2ed42 100644 --- a/internal/lang/funcs/conversion_test.go +++ b/lang/funcs/conversion_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/funcs/crypto.go b/lang/funcs/crypto.go similarity index 100% rename from internal/lang/funcs/crypto.go rename to lang/funcs/crypto.go diff --git a/internal/lang/funcs/crypto_test.go b/lang/funcs/crypto_test.go similarity index 100% rename from internal/lang/funcs/crypto_test.go rename to lang/funcs/crypto_test.go diff --git a/internal/lang/funcs/datetime.go b/lang/funcs/datetime.go similarity index 100% rename from internal/lang/funcs/datetime.go rename to lang/funcs/datetime.go diff --git a/internal/lang/funcs/datetime_test.go b/lang/funcs/datetime_test.go similarity index 100% rename from internal/lang/funcs/datetime_test.go rename to lang/funcs/datetime_test.go diff --git a/internal/lang/funcs/descriptions.go b/lang/funcs/descriptions.go similarity index 100% rename from internal/lang/funcs/descriptions.go rename to 
lang/funcs/descriptions.go diff --git a/internal/lang/funcs/encoding.go b/lang/funcs/encoding.go similarity index 100% rename from internal/lang/funcs/encoding.go rename to lang/funcs/encoding.go diff --git a/internal/lang/funcs/encoding_test.go b/lang/funcs/encoding_test.go similarity index 99% rename from internal/lang/funcs/encoding_test.go rename to lang/funcs/encoding_test.go index 2e05784e828b..cac5a8fdcc94 100644 --- a/internal/lang/funcs/encoding_test.go +++ b/lang/funcs/encoding_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/funcs/filesystem.go b/lang/funcs/filesystem.go similarity index 100% rename from internal/lang/funcs/filesystem.go rename to lang/funcs/filesystem.go diff --git a/lang/funcs/filesystem_test.go b/lang/funcs/filesystem_test.go new file mode 100644 index 000000000000..b5caa49146bb --- /dev/null +++ b/lang/funcs/filesystem_test.go @@ -0,0 +1,695 @@ +package funcs + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/terraform/lang/marks" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +func TestFile(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/icon.png").Mark(marks.Sensitive), + cty.NilVal, + `contents of (sensitive value) are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/missing").Mark(marks.Sensitive), + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("File(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := File(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTemplateFile(t *testing.T) { + tests := []struct { + Path cty.Value + Vars cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.EmptyObjectVal, + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.EmptyObjectVal, + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 
function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/secrets.txt").Mark(marks.Sensitive), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("Jodie"), + }), + cty.StringVal("Hello, Jodie!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name!": cty.StringVal("Jodie"), + }), + cty.NilVal, + `invalid template variable name "name!": must start with a letter, followed by zero or more letters, digits, and underscores`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Jimbo"), + }), + cty.StringVal("Hello, Jimbo!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.EmptyObjectVal, + cty.NilVal, + `vars map does not contain key "name", referenced at testdata/hello.tmpl:1,10-14`, + }, + { + cty.StringVal("testdata/func.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("The items are a, b, c"), + ``, + }, + { + cty.StringVal("testdata/recursive.tmpl"), + cty.MapValEmpty(cty.String), + 
cty.NilVal, + `testdata/recursive.tmpl:1,3-16: Error in function call; Call to function "templatefile" failed: cannot recursively call templatefile from inside templatefile call.`, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("- a\n- b\n- c\n"), + ``, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.True, + }), + cty.NilVal, + `testdata/list.tmpl:1,13-17: Iteration over non-iterable value; A value of type bool cannot be used as the collection in a 'for' expression.`, + }, + { + cty.StringVal("testdata/bare.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True, + }), + cty.True, // since this template contains only an interpolation, its true value shines through + ``, + }, + } + + templateFileFn := MakeTemplateFileFunc(".", func() map[string]function.Function { + return map[string]function.Function{ + "join": stdlib.JoinFunc, + "templatefile": MakeFileFunc(".", false), // just a placeholder, since templatefile itself overrides this + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("TemplateFile(%#v, %#v)", test.Path, test.Vars), func(t *testing.T) { + got, err := templateFileFn.Call([]cty.Value{test.Path, test.Vars}) + + if argErr, ok := err.(function.ArgError); ok { + if argErr.Index < 0 || argErr.Index > 1 { + t.Errorf("ArgError index %d is out of range for templatefile (must be 0 or 1)", argErr.Index) + } + } + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileExists(t 
*testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.BoolVal(true), + ``, + }, + { + cty.StringVal(""), + cty.BoolVal(false), + `"." is a directory, not a file`, + }, + { + cty.StringVal("testdata").Mark(marks.Sensitive), + cty.BoolVal(false), + `(sensitive value) is a directory, not a file`, + }, + { + cty.StringVal("testdata/missing"), + cty.BoolVal(false), + ``, + }, + { + cty.StringVal("testdata/unreadable/foobar"), + cty.BoolVal(false), + `failed to stat "testdata/unreadable/foobar"`, + }, + { + cty.StringVal("testdata/unreadable/foobar").Mark(marks.Sensitive), + cty.BoolVal(false), + `failed to stat (sensitive value)`, + }, + } + + // Ensure "unreadable" directory cannot be listed during the test run + fi, err := os.Lstat("testdata/unreadable") + if err != nil { + t.Fatal(err) + } + os.Chmod("testdata/unreadable", 0000) + defer func(mode os.FileMode) { + os.Chmod("testdata/unreadable", mode) + }(fi.Mode()) + + for _, test := range tests { + t.Run(fmt.Sprintf("FileExists(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileExists(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSet(t *testing.T) { + tests := []struct { + Path cty.Value + Pattern cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("."), + cty.StringVal("testdata*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("{testdata,missing}"), + cty.SetValEmpty(cty.String), + ``, + }, + { + 
cty.StringVal("."), + cty.StringVal("testdata/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + 
cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("["), + cty.SetValEmpty(cty.String), + `failed to glob pattern "[": syntax error in pattern`, + }, + { + cty.StringVal("."), + cty.StringVal("[").Mark(marks.Sensitive), + cty.SetValEmpty(cty.String), + `failed to glob pattern (sensitive value): syntax error in pattern`, + }, + { + cty.StringVal("."), + cty.StringVal("\\"), + cty.SetValEmpty(cty.String), + `failed to glob pattern "\\": syntax error in pattern`, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileSet(\".\", %#v, %#v)", test.Path, test.Pattern), func(t *testing.T) { + got, err := FileSet(".", test.Path, test.Pattern) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + 
cty.StringVal("testdata/hello.txt"), + cty.StringVal("SGVsbG8gV29ybGQ="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAq1BMVEX///9cTuVeUeRcTuZcTuZcT+VbSe1cTuVdT+MAAP9JSbZcT+VcTuZAQLFAQLJcTuVcTuZcUuBBQbA/P7JAQLJaTuRcT+RcTuVGQ7xAQLJVVf9cTuVcTuVGRMFeUeRbTeJcTuU/P7JeTeZbTOVcTeZAQLJBQbNAQLNaUORcTeZbT+VcTuRAQLNAQLRdTuRHR8xgUOdgUN9cTuVdTeRdT+VZTulcTuVAQLL///8+GmETAAAANnRSTlMApibw+osO6DcBB3fIX87+oRk3yehB0/Nj/gNs7nsTRv3dHmu//JYUMLVr3bssjxkgEK5CaxeK03nIAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAADoQAAA6EBvJf9gwAAAAd0SU1FB+EEBRIQDxZNTKsAAACCSURBVBjTfc7JFsFQEATQQpCYxyBEzJ55rvf/f0ZHcyQLvelTd1GngEwWycs5+UISyKLraSi9geWKK9Gr1j7AeqOJVtt2XtD1Bchef2BjQDAcCTC0CsA4mihMtXw2XwgsV2sFw812F+4P3y2GdI6nn3FGSs//4HJNAXDzU4Dg/oj/E+bsEbhf5cMsAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDE3LTA0LTA1VDE4OjE2OjE1KzAyOjAws5bLVQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNy0wNC0wNVQxODoxNjoxNSswMjowMMLLc+kAAAAZdEVYdFNvZnR3YXJlAHd3dy5pbmtzY2FwZS5vcmeb7jwaAAAAC3RFWHRUaXRsZQBHcm91cJYfIowAAABXelRYdFJhdyBwcm9maWxlIHR5cGUgaXB0YwAAeJzj8gwIcVYoKMpPy8xJ5VIAAyMLLmMLEyMTS5MUAxMgRIA0w2QDI7NUIMvY1MjEzMQcxAfLgEigSi4A6hcRdPJCNZUAAAAASUVORK5CYII="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileBase64(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileBase64(".", test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBasename(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal("hello.txt"), + 
cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Basename(%#v)", test.Path), func(t *testing.T) { + got, err := Basename(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestDirname(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("testdata"), + false, + }, + { + cty.StringVal("testdata/foo/hello.txt"), + cty.StringVal("testdata/foo"), + false, + }, + { + cty.StringVal("hello.txt"), + cty.StringVal("."), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { + got, err := Dirname(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestPathExpand(t *testing.T) { + homePath, err := homedir.Dir() + if err != nil { + t.Fatalf("Error getting home directory: %v", err) + } + + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("~/test-file"), + cty.StringVal(filepath.Join(homePath, "test-file")), + false, + }, + { + cty.StringVal("~/another/test/file"), + cty.StringVal(filepath.Join(homePath, "another/test/file")), + false, + }, + { + cty.StringVal("/root/file"), + cty.StringVal("/root/file"), + false, + }, + { + cty.StringVal("/"), + cty.StringVal("/"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", 
test.Path), func(t *testing.T) { + got, err := Pathexpand(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/internal/lang/funcs/number.go b/lang/funcs/number.go similarity index 100% rename from internal/lang/funcs/number.go rename to lang/funcs/number.go diff --git a/internal/lang/funcs/number_test.go b/lang/funcs/number_test.go similarity index 99% rename from internal/lang/funcs/number_test.go rename to lang/funcs/number_test.go index 6caf19af1899..6419414102f4 100644 --- a/internal/lang/funcs/number_test.go +++ b/lang/funcs/number_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/funcs/redact.go b/lang/funcs/redact.go similarity index 87% rename from internal/lang/funcs/redact.go rename to lang/funcs/redact.go index bbec3f0a1bd1..d9fb3fc90cea 100644 --- a/internal/lang/funcs/redact.go +++ b/lang/funcs/redact.go @@ -3,7 +3,7 @@ package funcs import ( "fmt" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/funcs/redact_test.go b/lang/funcs/redact_test.go similarity index 95% rename from internal/lang/funcs/redact_test.go rename to lang/funcs/redact_test.go index e378d5f5afe7..f5a1c669e5d8 100644 --- a/internal/lang/funcs/redact_test.go +++ b/lang/funcs/redact_test.go @@ -3,7 +3,7 @@ package funcs import ( "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/lang/funcs/sensitive.go b/lang/funcs/sensitive.go new file mode 100644 index 
000000000000..9fa65287a5e9 --- /dev/null +++ b/lang/funcs/sensitive.go @@ -0,0 +1,67 @@ +package funcs + +import ( + "github.com/hashicorp/terraform/lang/marks" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// SensitiveFunc returns a value identical to its argument except that +// Terraform will consider it to be sensitive. +var SensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val, _ := args[0].Unmark() + return val.Mark(marks.Sensitive), nil + }, +}) + +// NonsensitiveFunc takes a sensitive value and returns the same value without +// the sensitive marking, effectively exposing the value. +var NonsensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. 
+ return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if args[0].IsKnown() && !args[0].HasMark(marks.Sensitive) { + return cty.DynamicVal, function.NewArgErrorf(0, "the given value is not sensitive, so this call is redundant") + } + v, m := args[0].Unmark() + delete(m, marks.Sensitive) // remove the sensitive marking + return v.WithMarks(m), nil + }, +}) + +func Sensitive(v cty.Value) (cty.Value, error) { + return SensitiveFunc.Call([]cty.Value{v}) +} + +func Nonsensitive(v cty.Value) (cty.Value, error) { + return NonsensitiveFunc.Call([]cty.Value{v}) +} diff --git a/internal/lang/funcs/sensitive_test.go b/lang/funcs/sensitive_test.go similarity index 98% rename from internal/lang/funcs/sensitive_test.go rename to lang/funcs/sensitive_test.go index 2d0120e8e770..46d1627428ae 100644 --- a/internal/lang/funcs/sensitive_test.go +++ b/lang/funcs/sensitive_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/funcs/string.go b/lang/funcs/string.go similarity index 100% rename from internal/lang/funcs/string.go rename to lang/funcs/string.go diff --git a/internal/lang/funcs/string_test.go b/lang/funcs/string_test.go similarity index 100% rename from internal/lang/funcs/string_test.go rename to lang/funcs/string_test.go diff --git a/internal/lang/funcs/testdata/bare.tmpl b/lang/funcs/testdata/bare.tmpl similarity index 100% rename from internal/lang/funcs/testdata/bare.tmpl rename to lang/funcs/testdata/bare.tmpl diff --git a/internal/lang/funcs/testdata/func.tmpl b/lang/funcs/testdata/func.tmpl similarity index 100% rename from internal/lang/funcs/testdata/func.tmpl rename to lang/funcs/testdata/func.tmpl diff --git a/internal/lang/funcs/testdata/hello.tmpl b/lang/funcs/testdata/hello.tmpl similarity index 100% rename from 
internal/lang/funcs/testdata/hello.tmpl rename to lang/funcs/testdata/hello.tmpl diff --git a/internal/lang/funcs/testdata/hello.txt b/lang/funcs/testdata/hello.txt similarity index 100% rename from internal/lang/funcs/testdata/hello.txt rename to lang/funcs/testdata/hello.txt diff --git a/internal/lang/funcs/testdata/icon.png b/lang/funcs/testdata/icon.png similarity index 100% rename from internal/lang/funcs/testdata/icon.png rename to lang/funcs/testdata/icon.png diff --git a/internal/lang/funcs/testdata/list.tmpl b/lang/funcs/testdata/list.tmpl similarity index 100% rename from internal/lang/funcs/testdata/list.tmpl rename to lang/funcs/testdata/list.tmpl diff --git a/internal/lang/funcs/testdata/recursive.tmpl b/lang/funcs/testdata/recursive.tmpl similarity index 100% rename from internal/lang/funcs/testdata/recursive.tmpl rename to lang/funcs/testdata/recursive.tmpl diff --git a/internal/lang/funcs/testdata/unreadable/foobar b/lang/funcs/testdata/unreadable/foobar similarity index 100% rename from internal/lang/funcs/testdata/unreadable/foobar rename to lang/funcs/testdata/unreadable/foobar diff --git a/internal/lang/functions.go b/lang/functions.go similarity index 98% rename from internal/lang/functions.go rename to lang/functions.go index 8b1c9192d1c1..fefd3d47eddb 100644 --- a/internal/lang/functions.go +++ b/lang/functions.go @@ -9,8 +9,8 @@ import ( "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/hashicorp/terraform/internal/experiments" - "github.com/hashicorp/terraform/internal/lang/funcs" + "github.com/hashicorp/terraform/experiments" + "github.com/hashicorp/terraform/lang/funcs" ) var impureFunctions = []string{ diff --git a/internal/lang/functions_descriptions_test.go b/lang/functions_descriptions_test.go similarity index 91% rename from internal/lang/functions_descriptions_test.go rename to lang/functions_descriptions_test.go index ee456d87ac3e..e179000e774b 100644 --- 
a/internal/lang/functions_descriptions_test.go +++ b/lang/functions_descriptions_test.go @@ -3,7 +3,7 @@ package lang import ( "testing" - "github.com/hashicorp/terraform/internal/lang/funcs" + "github.com/hashicorp/terraform/lang/funcs" ) func TestFunctionDescriptions(t *testing.T) { diff --git a/internal/lang/functions_test.go b/lang/functions_test.go similarity index 99% rename from internal/lang/functions_test.go rename to lang/functions_test.go index e6d5e1987478..4fa528f96507 100644 --- a/internal/lang/functions_test.go +++ b/lang/functions_test.go @@ -8,8 +8,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/experiments" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/experiments" + "github.com/hashicorp/terraform/lang/marks" homedir "github.com/mitchellh/go-homedir" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/globalref/analyzer.go b/lang/globalref/analyzer.go similarity index 94% rename from internal/lang/globalref/analyzer.go rename to lang/globalref/analyzer.go index 7a24d781ef39..1915302e3075 100644 --- a/internal/lang/globalref/analyzer.go +++ b/lang/globalref/analyzer.go @@ -3,9 +3,9 @@ package globalref import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/providers" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" ) // Analyzer is the main component of this package, serving as a container for diff --git a/internal/lang/globalref/analyzer_contributing_resources.go b/lang/globalref/analyzer_contributing_resources.go similarity index 98% rename from internal/lang/globalref/analyzer_contributing_resources.go rename to lang/globalref/analyzer_contributing_resources.go index 4024bafd0c8d..a61a8f43ad09 100644 --- 
a/internal/lang/globalref/analyzer_contributing_resources.go +++ b/lang/globalref/analyzer_contributing_resources.go @@ -3,7 +3,7 @@ package globalref import ( "sort" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // ContributingResources analyzes all of the given references and diff --git a/internal/lang/globalref/analyzer_contributing_resources_test.go b/lang/globalref/analyzer_contributing_resources_test.go similarity index 99% rename from internal/lang/globalref/analyzer_contributing_resources_test.go rename to lang/globalref/analyzer_contributing_resources_test.go index 79c441c4318f..61962ec4ffaa 100644 --- a/internal/lang/globalref/analyzer_contributing_resources_test.go +++ b/lang/globalref/analyzer_contributing_resources_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestAnalyzerContributingResources(t *testing.T) { diff --git a/internal/lang/globalref/analyzer_meta_references.go b/lang/globalref/analyzer_meta_references.go similarity index 99% rename from internal/lang/globalref/analyzer_meta_references.go rename to lang/globalref/analyzer_meta_references.go index 9a2bb89920bf..e717533283af 100644 --- a/internal/lang/globalref/analyzer_meta_references.go +++ b/lang/globalref/analyzer_meta_references.go @@ -2,9 +2,9 @@ package globalref import ( "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" "github.com/zclconf/go-cty/cty/gocty" diff --git a/internal/lang/globalref/analyzer_meta_references_shortcuts.go 
b/lang/globalref/analyzer_meta_references_shortcuts.go similarity index 96% rename from internal/lang/globalref/analyzer_meta_references_shortcuts.go rename to lang/globalref/analyzer_meta_references_shortcuts.go index 580e99b360f5..a715cb8a9102 100644 --- a/internal/lang/globalref/analyzer_meta_references_shortcuts.go +++ b/lang/globalref/analyzer_meta_references_shortcuts.go @@ -3,8 +3,8 @@ package globalref import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" ) // ReferencesFromOutputValue returns all of the direct references from the diff --git a/internal/lang/globalref/analyzer_meta_references_test.go b/lang/globalref/analyzer_meta_references_test.go similarity index 98% rename from internal/lang/globalref/analyzer_meta_references_test.go rename to lang/globalref/analyzer_meta_references_test.go index c693890cf6d2..014b60b93015 100644 --- a/internal/lang/globalref/analyzer_meta_references_test.go +++ b/lang/globalref/analyzer_meta_references_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestAnalyzerMetaReferences(t *testing.T) { diff --git a/internal/lang/globalref/analyzer_test.go b/lang/globalref/analyzer_test.go similarity index 88% rename from internal/lang/globalref/analyzer_test.go rename to lang/globalref/analyzer_test.go index 0a66217e7d9a..cc0de50c251e 100644 --- a/internal/lang/globalref/analyzer_test.go +++ b/lang/globalref/analyzer_test.go @@ -5,12 +5,12 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/providers" - 
"github.com/hashicorp/terraform/internal/registry" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/registry" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/globalref/doc.go b/lang/globalref/doc.go similarity index 100% rename from internal/lang/globalref/doc.go rename to lang/globalref/doc.go diff --git a/internal/lang/globalref/reference.go b/lang/globalref/reference.go similarity index 98% rename from internal/lang/globalref/reference.go rename to lang/globalref/reference.go index d47cecfa70c7..eb9b7defbba2 100644 --- a/internal/lang/globalref/reference.go +++ b/lang/globalref/reference.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/lang/globalref/testdata/assorted/assorted-root.tf b/lang/globalref/testdata/assorted/assorted-root.tf similarity index 100% rename from internal/lang/globalref/testdata/assorted/assorted-root.tf rename to lang/globalref/testdata/assorted/assorted-root.tf diff --git a/internal/lang/globalref/testdata/assorted/child/assorted-child.tf b/lang/globalref/testdata/assorted/child/assorted-child.tf similarity index 100% rename from internal/lang/globalref/testdata/assorted/child/assorted-child.tf rename to lang/globalref/testdata/assorted/child/assorted-child.tf diff --git a/internal/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf b/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf similarity index 100% rename from 
internal/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf rename to lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf diff --git a/internal/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf b/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf similarity index 100% rename from internal/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf rename to lang/globalref/testdata/contributing-resources/contributing-resources-root.tf diff --git a/internal/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf b/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf similarity index 100% rename from internal/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf rename to lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf diff --git a/internal/lang/marks/marks.go b/lang/marks/marks.go similarity index 100% rename from internal/lang/marks/marks.go rename to lang/marks/marks.go diff --git a/internal/lang/references.go b/lang/references.go similarity index 92% rename from internal/lang/references.go rename to lang/references.go index 7f41b09b6173..569251cb8dfb 100644 --- a/internal/lang/references.go +++ b/lang/references.go @@ -2,10 +2,10 @@ package lang import ( "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/blocktoattr" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/blocktoattr" + "github.com/hashicorp/terraform/tfdiags" ) // References finds all of the references in the given set of traversals, diff 
--git a/internal/lang/scope.go b/lang/scope.go similarity index 94% rename from internal/lang/scope.go rename to lang/scope.go index 6c229e25d90a..3a34e9ca2a07 100644 --- a/internal/lang/scope.go +++ b/lang/scope.go @@ -5,8 +5,8 @@ import ( "github.com/zclconf/go-cty/cty/function" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/experiments" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/experiments" ) // Scope is the main type in this package, allowing dynamic evaluation of diff --git a/internal/lang/testdata/functions-test/hello.tmpl b/lang/testdata/functions-test/hello.tmpl similarity index 100% rename from internal/lang/testdata/functions-test/hello.tmpl rename to lang/testdata/functions-test/hello.tmpl diff --git a/internal/lang/testdata/functions-test/hello.txt b/lang/testdata/functions-test/hello.txt similarity index 100% rename from internal/lang/testdata/functions-test/hello.txt rename to lang/testdata/functions-test/hello.txt diff --git a/internal/lang/testdata/functions-test/subdirectory/hello.tmpl b/lang/testdata/functions-test/subdirectory/hello.tmpl similarity index 100% rename from internal/lang/testdata/functions-test/subdirectory/hello.tmpl rename to lang/testdata/functions-test/subdirectory/hello.tmpl diff --git a/internal/lang/testdata/functions-test/subdirectory/hello.txt b/lang/testdata/functions-test/subdirectory/hello.txt similarity index 100% rename from internal/lang/testdata/functions-test/subdirectory/hello.txt rename to lang/testdata/functions-test/subdirectory/hello.txt diff --git a/internal/lang/types/type_type.go b/lang/types/type_type.go similarity index 100% rename from internal/lang/types/type_type.go rename to lang/types/type_type.go diff --git a/internal/lang/types/types.go b/lang/types/types.go similarity index 100% rename from internal/lang/types/types.go rename to lang/types/types.go diff --git a/internal/legacy/helper/acctest/acctest.go 
b/legacy/helper/acctest/acctest.go similarity index 100% rename from internal/legacy/helper/acctest/acctest.go rename to legacy/helper/acctest/acctest.go diff --git a/internal/legacy/helper/acctest/random.go b/legacy/helper/acctest/random.go similarity index 100% rename from internal/legacy/helper/acctest/random.go rename to legacy/helper/acctest/random.go diff --git a/internal/legacy/helper/acctest/random_test.go b/legacy/helper/acctest/random_test.go similarity index 100% rename from internal/legacy/helper/acctest/random_test.go rename to legacy/helper/acctest/random_test.go diff --git a/internal/legacy/helper/acctest/remotetests.go b/legacy/helper/acctest/remotetests.go similarity index 100% rename from internal/legacy/helper/acctest/remotetests.go rename to legacy/helper/acctest/remotetests.go diff --git a/internal/legacy/helper/hashcode/hashcode.go b/legacy/helper/hashcode/hashcode.go similarity index 100% rename from internal/legacy/helper/hashcode/hashcode.go rename to legacy/helper/hashcode/hashcode.go diff --git a/internal/legacy/helper/hashcode/hashcode_test.go b/legacy/helper/hashcode/hashcode_test.go similarity index 100% rename from internal/legacy/helper/hashcode/hashcode_test.go rename to legacy/helper/hashcode/hashcode_test.go diff --git a/legacy/helper/schema/backend.go b/legacy/helper/schema/backend.go new file mode 100644 index 000000000000..17cdb060cddd --- /dev/null +++ b/legacy/helper/schema/backend.go @@ -0,0 +1,200 @@ +package schema + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" + ctyconvert "github.com/zclconf/go-cty/cty/convert" +) + +// Backend represents a partial backend.Backend implementation and simplifies +// the creation of configuration loading and validation. 
+// +// Unlike other schema structs such as Provider, this struct is meant to be +// embedded within your actual implementation. It provides implementations +// only for Input and Configure and gives you a method for accessing the +// configuration in the form of a ResourceData that you're expected to call +// from the other implementation funcs. +type Backend struct { + // Schema is the schema for the configuration of this backend. If this + // Backend has no configuration this can be omitted. + Schema map[string]*Schema + + // ConfigureFunc is called to configure the backend. Use the + // FromContext* methods to extract information from the context. + // This can be nil, in which case nothing will be called but the + // config will still be stored. + ConfigureFunc func(context.Context) error + + config *ResourceData +} + +var ( + backendConfigKey = contextKey("backend config") +) + +// FromContextBackendConfig extracts a ResourceData with the configuration +// from the context. This should only be called by Backend functions. +func FromContextBackendConfig(ctx context.Context) *ResourceData { + return ctx.Value(backendConfigKey).(*ResourceData) +} + +func (b *Backend) ConfigSchema() *configschema.Block { + // This is an alias of CoreConfigSchema just to implement the + // backend.Backend interface. + return b.CoreConfigSchema() +} + +func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { + if b == nil { + return configVal, nil + } + var diags tfdiags.Diagnostics + var err error + + // In order to use Transform below, this needs to be filled out completely + // according the schema. + configVal, err = b.CoreConfigSchema().CoerceValue(configVal) + if err != nil { + return configVal, diags.Append(err) + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags + } + + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags +} + +func (b *Backend) 
Configure(obj cty.Value) tfdiags.Diagnostics { + if b == nil { + return nil + } + + var diags tfdiags.Diagnostics + sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, shimRC, nil, nil, true) + if err != nil { + diags = diags.Append(err) + return diags + } + + data, err := sm.Data(nil, diff) + if err != nil { + diags = diags.Append(err) + return diags + } + b.config = data + + if b.ConfigureFunc != nil { + err = b.ConfigureFunc(context.WithValue( + context.Background(), backendConfigKey, data)) + if err != nil { + diags = diags.Append(err) + return diags + } + } + + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *terraform.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. + shimMap = map[string]interface{}{} + } + return &terraform.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } +} + +// Config returns the configuration. This is available after Configure is +// called. 
+func (b *Backend) Config() *ResourceData { + return b.config +} diff --git a/internal/legacy/helper/schema/backend_test.go b/legacy/helper/schema/backend_test.go similarity index 100% rename from internal/legacy/helper/schema/backend_test.go rename to legacy/helper/schema/backend_test.go diff --git a/internal/legacy/helper/schema/core_schema.go b/legacy/helper/schema/core_schema.go similarity index 99% rename from internal/legacy/helper/schema/core_schema.go rename to legacy/helper/schema/core_schema.go index da9c502da00f..6a53db1a71a3 100644 --- a/internal/legacy/helper/schema/core_schema.go +++ b/legacy/helper/schema/core_schema.go @@ -3,7 +3,7 @@ package schema import ( "fmt" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/legacy/helper/schema/core_schema_test.go b/legacy/helper/schema/core_schema_test.go similarity index 99% rename from internal/legacy/helper/schema/core_schema_test.go rename to legacy/helper/schema/core_schema_test.go index 84649c8bec14..7d4b32e01956 100644 --- a/internal/legacy/helper/schema/core_schema_test.go +++ b/legacy/helper/schema/core_schema_test.go @@ -7,7 +7,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) // add the implicit "id" attribute for test resources diff --git a/internal/legacy/helper/schema/data_source_resource_shim.go b/legacy/helper/schema/data_source_resource_shim.go similarity index 100% rename from internal/legacy/helper/schema/data_source_resource_shim.go rename to legacy/helper/schema/data_source_resource_shim.go diff --git a/internal/legacy/helper/schema/doc.go b/legacy/helper/schema/doc.go similarity index 100% rename from internal/legacy/helper/schema/doc.go rename to legacy/helper/schema/doc.go diff --git 
a/internal/legacy/helper/schema/equal.go b/legacy/helper/schema/equal.go similarity index 100% rename from internal/legacy/helper/schema/equal.go rename to legacy/helper/schema/equal.go diff --git a/internal/legacy/helper/schema/field_reader.go b/legacy/helper/schema/field_reader.go similarity index 100% rename from internal/legacy/helper/schema/field_reader.go rename to legacy/helper/schema/field_reader.go diff --git a/internal/legacy/helper/schema/field_reader_config.go b/legacy/helper/schema/field_reader_config.go similarity index 99% rename from internal/legacy/helper/schema/field_reader_config.go rename to legacy/helper/schema/field_reader_config.go index f4a43d1fce43..6509e48a6769 100644 --- a/internal/legacy/helper/schema/field_reader_config.go +++ b/legacy/helper/schema/field_reader_config.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" "github.com/mitchellh/mapstructure" ) diff --git a/internal/legacy/helper/schema/field_reader_config_test.go b/legacy/helper/schema/field_reader_config_test.go similarity index 98% rename from internal/legacy/helper/schema/field_reader_config_test.go rename to legacy/helper/schema/field_reader_config_test.go index 7a22f3ce1687..6dfc20566a90 100644 --- a/internal/legacy/helper/schema/field_reader_config_test.go +++ b/legacy/helper/schema/field_reader_config_test.go @@ -6,9 +6,9 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/helper/hashcode" + "github.com/hashicorp/terraform/legacy/terraform" ) func TestConfigFieldReader_impl(t *testing.T) { diff --git a/internal/legacy/helper/schema/field_reader_diff.go b/legacy/helper/schema/field_reader_diff.go 
similarity index 99% rename from internal/legacy/helper/schema/field_reader_diff.go rename to legacy/helper/schema/field_reader_diff.go index 84ebe272e073..ec9fa2887c3d 100644 --- a/internal/legacy/helper/schema/field_reader_diff.go +++ b/legacy/helper/schema/field_reader_diff.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" "github.com/mitchellh/mapstructure" ) diff --git a/internal/legacy/helper/schema/field_reader_diff_test.go b/legacy/helper/schema/field_reader_diff_test.go similarity index 99% rename from internal/legacy/helper/schema/field_reader_diff_test.go rename to legacy/helper/schema/field_reader_diff_test.go index 1f6fa7da1743..77f387619c25 100644 --- a/internal/legacy/helper/schema/field_reader_diff_test.go +++ b/legacy/helper/schema/field_reader_diff_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" ) func TestDiffFieldReader_impl(t *testing.T) { diff --git a/internal/legacy/helper/schema/field_reader_map.go b/legacy/helper/schema/field_reader_map.go similarity index 100% rename from internal/legacy/helper/schema/field_reader_map.go rename to legacy/helper/schema/field_reader_map.go diff --git a/internal/legacy/helper/schema/field_reader_map_test.go b/legacy/helper/schema/field_reader_map_test.go similarity index 100% rename from internal/legacy/helper/schema/field_reader_map_test.go rename to legacy/helper/schema/field_reader_map_test.go diff --git a/internal/legacy/helper/schema/field_reader_multi.go b/legacy/helper/schema/field_reader_multi.go similarity index 100% rename from internal/legacy/helper/schema/field_reader_multi.go rename to legacy/helper/schema/field_reader_multi.go diff --git a/internal/legacy/helper/schema/field_reader_multi_test.go b/legacy/helper/schema/field_reader_multi_test.go similarity index 98% rename 
from internal/legacy/helper/schema/field_reader_multi_test.go rename to legacy/helper/schema/field_reader_multi_test.go index 7410335f68db..cc2c808fc79a 100644 --- a/internal/legacy/helper/schema/field_reader_multi_test.go +++ b/legacy/helper/schema/field_reader_multi_test.go @@ -5,7 +5,7 @@ import ( "strconv" "testing" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" ) func TestMultiLevelFieldReaderReadFieldExact(t *testing.T) { diff --git a/internal/legacy/helper/schema/field_reader_test.go b/legacy/helper/schema/field_reader_test.go similarity index 100% rename from internal/legacy/helper/schema/field_reader_test.go rename to legacy/helper/schema/field_reader_test.go diff --git a/internal/legacy/helper/schema/field_writer.go b/legacy/helper/schema/field_writer.go similarity index 100% rename from internal/legacy/helper/schema/field_writer.go rename to legacy/helper/schema/field_writer.go diff --git a/internal/legacy/helper/schema/field_writer_map.go b/legacy/helper/schema/field_writer_map.go similarity index 100% rename from internal/legacy/helper/schema/field_writer_map.go rename to legacy/helper/schema/field_writer_map.go diff --git a/internal/legacy/helper/schema/field_writer_map_test.go b/legacy/helper/schema/field_writer_map_test.go similarity index 100% rename from internal/legacy/helper/schema/field_writer_map_test.go rename to legacy/helper/schema/field_writer_map_test.go diff --git a/internal/legacy/helper/schema/getsource_string.go b/legacy/helper/schema/getsource_string.go similarity index 100% rename from internal/legacy/helper/schema/getsource_string.go rename to legacy/helper/schema/getsource_string.go diff --git a/legacy/helper/schema/provider.go b/legacy/helper/schema/provider.go new file mode 100644 index 000000000000..a4cb879b4c1c --- /dev/null +++ b/legacy/helper/schema/provider.go @@ -0,0 +1,477 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + 
multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/legacy/terraform" +) + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. If this provider has no meta info, + // this can be omitted. 
This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // Terraform team. + ProviderMetaSchema map[string]*Schema + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // See the ConfigureFunc documentation for more information. + ConfigureFunc ConfigureFunc + + // MetaReset is called by TestReset to reset any state stored in the meta + // interface. This is especially important if the StopContext is stored by + // the provider. + MetaReset func() error + + meta interface{} + + // a mutex is required because TestReset can directly replace the stopCtx + stopMu sync.Mutex + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. 
+func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k, _ := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// Stopped reports whether the provider has been stopped or not. +func (p *Provider) Stopped() bool { + ctx := p.StopContext() + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// StopCh returns a channel that is closed once the provider is stopped. 
+func (p *Provider) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + return p.stopCtx +} + +func (p *Provider) stopInit() { + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of terraform.ResourceProvider interface. +func (p *Provider) Stop() error { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtxCancel() + return nil +} + +// TestReset resets any state stored in the Provider, and will call TestReset +// on Meta if it implements the TestProvider interface. +// This may be used to reset the schema.Provider at the start of a test, and is +// automatically called by resource.Test. +func (p *Provider) TestReset() error { + p.stopInit() + if p.MetaReset != nil { + return p.MetaReset() + } + return nil +} + +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Input implementation of terraform.ResourceProvider interface. +func (p *Provider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + return schemaMap(p.Schema).Input(input, c) +} + +// Validate implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.ResourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support resource: %s", t)} + } + + return r.Validate(c) +} + +// Configure implementation of terraform.ResourceProvider interface. +func (p *Provider) Configure(c *terraform.ResourceConfig) error { + // No configuration + if p.ConfigureFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c, nil, p.meta, true) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + + meta, err := p.ConfigureFunc(data) + if err != nil { + return err + } + + p.meta = meta + return nil +} + +// Apply implementation of terraform.ResourceProvider interface. +func (p *Provider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Apply(s, d, p.meta) +} + +// Diff implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, p.meta) +} + +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) +} + +// Refresh implementation of terraform.ResourceProvider interface. +func (p *Provider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Refresh(s, p.meta) +} + +// Resources implementation of terraform.ResourceProvider interface. +func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. 
+ SchemaAvailable: true, + }) + } + + return result +} + +func (p *Provider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil { + var err error + results, err = r.Importer.State(data, p.meta) + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.DataSourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support data source: %s", t)} + } + + return r.Validate(c) +} + +// ReadDataDiff implementation of terraform.ResourceProvider interface. 
+func (p *Provider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.Diff(nil, c, p.meta) +} + +// RefreshData implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.ReadDataApply(d, p.meta) +} + +// DataSources implementation of terraform.ResourceProvider interface. +func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k, _ := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. 
+ SchemaAvailable: true, + }) + } + + return result +} diff --git a/legacy/helper/schema/provider_test.go b/legacy/helper/schema/provider_test.go new file mode 100644 index 000000000000..7b9bd62c01b3 --- /dev/null +++ b/legacy/helper/schema/provider_test.go @@ -0,0 +1,620 @@ +package schema + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/legacy/terraform" +) + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = new(Provider) +} + +func TestProviderGetSchema(t *testing.T) { + // This functionality is already broadly tested in core_schema_test.go, + // so this is just to ensure that the call passes through correctly. + p := &Provider{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Required: true, + }, + }, + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Required: true, + }, + }, + }, + }, + DataSourcesMap: map[string]*Resource{ + "baz": &Resource{ + Schema: map[string]*Schema{ + "bur": { + Type: TypeString, + Required: true, + }, + }, + }, + }, + } + + want := &terraform.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }, + ResourceTypes: map[string]*configschema.Block{ + "foo": testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + DataSources: map[string]*configschema.Block{ + "baz": testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bur": &configschema.Attribute{ + Type: cty.String, 
+ Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + } + got, err := p.GetSchema(&terraform.ProviderSchemaRequest{ + ResourceTypes: []string{"foo", "bar"}, + DataSources: []string{"baz", "bar"}, + }) + if err != nil { + t.Fatalf("unexpected error %s", err) + } + + if !cmp.Equal(got, want, equateEmpty, typeComparer) { + t.Error("wrong result:\n", cmp.Diff(got, want, equateEmpty, typeComparer)) + } +} + +func TestProviderConfigure(t *testing.T) { + cases := []struct { + P *Provider + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{}, + Config: nil, + Err: false, + }, + + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ConfigureFunc: func(d *ResourceData) (interface{}, error) { + if d.Get("foo").(int) == 42 { + return nil, nil + } + + return nil, fmt.Errorf("nope") + }, + }, + Config: map[string]interface{}{ + "foo": 42, + }, + Err: false, + }, + + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ConfigureFunc: func(d *ResourceData) (interface{}, error) { + if d.Get("foo").(int) == 42 { + return nil, nil + } + + return nil, fmt.Errorf("nope") + }, + }, + Config: map[string]interface{}{ + "foo": 52, + }, + Err: true, + }, + } + + for i, tc := range cases { + c := terraform.NewResourceConfigRaw(tc.Config) + err := tc.P.Configure(c) + if err != nil != tc.Err { + t.Fatalf("%d: %s", i, err) + } + } +} + +func TestProviderResources(t *testing.T) { + cases := []struct { + P *Provider + Result []terraform.ResourceType + }{ + { + P: &Provider{}, + Result: []terraform.ResourceType{}, + }, + + { + P: &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": nil, + "bar": nil, + }, + }, + Result: []terraform.ResourceType{ + terraform.ResourceType{Name: "bar", SchemaAvailable: true}, + terraform.ResourceType{Name: "foo", SchemaAvailable: true}, + }, + }, + + { + P: &Provider{ + ResourcesMap: 
map[string]*Resource{ + "foo": nil, + "bar": &Resource{Importer: &ResourceImporter{}}, + "baz": nil, + }, + }, + Result: []terraform.ResourceType{ + terraform.ResourceType{Name: "bar", Importable: true, SchemaAvailable: true}, + terraform.ResourceType{Name: "baz", SchemaAvailable: true}, + terraform.ResourceType{Name: "foo", SchemaAvailable: true}, + }, + }, + } + + for i, tc := range cases { + actual := tc.P.Resources() + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestProviderDataSources(t *testing.T) { + cases := []struct { + P *Provider + Result []terraform.DataSource + }{ + { + P: &Provider{}, + Result: []terraform.DataSource{}, + }, + + { + P: &Provider{ + DataSourcesMap: map[string]*Resource{ + "foo": nil, + "bar": nil, + }, + }, + Result: []terraform.DataSource{ + terraform.DataSource{Name: "bar", SchemaAvailable: true}, + terraform.DataSource{Name: "foo", SchemaAvailable: true}, + }, + }, + } + + for i, tc := range cases { + actual := tc.P.DataSources() + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: got %#v; want %#v", i, actual, tc.Result) + } + } +} + +func TestProviderValidate(t *testing.T) { + cases := []struct { + P *Provider + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{}, + }, + }, + Config: nil, + Err: true, + }, + } + + for i, tc := range cases { + c := terraform.NewResourceConfigRaw(tc.Config) + _, es := tc.P.Validate(c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v", i, es) + } + } +} + +func TestProviderDiff_legacyTimeoutType(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": []interface{}{ + map[string]interface{}{ + 
"create": "40m", + }, + }, + } + ic := terraform.NewResourceConfigRaw(invalidCfg) + _, err := p.Diff( + &terraform.InstanceInfo{ + Type: "blah", + }, + nil, + ic, + ) + if err != nil { + t.Fatal(err) + } +} + +func TestProviderDiff_timeoutInvalidValue(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": map[string]interface{}{ + "create": "invalid", + }, + } + ic := terraform.NewResourceConfigRaw(invalidCfg) + _, err := p.Diff( + &terraform.InstanceInfo{ + Type: "blah", + }, + nil, + ic, + ) + if err == nil { + t.Fatal("Expected provider.Diff to fail with invalid timeout value") + } + expectedErrMsg := `time: invalid duration "invalid"` + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Unexpected error message: %q\nExpected message to contain %q", + err.Error(), + expectedErrMsg) + } +} + +func TestProviderValidateResource(t *testing.T) { + cases := []struct { + P *Provider + Type string + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{}, + Type: "foo", + Config: nil, + Err: true, + }, + + { + P: &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{}, + }, + }, + Type: "foo", + Config: nil, + Err: false, + }, + } + + for i, tc := range cases { + c := terraform.NewResourceConfigRaw(tc.Config) + _, es := tc.P.ValidateResource(tc.Type, c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v", i, es) + } + } +} + +func TestProviderImportState_default(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{}, + }, + }, + } + + states, err := p.ImportState(&terraform.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if len(states) != 1 
{ + t.Fatalf("bad: %#v", states) + } + if states[0].ID != "bar" { + t.Fatalf("bad: %#v", states) + } +} + +func TestProviderImportState_setsId(t *testing.T) { + var val string + stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { + val = d.Id() + return []*ResourceData{d}, nil + } + + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{ + State: stateFunc, + }, + }, + }, + } + + _, err := p.ImportState(&terraform.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if val != "bar" { + t.Fatal("should set id") + } +} + +func TestProviderImportState_setsType(t *testing.T) { + var tVal string + stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { + d.SetId("foo") + tVal = d.State().Ephemeral.Type + return []*ResourceData{d}, nil + } + + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{ + State: stateFunc, + }, + }, + }, + } + + _, err := p.ImportState(&terraform.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if tVal != "foo" { + t.Fatal("should set type") + } +} + +func TestProviderMeta(t *testing.T) { + p := new(Provider) + if v := p.Meta(); v != nil { + t.Fatalf("bad: %#v", v) + } + + expected := 42 + p.SetMeta(42) + if v := p.Meta(); !reflect.DeepEqual(v, expected) { + t.Fatalf("bad: %#v", v) + } +} + +func TestProviderStop(t *testing.T) { + var p Provider + + if p.Stopped() { + t.Fatal("should not be stopped") + } + + // Verify stopch blocks + ch := p.StopContext().Done() + select { + case <-ch: + t.Fatal("should not be stopped") + case <-time.After(10 * time.Millisecond): + } + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify + if !p.Stopped() { + t.Fatal("should be stopped") + } + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be 
stopped") + } +} + +func TestProviderStop_stopFirst(t *testing.T) { + var p Provider + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify + if !p.Stopped() { + t.Fatal("should be stopped") + } + + select { + case <-p.StopContext().Done(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} + +func TestProviderReset(t *testing.T) { + var p Provider + stopCtx := p.StopContext() + p.MetaReset = func() error { + stopCtx = p.StopContext() + return nil + } + + // cancel the current context + p.Stop() + + if err := p.TestReset(); err != nil { + t.Fatal(err) + } + + // the first context should have been replaced + if err := stopCtx.Err(); err != nil { + t.Fatal(err) + } + + // we should not get a canceled context here either + if err := p.StopContext().Err(); err != nil { + t.Fatal(err) + } +} + +func TestProvider_InternalValidate(t *testing.T) { + cases := []struct { + P *Provider + ExpectedErr error + }{ + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeBool, + Optional: true, + }, + }, + }, + ExpectedErr: nil, + }, + { // Reserved resource fields should be allowed in provider block + P: &Provider{ + Schema: map[string]*Schema{ + "provisioner": { + Type: TypeString, + Optional: true, + }, + "count": { + Type: TypeInt, + Optional: true, + }, + }, + }, + ExpectedErr: nil, + }, + { // Reserved provider fields should not be allowed + P: &Provider{ + Schema: map[string]*Schema{ + "alias": { + Type: TypeString, + Optional: true, + }, + }, + }, + ExpectedErr: fmt.Errorf("%s is a reserved field name for a provider", "alias"), + }, + } + + for i, tc := range cases { + err := tc.P.InternalValidate() + if tc.ExpectedErr == nil { + if err != nil { + t.Fatalf("%d: Error returned (expected no error): %s", i, err) + } + continue + } + if tc.ExpectedErr != nil && err == nil { + t.Fatalf("%d: Expected error (%s), but no error returned", i, tc.ExpectedErr) + } + if err.Error() != 
tc.ExpectedErr.Error() { + t.Fatalf("%d: Errors don't match. Expected: %#v Given: %#v", i, tc.ExpectedErr, err) + } + } +} diff --git a/legacy/helper/schema/provisioner.go b/legacy/helper/schema/provisioner.go new file mode 100644 index 000000000000..dd745001eee5 --- /dev/null +++ b/legacy/helper/schema/provisioner.go @@ -0,0 +1,205 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/legacy/terraform" +) + +// Provisioner represents a resource provisioner in Terraform and properly +// implements all of the ResourceProvisioner API. +// +// This higher level structure makes it much easier to implement a new or +// custom provisioner for Terraform. +// +// The function callbacks for this structure are all passed a context object. +// This context object has a number of pre-defined values that can be accessed +// via the global functions defined in context.go. +type Provisioner struct { + // ConnSchema is the schema for the connection settings for this + // provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + // + // NOTE: The value of connection keys can only be strings for now. + ConnSchema map[string]*Schema + + // Schema is the schema for the usage of this provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ApplyFunc is the function for executing the provisioner. This is required. + // It is given a context. See the Provisioner struct docs for more + // information. + ApplyFunc func(ctx context.Context) error + + // ValidateFunc is a function for extended validation. This is optional + // and should be used when individual field validation is not enough. 
+ ValidateFunc func(*terraform.ResourceConfig) ([]string, []error) + + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once +} + +// Keys that can be used to access data in the context parameters for +// Provisioners. +var ( + connDataInvalid = contextKey("data invalid") + + // This returns a *ResourceData for the connection information. + // Guaranteed to never be nil. + ProvConnDataKey = contextKey("provider conn data") + + // This returns a *ResourceData for the config information. + // Guaranteed to never be nil. + ProvConfigDataKey = contextKey("provider config data") + + // This returns a terraform.UIOutput. Guaranteed to never be nil. + ProvOutputKey = contextKey("provider output") + + // This returns the raw InstanceState passed to Apply. Guaranteed to + // be set, but may be nil. + ProvRawStateKey = contextKey("provider raw state") +) + +// InternalValidate should be called to validate the structure +// of the provisioner. +// +// This should be called in a unit test to verify before release that this +// structure is properly configured for use. +func (p *Provisioner) InternalValidate() error { + if p == nil { + return errors.New("provisioner is nil") + } + + var validationErrors error + { + sm := schemaMap(p.ConnSchema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + { + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + if p.ApplyFunc == nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf( + "ApplyFunc must not be nil")) + } + + return validationErrors +} + +// StopContext returns a context that checks whether a provisioner is stopped. 
+func (p *Provisioner) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + return p.stopCtx +} + +func (p *Provisioner) stopInit() { + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) Stop() error { + p.stopOnce.Do(p.stopInit) + p.stopCtxCancel() + return nil +} + +// GetConfigSchema implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { + return schemaMap(p.Schema).CoreConfigSchema(), nil +} + +// Apply implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) Apply( + o terraform.UIOutput, + s *terraform.InstanceState, + c *terraform.ResourceConfig) error { + var connData, configData *ResourceData + + { + // We first need to turn the connection information into a + // terraform.ResourceConfig so that we can use that type to more + // easily build a ResourceData structure. We do this by simply treating + // the conn info as configuration input. + raw := make(map[string]interface{}) + if s != nil { + for k, v := range s.Ephemeral.ConnInfo { + raw[k] = v + } + } + + c := terraform.NewResourceConfigRaw(raw) + sm := schemaMap(p.ConnSchema) + diff, err := sm.Diff(nil, c, nil, nil, true) + if err != nil { + return err + } + connData, err = sm.Data(nil, diff) + if err != nil { + return err + } + } + + { + // Build the configuration data. Doing this requires making a "diff" + // even though that's never used. We use that just to get the correct types. 
+ configMap := schemaMap(p.Schema) + diff, err := configMap.Diff(nil, c, nil, nil, true) + if err != nil { + return err + } + configData, err = configMap.Data(nil, diff) + if err != nil { + return err + } + } + + // Build the context and call the function + ctx := p.StopContext() + ctx = context.WithValue(ctx, ProvConnDataKey, connData) + ctx = context.WithValue(ctx, ProvConfigDataKey, configData) + ctx = context.WithValue(ctx, ProvOutputKey, o) + ctx = context.WithValue(ctx, ProvRawStateKey, s) + return p.ApplyFunc(ctx) +} + +// Validate implements the terraform.ResourceProvisioner interface. +func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provisioner failed! This is always a bug\n"+ + "with the provisioner itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + if p.Schema != nil { + w, e := schemaMap(p.Schema).Validate(c) + ws = append(ws, w...) + es = append(es, e...) + } + + if p.ValidateFunc != nil { + w, e := p.ValidateFunc(c) + ws = append(ws, w...) + es = append(es, e...) 
+ } + + return ws, es +} diff --git a/legacy/helper/schema/provisioner_test.go b/legacy/helper/schema/provisioner_test.go new file mode 100644 index 000000000000..36e5c51ad193 --- /dev/null +++ b/legacy/helper/schema/provisioner_test.go @@ -0,0 +1,334 @@ +package schema + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + "github.com/hashicorp/terraform/legacy/terraform" +) + +func TestProvisioner_impl(t *testing.T) { + var _ terraform.ResourceProvisioner = new(Provisioner) +} + +func noopApply(ctx context.Context) error { + return nil +} + +func TestProvisionerValidate(t *testing.T) { + cases := []struct { + Name string + P *Provisioner + Config map[string]interface{} + Err bool + Warns []string + }{ + { + Name: "No ApplyFunc", + P: &Provisioner{}, + Config: nil, + Err: true, + }, + { + Name: "Incorrect schema", + P: &Provisioner{ + Schema: map[string]*Schema{ + "foo": {}, + }, + ApplyFunc: noopApply, + }, + Config: nil, + Err: true, + }, + { + "Basic required field", + &Provisioner{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + ApplyFunc: noopApply, + }, + nil, + true, + nil, + }, + + { + "Basic required field set", + &Provisioner{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + ApplyFunc: noopApply, + }, + map[string]interface{}{ + "foo": "bar", + }, + false, + nil, + }, + { + Name: "Warning from property validation", + P: &Provisioner{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + ws = append(ws, "Simple warning from property validation") + return + }, + }, + }, + ApplyFunc: noopApply, + }, + Config: map[string]interface{}{ + "foo": "", + }, + Err: false, + Warns: []string{"Simple warning from property validation"}, + }, + { + Name: "No schema", + P: &Provisioner{ + Schema: nil, + ApplyFunc: noopApply, + }, + Config: nil, + Err: 
false, + }, + { + Name: "Warning from provisioner ValidateFunc", + P: &Provisioner{ + Schema: nil, + ApplyFunc: noopApply, + ValidateFunc: func(*terraform.ResourceConfig) (ws []string, errors []error) { + ws = append(ws, "Simple warning from provisioner ValidateFunc") + return + }, + }, + Config: nil, + Err: false, + Warns: []string{"Simple warning from provisioner ValidateFunc"}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := terraform.NewResourceConfigRaw(tc.Config) + ws, es := tc.P.Validate(c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v %s", i, es, es) + } + if (tc.Warns != nil || len(ws) != 0) && !reflect.DeepEqual(ws, tc.Warns) { + t.Fatalf("%d: warnings mismatch, actual: %#v", i, ws) + } + }) + } +} + +func TestProvisionerApply(t *testing.T) { + cases := []struct { + Name string + P *Provisioner + Conn map[string]string + Config map[string]interface{} + Err bool + }{ + { + "Basic config", + &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ApplyFunc: func(ctx context.Context) error { + cd := ctx.Value(ProvConnDataKey).(*ResourceData) + d := ctx.Value(ProvConfigDataKey).(*ResourceData) + if d.Get("foo").(int) != 42 { + return fmt.Errorf("bad config data") + } + if cd.Get("foo").(string) != "bar" { + return fmt.Errorf("bad conn data") + } + + return nil + }, + }, + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "foo": 42, + }, + false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := terraform.NewResourceConfigRaw(tc.Config) + + state := &terraform.InstanceState{ + Ephemeral: terraform.EphemeralState{ + ConnInfo: tc.Conn, + }, + } + + err := tc.P.Apply(nil, state, c) + if err != nil != tc.Err { + t.Fatalf("%d: %s", i, err) + } + }) + } +} + +func 
TestProvisionerApply_nilState(t *testing.T) { + p := &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ApplyFunc: func(ctx context.Context) error { + return nil + }, + } + + conf := map[string]interface{}{ + "foo": 42, + } + + c := terraform.NewResourceConfigRaw(conf) + err := p.Apply(nil, nil, c) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvisionerStop(t *testing.T) { + var p Provisioner + + // Verify stopch blocks + ch := p.StopContext().Done() + select { + case <-ch: + t.Fatal("should not be stopped") + case <-time.After(10 * time.Millisecond): + } + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} + +func TestProvisionerStop_apply(t *testing.T) { + p := &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ApplyFunc: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + } + + conn := map[string]string{ + "foo": "bar", + } + + conf := map[string]interface{}{ + "foo": 42, + } + + c := terraform.NewResourceConfigRaw(conf) + state := &terraform.InstanceState{ + Ephemeral: terraform.EphemeralState{ + ConnInfo: conn, + }, + } + + // Run the apply in a goroutine + doneCh := make(chan struct{}) + go func() { + p.Apply(nil, state, c) + close(doneCh) + }() + + // Should block + select { + case <-doneCh: + t.Fatal("should not be done") + case <-time.After(10 * time.Millisecond): + } + + // Stop! 
+ p.Stop() + + select { + case <-doneCh: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be done") + } +} + +func TestProvisionerStop_stopFirst(t *testing.T) { + var p Provisioner + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + select { + case <-p.StopContext().Done(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} diff --git a/legacy/helper/schema/resource.go b/legacy/helper/schema/resource.go new file mode 100644 index 000000000000..fd14bb7c8e49 --- /dev/null +++ b/legacy/helper/schema/resource.go @@ -0,0 +1,842 @@ +package schema + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/legacy/terraform" + "github.com/zclconf/go-cty/cty" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. 
+ Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + // + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. 
The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not implemented, + // then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // If any errors occur during each of the operation, an error should be + // returned. If a resource was partially updated, be careful to enable + // partial state mode for ResourceData and use it accordingly. + // + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + Exists ExistsFunc + + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. 
It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. 
For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to it's size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// See Resource documentation. +type CreateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ReadFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type UpdateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type DeleteFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. 
+ Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + +// Apply creates, updates, and/or deletes a resource. +func (r *Resource) Apply( + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, err + } + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta + } + + // Instance Diff shoould have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + if err := r.Delete(data, meta); err != nil { + return r.recordCurrentSchemaVersion(data.State()), err + } + + // Make sure the ID is gone. 
+ data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, nil + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + if err != nil { + return nil, err + } + } + + err = nil + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + err = r.Create(data, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf("doesn't support update") + } + + err = r.Update(data, meta) + } + + return r.recordCurrentSchemaVersion(data.State()), err +} + +// Diff returns a diff of this resource. +func (r *Resource) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) simpleDiff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. 
+ for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { + warns, errs := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) + } + + return warns, errs +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, error) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), err +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. 
+func (r *Resource) RefreshWithoutUpgrade( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. 
+ data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + // there may be new StateUpgraders that need to be run + s, err := r.upgradeState(s, meta) + if err != nil { + return s, err + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + var err error + + needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) + migrate := needsMigration && r.MigrateState != nil + + if migrate { + s, err = r.MigrateState(stateSchemaVersion, s, meta) + if err != nil { + return s, err + } + } + + if len(r.StateUpgraders) == 0 { + return s, nil + } + + // If we ran MigrateState, then the stateSchemaVersion value is no longer + // correct. We can expect the first upgrade function to be the correct + // schema type version. + if migrate { + stateSchemaVersion = r.StateUpgraders[0].Version + } + + schemaType := r.CoreConfigSchema().ImpliedType() + // find the expected type to convert the state + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion == upgrader.Version { + schemaType = upgrader.Type + } + } + + // StateUpgraders only operate on the new JSON format state, so the state + // need to be converted. 
+ stateVal, err := StateValueFromInstanceState(s, schemaType) + if err != nil { + return nil, err + } + + jsonState, err := StateValueToJSONMap(stateVal, schemaType) + if err != nil { + return nil, err + } + + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion != upgrader.Version { + continue + } + + jsonState, err = upgrader.Upgrade(jsonState, meta) + if err != nil { + return nil, err + } + stateSchemaVersion++ + } + + // now we need to re-flatmap the new state + stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) + if err != nil { + return nil, err + } + + return r.ShimInstanceStateFromValue(stateVal) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. +// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. 
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.Create != nil || r.Update != nil || r.Delete != nil { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if r.Update == nil { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Destroy, and Read are required + if r.Read == nil { + return fmt.Errorf("Read must be implemented") + } + if r.Delete == nil { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. 
+ if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + return schemaMap(r.Schema).InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value +// is a separate copy and can be safely modified differently. 
+// +// The data returned from this function has no actual affect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through in the schema +// of the receiving Resource. +func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + +// Returns true if the resource is "top level" i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.Create != nil || r.Read != nil) +} + +// Determines if a given InstanceState needs to be migrated by checking the +// stored version number with the current SchemaVersion +func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { + // Get the raw interface{} value for the schema version. 
If it doesn't + // exist or is nil then set it to zero. + raw := is.Meta["schema_version"] + if raw == nil { + raw = "0" + } + + // Try to convert it to a string. If it isn't a string then we pretend + // that it isn't set at all. It should never not be a string unless it + // was manually tampered with. + rawString, ok := raw.(string) + if !ok { + rawString = "0" + } + + stateSchemaVersion, _ := strconv.Atoi(rawString) + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. 
+func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/legacy/helper/schema/resource_data.go b/legacy/helper/schema/resource_data.go new file mode 100644 index 000000000000..8ec64248a9cc --- /dev/null +++ b/legacy/helper/schema/resource_data.go @@ -0,0 +1,561 @@ +package schema + +import ( + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/hashicorp/terraform/legacy/terraform" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// ResourceData is used to query and set the attributes of a resource. +// +// ResourceData is the primary argument received for CRUD operations on +// a resource as well as configuration of a provider. It is a powerful +// structure that can be used to not only query data, but check for changes, +// define partial state updates, etc. +// +// The most relevant methods to take a look at are Get, Set, and Partial. +type ResourceData struct { + // Settable (internally) + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + providerMeta cty.Value + + // Don't set + multiReader *MultiLevelFieldReader + setWriter *MapFieldWriter + newState *terraform.InstanceState + partial bool + partialMap map[string]struct{} + once sync.Once + isNew bool + + panicOnError bool +} + +// getResult is the internal structure that is generated when a Get +// is called that contains some extra data that might be used. +type getResult struct { + Value interface{} + ValueProcessed interface{} + Computed bool + Exists bool + Schema *Schema +} + +// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary +// values, bypassing schema. This MUST NOT be used in normal circumstances - +// it exists only to support the remote_state data source. +// +// Deprecated: Fully define schema attributes and use Set() instead. 
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { + d.once.Do(d.init) + + d.setWriter.unsafeWriteField(key, value) +} + +// Get returns the data for the given key, or nil if the key doesn't exist +// in the schema. +// +// If the key does exist in the schema but doesn't exist in the configuration, +// then the default value for that type will be returned. For strings, this is +// "", for numbers it is 0, etc. +// +// If you want to test if something is set at all in the configuration, +// use GetOk. +func (d *ResourceData) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +// GetChange returns the old and new value for a given key. +// +// HasChange should be used to check if a change exists. It is possible +// that both the old and new value are the same if the old value was not +// set and the new value is. This is common, for example, for boolean +// fields which have a zero value of false. +func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { + o, n := d.getChange(key, getSourceState, getSourceDiff) + return o.Value, n.Value +} + +// GetOk returns the data for the given key and whether or not the key +// has been set to a non-zero value at some point. +// +// The first result will not necessarilly be nil if the value doesn't exist. +// The second result should be checked to determine this information. +func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists returns the data for a given key and whether or not the key +// has been set to a non-zero value. 
This is only useful for determining +// if boolean attributes have been set, if they are Optional but do not +// have a Default value. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +// This should only be used if absolutely required/needed. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := o.(Equal); ok { + return !eq.Equal(n) + } + + return !reflect.DeepEqual(o, n) +} + +// Partial turns partial state mode on/off. +// +// When partial state mode is enabled, then only key prefixes specified +// by SetPartial will be in the final state. This allows providers to return +// partial states for partially applied resources (when errors occur). +func (d *ResourceData) Partial(on bool) { + d.partial = on + if on { + if d.partialMap == nil { + d.partialMap = make(map[string]struct{}) + } + } else { + d.partialMap = nil + } +} + +// Set sets the value for the given key. +// +// If the key is invalid or the value is not a correct type, an error +// will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. 
This allows Set to take a pointer to primitives to + // simplify the interface. + reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as its not + // a pointer to a struct, since struct pointers are allowed. + reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err +} + +// SetPartial adds the key to the final state output while +// in partial state mode. The key must be a root key in the schema (i.e. +// it cannot be "list.0"). +// +// If partial state mode is disabled, then this has no effect. Additionally, +// whenever partial state mode is toggled, the partial data is cleared. +func (d *ResourceData) SetPartial(k string) { + if d.partial { + d.partialMap[k] = struct{}{} + } +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. +func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the +// resource is destroyed. 
+func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource. +func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *terraform.InstanceState { + var result terraform.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k, _ := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. 
+ rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + if _, ok := d.partialMap[k]; ok { + source = getSourceSet + } + } + + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. 
+func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config. 
	o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
	if !o.Exists {
		o.Value = nil
	}
	if !n.Exists {
		n.Value = nil
	}

	// Return the old, new, and whether there is a change
	return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false
}

// getChange reads key k at two source levels and returns both results,
// the old (oldLevel) value first and the new (newLevel) value second.
func (d *ResourceData) getChange(
	k string,
	oldLevel getSource,
	newLevel getSource) (getResult, getResult) {
	// An empty key addresses the whole object; otherwise split into parts.
	var parts, parts2 []string
	if k != "" {
		parts = strings.Split(k, ".")
		parts2 = strings.Split(k, ".")
	}

	o := d.get(parts, oldLevel)
	n := d.get(parts2, newLevel)
	return o, n
}

// get reads the value at addr from the level encoded in source, either
// from exactly that level (getSourceExact flag) or merged up to it.
func (d *ResourceData) get(addr []string, source getSource) getResult {
	d.once.Do(d.init)

	// Split source into its flag bits and its level bits, then map the
	// level onto the reader name used by the multi-level reader.
	level := "set"
	flags := source & ^getSourceLevelMask
	exact := flags&getSourceExact != 0
	source = source & getSourceLevelMask
	if source >= getSourceSet {
		level = "set"
	} else if source >= getSourceDiff {
		level = "diff"
	} else if source >= getSourceConfig {
		level = "config"
	} else {
		level = "state"
	}

	var result FieldReadResult
	var err error
	if exact {
		result, err = d.multiReader.ReadFieldExact(addr, level)
	} else {
		result, err = d.multiReader.ReadFieldMerge(addr, level)
	}
	if err != nil {
		panic(err)
	}

	// If the result doesn't exist, then we set the value to the zero value
	var schema *Schema
	if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
		schema = schemaL[len(schemaL)-1]
	}

	if result.Value == nil && schema != nil {
		result.Value = result.ValueOrZero(schema)
	}

	// Transform the FieldReadResult into a getResult. It might be worth
	// merging these two structures one day.
	return getResult{
		Value:          result.Value,
		ValueProcessed: result.ValueProcessed,
		Computed:       result.Computed,
		Exists:         result.Exists,
		Schema:         schema,
	}
}

// GetProviderMeta decodes d.providerMeta into dst via gocty. It returns
// nil without touching dst when the provider_meta value is null.
func (d *ResourceData) GetProviderMeta(dst interface{}) error {
	if d.providerMeta.IsNull() {
		return nil
	}
	return gocty.FromCtyValue(d.providerMeta, &dst)
}
diff --git a/internal/legacy/helper/schema/resource_data_get_source.go b/legacy/helper/schema/resource_data_get_source.go
similarity index 100%
rename from internal/legacy/helper/schema/resource_data_get_source.go
rename to legacy/helper/schema/resource_data_get_source.go
diff --git a/legacy/helper/schema/resource_data_test.go b/legacy/helper/schema/resource_data_test.go
new file mode 100644
index 000000000000..bfe1387ae6a9
--- /dev/null
+++ b/legacy/helper/schema/resource_data_test.go
@@ -0,0 +1,3564 @@
+package schema
+
+import (
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/terraform/legacy/terraform"
+)
+
+// TestResourceDataGet exercises ResourceData.Get across state/diff
+// combinations for primitives, lists, maps, and sets.
+func TestResourceDataGet(t *testing.T) {
+	cases := []struct {
+		Schema map[string]*Schema
+		State  *terraform.InstanceState
+		Diff   *terraform.InstanceDiff
+		Key    string
+		Value  interface{}
+	}{
+		// #0
+		{
+			Schema: map[string]*Schema{
+				"availability_zone": &Schema{
+					Type:     TypeString,
+					Optional: true,
+					Computed: true,
+					ForceNew: true,
+				},
+			},
+
+			State: nil,
+
+			Diff: &terraform.InstanceDiff{
+				Attributes: map[string]*terraform.ResourceAttrDiff{
+					"availability_zone": &terraform.ResourceAttrDiff{
+						Old:         "foo",
+						New:         "bar",
+						NewComputed: true,
+					},
+				},
+			},
+
+			Key:   "availability_zone",
+			Value: "",
+		},
+
+		// #1
+		{
+			Schema: map[string]*Schema{
+				"availability_zone": &Schema{
+					Type:     TypeString,
+					Optional: true,
+					Computed: true,
+					ForceNew: true,
+				},
+			},
+
+			State: nil,
+
+			Diff: &terraform.InstanceDiff{
+				Attributes: map[string]*terraform.ResourceAttrDiff{
+					"availability_zone": &terraform.ResourceAttrDiff{
+						Old:         "",
+						New:         "foo",
+						RequiresNew: true,
+					},
+				},
+			},
+
+			Key:
"availability_zone", + + Value: "foo", + }, + + // #2 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo!", + NewExtra: "foo", + }, + }, + }, + + Key: "availability_zone", + Value: "foo", + }, + + // #3 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + }, + }, + + Diff: nil, + + Key: "availability_zone", + + Value: "bar", + }, + + // #4 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + }, + + // #5 + { + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "port": "80", + }, + }, + + Diff: nil, + + Key: "port", + + Value: 80, + }, + + // #6 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports.1", + + Value: 2, + }, + + // #7 + { + Schema: 
map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports.#", + + Value: 3, + }, + + // #8 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Key: "ports.#", + + Value: 0, + }, + + // #9 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports", + + Value: []interface{}{1, 2, 5}, + }, + + // #10 + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ingress.#": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ingress.0.from": &terraform.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Key: "ingress.0", + + Value: map[string]interface{}{ + "from": 8080, + }, + }, + + // #11 + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ingress.#": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ingress.0.from": &terraform.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Key: "ingress", + + Value: []interface{}{ + 
map[string]interface{}{ + "from": 8080, + }, + }, + }, + + // #12 Computed get + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Key: "availability_zone", + + Value: "foo", + }, + + // #13 Full object + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "", + + Value: map[string]interface{}{ + "availability_zone": "foo", + }, + }, + + // #14 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + "config_vars.0.foo": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + "config_vars.1.bar": &terraform.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Key: "config_vars", + + Value: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + // #15 List of maps in state + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.foo": "baz", + "config_vars.1.bar": "bar", + }, + }, + + Diff: nil, + + Key: "config_vars", + + Value: []interface{}{ + map[string]interface{}{ + "foo": "baz", 
+ }, + map[string]interface{}{ + "bar": "bar", + }, + }, + }, + + // #16 List of maps with removal in diff + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.FOO": "bar", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.FOO": &terraform.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + }, + }, + + Key: "config_vars", + + Value: []interface{}{}, + }, + + // #17 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: nil, + + Key: "ports", + + Value: []interface{}{80}, + }, + + // #18 + { + Schema: map[string]*Schema{ + "data": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "value": &Schema{ + Type: TypeString, + Required: true, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "data.#": "1", + "data.10.index": "10", + "data.10.value": "50", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "data.10.value": &terraform.ResourceAttrDiff{ + Old: "50", + New: "80", + }, + }, + }, + + Key: "data", + + Value: []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "80", + }, + }, + }, + + // #19 Empty Set + { + 
Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + + Value: []interface{}{}, + }, + + // #20 Float zero + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratio", + + Value: 0.0, + }, + + // #21 Float given + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ratio": "0.5", + }, + }, + + Diff: nil, + + Key: "ratio", + + Value: 0.5, + }, + + // #22 Float diff + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ratio": "-0.5", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ratio": &terraform.ResourceAttrDiff{ + Old: "-0.5", + New: "33.0", + }, + }, + }, + + Key: "ratio", + + Value: 33.0, + }, + + // #23 Sets with removed elements + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "1", + }, + "ports.80": &terraform.ResourceAttrDiff{ + Old: "80", + New: "80", + }, + "ports.8080": &terraform.ResourceAttrDiff{ + Old: "8080", + New: "0", + NewRemoved: true, + }, + }, + }, + + Key: "ports", + + Value: []interface{}{80}, + }, + } + 
	for i, tc := range cases {
		d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff)
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		// Sets are compared via their flattened list form.
		v := d.Get(tc.Key)
		if s, ok := v.(*Set); ok {
			v = s.List()
		}

		if !reflect.DeepEqual(v, tc.Value) {
			t.Fatalf("Bad: %d\n\n%#v\n\nExpected: %#v", i, v, tc.Value)
		}
	}
}

// TestResourceDataGetChange verifies that GetChange returns the old
// (state) and new (diff/config) values for a key.
func TestResourceDataGetChange(t *testing.T) {
	cases := []struct {
		Schema   map[string]*Schema
		State    *terraform.InstanceState
		Diff     *terraform.InstanceDiff
		Key      string
		OldValue interface{}
		NewValue interface{}
	}{
		{
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Optional: true,
					Computed: true,
					ForceNew: true,
				},
			},

			State: nil,

			Diff: &terraform.InstanceDiff{
				Attributes: map[string]*terraform.ResourceAttrDiff{
					"availability_zone": &terraform.ResourceAttrDiff{
						Old:         "",
						New:         "foo",
						RequiresNew: true,
					},
				},
			},

			Key: "availability_zone",

			OldValue: "",
			NewValue: "foo",
		},

		{
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Optional: true,
					Computed: true,
					ForceNew: true,
				},
			},

			State: &terraform.InstanceState{
				Attributes: map[string]string{
					"availability_zone": "foo",
				},
			},

			Diff: &terraform.InstanceDiff{
				Attributes: map[string]*terraform.ResourceAttrDiff{
					"availability_zone": &terraform.ResourceAttrDiff{
						Old:         "",
						New:         "foo",
						RequiresNew: true,
					},
				},
			},

			Key: "availability_zone",

			OldValue: "foo",
			NewValue: "foo",
		},
	}

	for i, tc := range cases {
		d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff)
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		o, n := d.GetChange(tc.Key)
		if !reflect.DeepEqual(o, tc.OldValue) {
			t.Fatalf("Old Bad: %d\n\n%#v", i, o)
		}
		if !reflect.DeepEqual(n, tc.NewValue) {
			t.Fatalf("New Bad: %d\n\n%#v", i, n)
		}
	}
}

// TestResourceDataGetOk verifies GetOk's ok result for zero-valued and
// unset primitives, lists, maps, and sets.
func TestResourceDataGetOk(t *testing.T) {
	cases := []struct {
		Schema map[string]*Schema
		State
*terraform.InstanceState + Diff *terraform.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + /* + * Primitives + */ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + /* + * Lists + */ + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + /* + * Map + */ + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: map[string]interface{}{}, + Ok: false, + }, + + /* + * Set + */ + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + { + Schema: 
map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports.0", + Value: 0, + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "0", + }, + }, + }, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + // Further illustrates and clarifiies the GetOk semantics from #933, and + // highlights the limitation that zero-value config is currently + // indistinguishable from unset config. + { + Schema: map[string]*Schema{ + "from_port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "from_port": &terraform.ResourceAttrDiff{ + Old: "", + New: "0", + }, + }, + }, + + Key: "from_port", + Value: 0, + Ok: false, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + v, ok := d.GetOk(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad: %d\n\n%#v", i, v) + } + if ok != tc.Ok { + t.Fatalf("%d: expected ok: %t, got: %t", i, tc.Ok, ok) + } + } +} + +func TestResourceDataGetOkExists(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *terraform.InstanceState + Diff *terraform.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + /* + * Primitives + */ + { + Name: "string-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + 
ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: true, + }, + + { + Name: "string-computed-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Name: "string-optional-computed-nil-diff", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + /* + * Lists + */ + + { + Name: "list-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + /* + * Map + */ + + { + Name: "map-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeMap, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: map[string]interface{}{}, + Ok: false, + }, + + /* + * Set + */ + + { + Name: "set-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + { + Name: "set-optional-key", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: 
nil, + + Diff: nil, + + Key: "ports.0", + Value: 0, + Ok: false, + }, + + { + Name: "bool-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: false, + Ok: true, + }, + + { + Name: "bool-literal-set", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + New: "true", + }, + }, + }, + + Key: "availability_zone", + Value: true, + Ok: true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("%s err: %s", tc.Name, err) + } + + v, ok := d.GetOkExists(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad %s: \n%#v", tc.Name, v) + } + if ok != tc.Ok { + t.Fatalf("%s: expected ok: %t, got: %t", tc.Name, tc.Ok, ok) + } + }) + } +} + +func TestResourceDataTimeout(t *testing.T) { + cases := []struct { + Name string + Rd *ResourceData + Expected *ResourceTimeout + }{ + { + Name: "Basic example default", + Rd: &ResourceData{timeouts: timeoutForValues(10, 3, 0, 15, 0)}, + Expected: expectedTimeoutForValues(10, 3, 0, 15, 0), + }, + { + Name: "Resource and config match update, create", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 3, 0, 0)}, + Expected: expectedTimeoutForValues(10, 0, 3, 0, 0), + }, + { + Name: "Resource provides default", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 0, 7)}, + Expected: expectedTimeoutForValues(10, 7, 7, 7, 7), + }, + { + 
Name: "Resource provides default and delete", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 15, 7)}, + Expected: expectedTimeoutForValues(10, 7, 7, 15, 7), + }, + { + Name: "Resource provides default, config overwrites other values", + Rd: &ResourceData{timeouts: timeoutForValues(10, 3, 0, 0, 13)}, + Expected: expectedTimeoutForValues(10, 3, 13, 13, 13), + }, + { + Name: "Resource has no config", + Rd: &ResourceData{}, + Expected: expectedTimeoutForValues(0, 0, 0, 0, 0), + }, + } + + keys := timeoutKeys() + for i, c := range cases { + t.Run(fmt.Sprintf("%d-%s", i, c.Name), func(t *testing.T) { + + for _, k := range keys { + got := c.Rd.Timeout(k) + var ex *time.Duration + switch k { + case TimeoutCreate: + ex = c.Expected.Create + case TimeoutRead: + ex = c.Expected.Read + case TimeoutUpdate: + ex = c.Expected.Update + case TimeoutDelete: + ex = c.Expected.Delete + case TimeoutDefault: + ex = c.Expected.Default + } + + if got > 0 && ex == nil { + t.Fatalf("Unexpected value in (%s), case %d check 1:\n\texpected: %#v\n\tgot: %#v", k, i, ex, got) + } + if got == 0 && ex != nil { + t.Fatalf("Unexpected value in (%s), case %d check 2:\n\texpected: %#v\n\tgot: %#v", k, i, *ex, got) + } + + // confirm values + if ex != nil { + if got != *ex { + t.Fatalf("Timeout %s case (%d) expected (%s), got (%s)", k, i, *ex, got) + } + } + } + + }) + } +} + +func TestResourceDataHasChange(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *terraform.InstanceState + Diff *terraform.InstanceDiff + Key string + Change bool + }{ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + Change: true, + }, + + { + 
Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + Change: false, + }, + + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.Name": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + }, + }, + + Key: "tags", + + Change: true, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + }, + }, + + Key: "ports", + + Change: true, + }, + + // https://github.com/hashicorp/terraform/issues/927 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.foo": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Key: "ports", + + Change: false, + }, + } + + for i, tc := range cases { + d, err := 
schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := d.HasChange(tc.Key) + if actual != tc.Change { + t.Fatalf("Bad: %d %#v", i, actual) + } + } +} + +func TestResourceDataSet(t *testing.T) { + var testNilPtr *string + + cases := []struct { + Schema map[string]*Schema + State *terraform.InstanceState + Diff *terraform.InstanceDiff + Key string + Value interface{} + Err bool + GetKey string + GetValue interface{} + + // GetPreProcess can be set to munge the return value before being + // compared to GetValue + GetPreProcess func(interface{}) interface{} + }{ + // #0: Basic good + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "foo", + + GetKey: "availability_zone", + GetValue: "foo", + }, + + // #1: Basic int + { + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "port", + Value: 80, + + GetKey: "port", + GetValue: 80, + }, + + // #2: Basic bool + { + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "vpc", + Value: true, + + GetKey: "vpc", + GetValue: true, + }, + + // #3 + { + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "vpc", + Value: false, + + GetKey: "vpc", + GetValue: false, + }, + + // #4: Invalid type + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: 80, + Err: true, + + GetKey: "availability_zone", + GetValue: "", + }, + + // #5: List of primitives, set list + { + Schema: 
map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []int{1, 2, 5}, + + GetKey: "ports", + GetValue: []interface{}{1, 2, 5}, + }, + + // #6: List of primitives, set list with error + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{1, "NOPE", 5}, + Err: true, + + GetKey: "ports", + GetValue: []interface{}{}, + }, + + // #7: Set a list of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "config_vars", + Value: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + Err: false, + + GetKey: "config_vars", + GetValue: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + // #8: Set, with list + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "100", + "ports.1": "80", + "ports.2": "80", + }, + }, + + Key: "ports", + Value: []interface{}{100, 125, 125}, + + GetKey: "ports", + GetValue: []interface{}{100, 125}, + }, + + // #9: Set, with Set + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", 
+ "ports.81": "81", + }, + }, + + Key: "ports", + Value: &Set{ + m: map[string]interface{}{ + "1": 1, + "2": 2, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{1, 2}, + }, + + // #10: Set single item + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.100": "100", + "ports.80": "80", + }, + }, + + Key: "ports.100", + Value: 256, + Err: true, + + GetKey: "ports", + GetValue: []interface{}{100, 80}, + }, + + // #11: Set with nested set + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + }, + + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["port"].(int) + }, + }, + }, + + State: nil, + + Key: "ports", + Value: []interface{}{ + map[string]interface{}{ + "port": 80, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{ + map[string]interface{}{ + "port": 80, + "set": []interface{}{}, + }, + }, + + GetPreProcess: func(v interface{}) interface{} { + if v == nil { + return v + } + s, ok := v.([]interface{}) + if !ok { + return v + } + for _, v := range s { + m, ok := v.(map[string]interface{}) + if !ok { + continue + } + if m["set"] == nil { + continue + } + if s, ok := m["set"].(*Set); ok { + m["set"] = s.List() + } + } + + return v + }, + }, + + // #12: List of floats, set list + { + Schema: map[string]*Schema{ + "ratios": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeFloat}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratios", + Value: []float64{1.0, 2.2, 5.5}, + + GetKey: "ratios", + GetValue: []interface{}{1.0, 2.2, 
5.5}, + }, + + // #12: Set of floats, set list + { + Schema: map[string]*Schema{ + "ratios": &Schema{ + Type: TypeSet, + Computed: true, + Elem: &Schema{Type: TypeFloat}, + Set: func(a interface{}) int { + return int(math.Float64bits(a.(float64))) + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratios", + Value: []float64{1.0, 2.2, 5.5}, + + GetKey: "ratios", + GetValue: []interface{}{1.0, 2.2, 5.5}, + }, + + // #13: Basic pointer + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: testPtrTo("foo"), + + GetKey: "availability_zone", + GetValue: "foo", + }, + + // #14: Basic nil value + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: testPtrTo(nil), + + GetKey: "availability_zone", + GetValue: "", + }, + + // #15: Basic nil pointer + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: testNilPtr, + + GetKey: "availability_zone", + GetValue: "", + }, + + // #16: Set in a list + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + }, + }, + + State: nil, + + Key: "ports", + Value: []interface{}{ + map[string]interface{}{ + "set": []interface{}{ + 1, + }, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{ + map[string]interface{}{ + "set": []interface{}{ + 1, + }, + }, + }, + GetPreProcess: func(v interface{}) interface{} { + if v == nil { + return v + } + s, 
ok := v.([]interface{}) + if !ok { + return v + } + for _, v := range s { + m, ok := v.(map[string]interface{}) + if !ok { + continue + } + if m["set"] == nil { + continue + } + if s, ok := m["set"].(*Set); ok { + m["set"] = s.List() + } + } + + return v + }, + }, + } + + oldEnv := os.Getenv(PanicOnErr) + os.Setenv(PanicOnErr, "") + defer os.Setenv(PanicOnErr, oldEnv) + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = d.Set(tc.Key, tc.Value) + if err != nil != tc.Err { + t.Fatalf("%d err: %s", i, err) + } + + v := d.Get(tc.GetKey) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if tc.GetPreProcess != nil { + v = tc.GetPreProcess(v) + } + + if !reflect.DeepEqual(v, tc.GetValue) { + t.Fatalf("Get Bad: %d\n\n%#v", i, v) + } + } +} + +func TestResourceDataState_dynamicAttributes(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *terraform.InstanceState + Diff *terraform.InstanceDiff + Set map[string]interface{} + UnsafeSet map[string]string + Result *terraform.InstanceState + }{ + { + Schema: map[string]*Schema{ + "__has_dynamic_attributes": { + Type: TypeString, + Optional: true, + }, + + "schema_field": { + Type: TypeString, + Required: true, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "schema_field": "present", + }, + + UnsafeSet: map[string]string{ + "test1": "value", + "test2": "value", + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "schema_field": "present", + "test1": "value", + "test2": "value", + }, + }, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + for k, v := range tc.Set { + d.Set(k, v) + } + + for k, v := range tc.UnsafeSet { + d.UnsafeSetFieldRaw(k, v) + } + + // Set an ID so that the state returned is not nil + idSet := false + if d.Id() == "" { + idSet = true + 
d.SetId("foo") + } + + actual := d.State() + + // If we set an ID, then undo what we did so the comparison works + if actual != nil && idSet { + actual.ID = "" + delete(actual.Attributes, "id") + } + + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("Bad: %d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) + } + } +} + +func TestResourceDataState_schema(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *terraform.InstanceState + Diff *terraform.InstanceDiff + Set map[string]interface{} + Result *terraform.InstanceState + Partial []string + }{ + // #0 Basic primitive in diff + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + }, + + // #1 Basic primitive set override + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Set: map[string]interface{}{ + "availability_zone": "bar", + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + }, + }, + }, + + // #2 + { + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "vpc": true, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "vpc": "true", + }, + }, + }, + + // #3 Basic 
primitive with StateFunc set + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(interface{}) string { return "" }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + NewExtra: "foo!", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + }, + + // #4 List + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.0": "80", + "ports.1": "100", + }, + }, + }, + + // #5 List of resources + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ingress.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ingress.0.from": &terraform.ResourceAttrDiff{ + Old: "80", + New: "150", + }, + "ingress.1.from": &terraform.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "2", + 
"ingress.0.from": "150", + "ingress.1.from": "100", + }, + }, + }, + + // #6 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.%": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.%": "1", + "config_vars.1.bar": "baz", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.0.bar": &terraform.ResourceAttrDiff{ + NewRemoved: true, + }, + }, + }, + + Set: map[string]interface{}{ + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "baz": "bang", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.%": "1", + "config_vars.0.foo": "bar", + "config_vars.1.%": "1", + "config_vars.1.baz": "bang", + }, + }, + }, + + // #7 List of maps with removal in diff + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.FOO": "bar", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.FOO": &terraform.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "0", + }, + }, + }, + + // #8 Basic state with other keys + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: 
&terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Result: &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "availability_zone": "foo", + }, + }, + }, + + // #9 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: nil, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.80": "80", + "ports.81": "81", + "ports.100": "100", + }, + }, + }, + + // #10 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{100, 80}, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.80": "80", + "ports.100": "100", + }, + }, + }, + + // #11 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "order": &Schema{ + Type: TypeInt, + }, + + "a": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + + "b": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["order"].(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: 
map[string]string{ + "ports.#": "2", + "ports.10.order": "10", + "ports.10.a.#": "1", + "ports.10.a.0": "80", + "ports.20.order": "20", + "ports.20.b.#": "1", + "ports.20.b.0": "100", + }, + }, + + Set: map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "order": 20, + "b": []interface{}{100}, + }, + map[string]interface{}{ + "order": 10, + "a": []interface{}{80}, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.10.order": "10", + "ports.10.a.#": "1", + "ports.10.a.0": "80", + "ports.10.b.#": "0", + "ports.20.order": "20", + "ports.20.a.#": "0", + "ports.20.b.#": "1", + "ports.20.b.0": "100", + }, + }, + }, + + /* + * PARTIAL STATES + */ + + // #12 Basic primitive + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #13 List + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + }, + + // #14 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + 
Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Partial: []string{}, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #15 List of resources + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ingress.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ingress.0.from": &terraform.ResourceAttrDiff{ + Old: "80", + New: "150", + }, + "ingress.1.from": &terraform.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + }, + + // #16 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.bar": "baz", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.0.bar": &terraform.ResourceAttrDiff{ + NewRemoved: true, + }, + }, + }, + + Set: map[string]interface{}{ + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + 
"baz": "bang", + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + // TODO: broken, shouldn't bar be removed? + "config_vars.#": "2", + "config_vars.0.%": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.%": "1", + "config_vars.1.bar": "baz", + }, + }, + }, + + // #17 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.120": &terraform.ResourceAttrDiff{ + New: "120", + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.80": "80", + "ports.81": "81", + "ports.100": "100", + }, + }, + }, + + // #18 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Partial: []string{}, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #19 Maps + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.Name": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + 
"tags.%": "1", + "tags.Name": "foo", + }, + }, + }, + + // #20 empty computed map + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.Name": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Set: map[string]interface{}{ + "tags": map[string]string{}, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "tags.%": "0", + }, + }, + }, + + // #21 + { + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #22 + { + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: map[string]interface{}{ + "foo": "bar", + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + }, + + // #23 Set of maps + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "uuids": &Schema{Type: TypeMap}, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.10.uuids.#": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: 
map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "index": 10, + "uuids": map[string]interface{}{ + "80": "value", + }, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.10.index": "10", + "ports.10.uuids.%": "1", + "ports.10.uuids.80": "value", + }, + }, + }, + + // #24 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "3", + New: "0", + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #25 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #26 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #27 Set lists + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + 
"uuids": &Schema{Type: TypeMap}, + }, + }, + }, + }, + + State: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "index": 10, + "uuids": map[string]interface{}{ + "80": "value", + }, + }, + }, + }, + + Result: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0.index": "10", + "ports.0.uuids.%": "1", + "ports.0.uuids.80": "value", + }, + }, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + for k, v := range tc.Set { + if err := d.Set(k, v); err != nil { + t.Fatalf("%d err: %s", i, err) + } + } + + // Set an ID so that the state returned is not nil + idSet := false + if d.Id() == "" { + idSet = true + d.SetId("foo") + } + + // If we have partial, then enable partial state mode. 
+ if tc.Partial != nil { + d.Partial(true) + for _, k := range tc.Partial { + d.SetPartial(k) + } + } + + actual := d.State() + + // If we set an ID, then undo what we did so the comparison works + if actual != nil && idSet { + actual.ID = "" + delete(actual.Attributes, "id") + } + + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("Bad: %d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) + } + } +} + +func TestResourceData_nonStringValuesInMap(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + Diff *terraform.InstanceDiff + MapFieldName string + ItemName string + ExpectedType string + }{ + { + Schema: map[string]*Schema{ + "boolMap": &Schema{ + Type: TypeMap, + Elem: TypeBool, + Optional: true, + }, + }, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "boolMap.%": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "boolMap.boolField": &terraform.ResourceAttrDiff{ + Old: "", + New: "true", + }, + }, + }, + MapFieldName: "boolMap", + ItemName: "boolField", + ExpectedType: "bool", + }, + { + Schema: map[string]*Schema{ + "intMap": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + }, + }, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "intMap.%": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "intMap.intField": &terraform.ResourceAttrDiff{ + Old: "", + New: "8", + }, + }, + }, + MapFieldName: "intMap", + ItemName: "intField", + ExpectedType: "int", + }, + { + Schema: map[string]*Schema{ + "floatMap": &Schema{ + Type: TypeMap, + Elem: TypeFloat, + Optional: true, + }, + }, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "floatMap.%": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "floatMap.floatField": &terraform.ResourceAttrDiff{ + Old: "", + New: "8.22", + }, + }, + }, + MapFieldName: "floatMap", + ItemName: "floatField", + ExpectedType: "float64", + }, + } + + for _, c := 
range cases { + d, err := schemaMap(c.Schema).Data(nil, c.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + m, ok := d.Get(c.MapFieldName).(map[string]interface{}) + if !ok { + t.Fatalf("expected %q to be castable to a map", c.MapFieldName) + } + field, ok := m[c.ItemName] + if !ok { + t.Fatalf("expected %q in the map", c.ItemName) + } + + typeName := reflect.TypeOf(field).Name() + if typeName != c.ExpectedType { + t.Fatalf("expected %q to be %q, it is %q.", + c.ItemName, c.ExpectedType, typeName) + } + } +} + +func TestResourceDataSetConnInfo(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + d.SetConnInfo(map[string]string{ + "foo": "bar", + }) + + expected := map[string]string{ + "foo": "bar", + } + + actual := d.State() + if !reflect.DeepEqual(actual.Ephemeral.ConnInfo, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetMeta_Timeouts(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + + rt := ResourceTimeout{ + Create: DefaultTimeout(7 * time.Minute), + } + + d.timeouts = &rt + + expected := expectedForValues(7, 0, 0, 0, 0) + + actual := d.State() + if !reflect.DeepEqual(actual.Meta[TimeoutKey], expected) { + t.Fatalf("Bad Meta_timeout match:\n\texpected: %#v\n\tgot: %#v", expected, actual.Meta[TimeoutKey]) + } +} + +func TestResourceDataSetId(t *testing.T) { + d := &ResourceData{ + state: &terraform.InstanceState{ + ID: "test", + Attributes: map[string]string{ + "id": "test", + }, + }, + } + d.SetId("foo") + + actual := d.State() + + // SetId should set both the ID field as well as the attribute, to aid in + // transitioning to the new type system. 
+ if actual.ID != "foo" || actual.Attributes["id"] != "foo" { + t.Fatalf("bad: %#v", actual) + } + + d.SetId("") + actual = d.State() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetId_clear(t *testing.T) { + d := &ResourceData{ + state: &terraform.InstanceState{ID: "bar"}, + } + d.SetId("") + + actual := d.State() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetId_override(t *testing.T) { + d := &ResourceData{ + state: &terraform.InstanceState{ID: "bar"}, + } + d.SetId("foo") + + actual := d.State() + if actual.ID != "foo" { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetType(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + d.SetType("bar") + + actual := d.State() + if v := actual.Ephemeral.Type; v != "bar" { + t.Fatalf("bad: %#v", actual) + } +} + +func testPtrTo(raw interface{}) interface{} { + return &raw +} diff --git a/internal/legacy/helper/schema/resource_diff.go b/legacy/helper/schema/resource_diff.go similarity index 99% rename from internal/legacy/helper/schema/resource_diff.go rename to legacy/helper/schema/resource_diff.go index 72d4711eb22b..bd9103a87d17 100644 --- a/internal/legacy/helper/schema/resource_diff.go +++ b/legacy/helper/schema/resource_diff.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" ) // newValueWriter is a minor re-implementation of MapFieldWriter to include diff --git a/internal/legacy/helper/schema/resource_diff_test.go b/legacy/helper/schema/resource_diff_test.go similarity index 99% rename from internal/legacy/helper/schema/resource_diff_test.go rename to legacy/helper/schema/resource_diff_test.go index 9737177147af..ca9f046aa4f6 100644 --- a/internal/legacy/helper/schema/resource_diff_test.go +++ b/legacy/helper/schema/resource_diff_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - 
"github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" ) // testSetFunc is a very simple function we use to test a foo/bar complex set. diff --git a/internal/legacy/helper/schema/resource_importer.go b/legacy/helper/schema/resource_importer.go similarity index 100% rename from internal/legacy/helper/schema/resource_importer.go rename to legacy/helper/schema/resource_importer.go diff --git a/legacy/helper/schema/resource_test.go b/legacy/helper/schema/resource_test.go new file mode 100644 index 000000000000..2b3a7e157531 --- /dev/null +++ b/legacy/helper/schema/resource_test.go @@ -0,0 +1,1687 @@ +package schema + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestResourceApply_create(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *terraform.InstanceState = nil + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "42", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) 
+ } +} + +func TestResourceApply_Timeout_state(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *terraform.InstanceState = nil + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(d); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + TimeoutKey: expectedForValues(40, 0, 80, 40, 0), + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal in Timeout State:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } +} + +// Regression test to ensure that the meta data is read from state, if a +// resource is destroyed and the timeout meta is no longer available from the +// config +func TestResourceApply_Timeout_destroy(t *testing.T) { + timeouts := &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + 
}, + Timeouts: timeouts, + } + + called := false + var delTimeout time.Duration + r.Delete = func(d *ResourceData, m interface{}) error { + delTimeout = d.Timeout(TimeoutDelete) + called = true + return nil + } + + s := &terraform.InstanceState{ + ID: "bar", + } + + if err := timeouts.StateEncode(s); err != nil { + t.Fatalf("Error encoding to state: %s", err) + } + + d := &terraform.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("delete not called") + } + + if *timeouts.Delete != delTimeout { + t.Fatalf("timeouts don't match, expected (%#v), got (%#v)", timeouts.Delete, delTimeout) + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDiff_Timeout_diff(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + r.Create = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + return nil + } + + conf := terraform.NewResourceConfigRaw( + map[string]interface{}{ + "foo": 42, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2h", + }, + }, + ) + var s *terraform.InstanceState + + actual, err := r.Diff(s, conf, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(120 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(expected); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal Meta in 
Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } +} + +func TestResourceDiff_CustomizeFunc(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + var called bool + + r.CustomizeDiff = func(d *ResourceDiff, m interface{}) error { + called = true + return nil + } + + conf := terraform.NewResourceConfigRaw( + map[string]interface{}{ + "foo": 42, + }, + ) + + var s *terraform.InstanceState + + _, err := r.Diff(s, conf, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatalf("diff customization not called") + } +} + +func TestResourceApply_destroy(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Delete = func(d *ResourceData, m interface{}) error { + called = true + return nil + } + + s := &terraform.InstanceState{ + ID: "bar", + } + + d := &terraform.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("delete not called") + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_destroyCreate(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + } + + change := false + r.Create = func(d *ResourceData, m interface{}) error { + change = d.HasChange("tags") + d.SetId("foo") + return nil + } + r.Delete = func(d *ResourceData, m interface{}) error { + return nil + } + + var s *terraform.InstanceState = &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "bar", + "tags.Name": "foo", + }, + } + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "42", + 
RequiresNew: true, + }, + "tags.Name": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "foo", + RequiresNew: true, + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !change { + t.Fatal("should have change") + } + + expected := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + "tags.%": "1", + "tags.Name": "foo", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_destroyPartial(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + SchemaVersion: 3, + } + + r.Delete = func(d *ResourceData, m interface{}) error { + d.Set("foo", 42) + return fmt.Errorf("some error") + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &terraform.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err == nil { + t.Fatal("should error") + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "3", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("expected:\n%#v\n\ngot:\n%#v", expected, actual) + } +} + +func TestResourceApply_update(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Update = func(d *ResourceData, m interface{}) error { + d.Set("foo", 42) + return nil + } + + s := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "13", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := 
&terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_updateNoCallback(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Update = nil + + s := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "13", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err == nil { + t.Fatal("should error") + } + + expected := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "12", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_isNewResource(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + } + + updateFunc := func(d *ResourceData, m interface{}) error { + d.Set("foo", "updated") + if d.IsNewResource() { + d.Set("foo", "new-resource") + } + return nil + } + r.Create = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + d.Set("foo", "created") + return updateFunc(d, m) + } + r.Update = updateFunc + + d := &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + New: "bla-blah", + }, + }, + } + + // positive test + var s *terraform.InstanceState = nil + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "new-resource", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("actual: %#v\nexpected: %#v", + actual, expected) + } + + // negative test + 
s = &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "new-resource", + }, + } + + actual, err = r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected = &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "updated", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("actual: %#v\nexpected: %#v", + actual, expected) + } +} + +func TestResourceInternalValidate(t *testing.T) { + cases := []struct { + In *Resource + Writable bool + Err bool + }{ + 0: { + nil, + true, + true, + }, + + // No optional and no required + 1: { + &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Required: true, + }, + }, + }, + true, + true, + }, + + // Update undefined for non-ForceNew field + 2: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "boo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + // Update defined for ForceNew field + 3: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + true, + true, + }, + + // non-writable doesn't need Update, Create or Delete + 4: { + &Resource{ + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + false, + false, + }, + + // non-writable *must not* have Create + 5: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + false, + true, + }, + + // writable must have Read + 6: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d 
*ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + // writable must have Delete + 7: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + 8: { // Reserved name at root should be disallowed + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "count": { + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + 9: { // Reserved name at nested levels should be allowed + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "parent_list": &Schema{ + Type: TypeString, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "provisioner": { + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + true, + false, + }, + + 10: { // Provider reserved name should be allowed in resource + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return 
nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "alias": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + true, + false, + }, + + 11: { // ID should be allowed in data source + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "id": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + false, + false, + }, + + 12: { // Deprecated ID should be allowed in resource + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "id": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "Use x_id instead", + }, + }, + }, + true, + false, + }, + + 13: { // non-writable must not define CustomizeDiff + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + CustomizeDiff: func(*ResourceDiff, interface{}) error { return nil }, + }, + false, + true, + }, + 14: { // Deprecated resource + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + DeprecationMessage: "This resource has been deprecated.", + }, + true, + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + sm := schemaMap{} + if tc.In != nil { + sm = schemaMap(tc.In.Schema) + } + + err := tc.In.InternalValidate(sm, tc.Writable) + if err != nil && !tc.Err { + t.Fatalf("%d: expected validation to pass: %s", i, err) + } + if err == nil && tc.Err { + t.Fatalf("%d: expected validation to fail", i) + } + }) + } +} + +func 
TestResourceRefresh(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + if m != 42 { + return fmt.Errorf("meta not passed") + } + + return d.Set("foo", d.Get("foo").(int)+1) + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "foo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_blankId(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + return nil + } + + s := &terraform.InstanceState{ + ID: "", + Attributes: map[string]string{}, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_delete(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + d.SetId("") + return nil + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_existsError(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Exists = func(*ResourceData, 
interface{}) (bool, error) { + return false, fmt.Errorf("error") + } + + r.Read = func(d *ResourceData, m interface{}) error { + panic("shouldn't be called") + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err == nil { + t.Fatalf("should error") + } + if !reflect.DeepEqual(actual, s) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_noExists(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Exists = func(*ResourceData, interface{}) (bool, error) { + return false, nil + } + + r.Read = func(d *ResourceData, m interface{}) error { + panic("shouldn't be called") + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != nil { + t.Fatalf("should have no state") + } +} + +func TestResourceRefresh_needsMigration(t *testing.T) { + // Schema v2 it deals only in newfoo, which tracks foo as an int + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // Real state migration functions will probably switch on this value, + // but we'll just assert on it for now. 
+ if v != 1 { + t.Fatalf("Expected StateSchemaVersion to be 1, got %d", v) + } + + if meta != 42 { + t.Fatal("Expected meta to be passed through to the migration function") + } + + oldfoo, err := strconv.ParseFloat(s.Attributes["oldfoo"], 64) + if err != nil { + t.Fatalf("err: %#v", err) + } + s.Attributes["newfoo"] = strconv.Itoa(int(oldfoo * 10)) + delete(s.Attributes, "oldfoo") + + return s, nil + } + + // State is v1 and deals in oldfoo, which tracked foo as a float at 1/10th + // the scale of newfoo + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "1.2", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_noMigrationNeeded(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + t.Fatal("Migrate function shouldn't be called!") + return nil, nil + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "newfoo": "12", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + actual, err := r.Refresh(s, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + 
"schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_stateSchemaVersionUnset(t *testing.T) { + r := &Resource{ + // Version 1 > Version 0 + SchemaVersion: 1, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + s.Attributes["newfoo"] = s.Attributes["oldfoo"] + return s, nil + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "12", + }, + } + + actual, err := r.Refresh(s, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_migrateStateErr(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + t.Fatal("Read should never be called!") + return nil + } + + r.MigrateState = func( + v int, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + return s, fmt.Errorf("triggering an error") + } + + s := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "12", + }, + } + + _, err := r.Refresh(s, nil) + if err == nil { + t.Fatal("expected error, but got none!") + } +} + +func TestResourceData(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + 
"foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + state := &terraform.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + } + + data := r.Data(state) + if data.Id() != "foo" { + t.Fatalf("err: %s", data.Id()) + } + if v := data.Get("foo"); v != 42 { + t.Fatalf("bad: %#v", v) + } + + // Set expectations + state.Meta = map[string]interface{}{ + "schema_version": "2", + } + + result := data.State() + if !reflect.DeepEqual(result, state) { + t.Fatalf("bad: %#v", result) + } +} + +func TestResourceData_blank(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + data := r.Data(nil) + if data.Id() != "" { + t.Fatalf("err: %s", data.Id()) + } + if v := data.Get("foo"); v != 0 { + t.Fatalf("bad: %#v", v) + } +} + +func TestResourceData_timeouts(t *testing.T) { + one := 1 * time.Second + two := 2 * time.Second + three := 3 * time.Second + four := 4 * time.Second + five := 5 * time.Second + + timeouts := &ResourceTimeout{ + Create: &one, + Read: &two, + Update: &three, + Delete: &four, + Default: &five, + } + + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: timeouts, + } + + data := r.Data(nil) + if data.Id() != "" { + t.Fatalf("err: %s", data.Id()) + } + + if !reflect.DeepEqual(timeouts, data.timeouts) { + t.Fatalf("incorrect ResourceData timeouts: %#v\n", *data.timeouts) + } +} + +func TestResource_UpgradeState(t *testing.T) { + // While this really only calls itself and therefore doesn't test any of + // the Resource code directly, it still serves as an example of registering + // a StateUpgrader. 
+ r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.StateUpgraders = []StateUpgrader{ + { + Version: 1, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "oldfoo": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + + oldfoo, ok := m["oldfoo"].(float64) + if !ok { + t.Fatalf("expected 1.2, got %#v", m["oldfoo"]) + } + m["newfoo"] = int(oldfoo * 10) + delete(m, "oldfoo") + + return m, nil + }, + }, + } + + oldStateAttrs := map[string]string{ + "id": "bar", + "oldfoo": "1.2", + } + + // convert the legacy flatmap state to the json equivalent + ty := r.StateUpgraders[0].Type + val, err := hcl2shim.HCL2ValueFromFlatmap(oldStateAttrs, ty) + if err != nil { + t.Fatal(err) + } + js, err := ctyjson.Marshal(val, ty) + if err != nil { + t.Fatal(err) + } + + // unmarshal the state using the json default types + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + t.Fatal(err) + } + + actual, err := r.StateUpgraders[0].Upgrade(m, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "id": "bar", + "newfoo": 12, + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %#v\ngot: %#v\n", expected, actual) + } +} + +func TestResource_ValidateUpgradeState(t *testing.T) { + r := &Resource{ + SchemaVersion: 3, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + // check for missing 
type + r.StateUpgraders[0].Type = cty.Type{} + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgrader must have type") + } + r.StateUpgraders[0].Type = cty.Object(map[string]cty.Type{ + "id": cty.String, + }) + + // check for missing Upgrade func + r.StateUpgraders[0].Upgrade = nil + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgrader must have an Upgrade func") + } + r.StateUpgraders[0].Upgrade = func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + } + + // check for skipped version + r.StateUpgraders[0].Version = 0 + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgraders cannot skip versions") + } + + // add the missing version, but fail because it's still out of order + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 1, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("upgraders must be defined in order") + } + + r.StateUpgraders[1], r.StateUpgraders[2] = r.StateUpgraders[2], r.StateUpgraders[1] + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + // can't add an upgrader for a schema >= the current version + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 3, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgraders cannot 
have a version >= current SchemaVersion") + } +} + +// The legacy provider will need to be able to handle both types of schema +// transformations, which has been retrofitted into the Refresh method. +func TestResource_migrateAndUpgrade(t *testing.T) { + r := &Resource{ + SchemaVersion: 4, + Schema: map[string]*Schema{ + "four": { + Type: TypeInt, + Required: true, + }, + }, + // this MigrateState will take the state to version 2 + MigrateState: func(v int, is *terraform.InstanceState, _ interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + _, ok := is.Attributes["zero"] + if !ok { + return nil, fmt.Errorf("zero not found in %#v", is.Attributes) + } + is.Attributes["one"] = "1" + delete(is.Attributes, "zero") + fallthrough + case 1: + _, ok := is.Attributes["one"] + if !ok { + return nil, fmt.Errorf("one not found in %#v", is.Attributes) + } + is.Attributes["two"] = "2" + delete(is.Attributes, "one") + default: + return nil, fmt.Errorf("invalid schema version %d", v) + } + return is, nil + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("four", 4) + } + + r.StateUpgraders = []StateUpgrader{ + { + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "two": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + _, ok := m["two"].(float64) + if !ok { + return nil, fmt.Errorf("two not found in %#v", m) + } + m["three"] = float64(3) + delete(m, "two") + return m, nil + }, + }, + { + Version: 3, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "three": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + _, ok := m["three"].(float64) + if !ok { + return nil, fmt.Errorf("three not found in %#v", m) + } + m["four"] = float64(4) + delete(m, "three") + return m, nil + }, + }, + } + + testStates := []*terraform.InstanceState{ + { + ID: "bar", + Attributes: 
map[string]string{ + "id": "bar", + "zero": "0", + }, + Meta: map[string]interface{}{ + "schema_version": "0", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "one": "1", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "two": "2", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "three": "3", + }, + Meta: map[string]interface{}{ + "schema_version": "3", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "four": "4", + }, + Meta: map[string]interface{}{ + "schema_version": "4", + }, + }, + } + + for i, s := range testStates { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + newState, err := r.Refresh(s, nil) + if err != nil { + t.Fatal(err) + } + + expected := &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "four": "4", + }, + Meta: map[string]interface{}{ + "schema_version": "4", + }, + } + + if !cmp.Equal(expected, newState, equateEmpty) { + t.Fatal(cmp.Diff(expected, newState, equateEmpty)) + } + }) + } +} diff --git a/internal/legacy/helper/schema/resource_timeout.go b/legacy/helper/schema/resource_timeout.go similarity index 98% rename from internal/legacy/helper/schema/resource_timeout.go rename to legacy/helper/schema/resource_timeout.go index df033d4c4557..9e3c5b437e98 100644 --- a/internal/legacy/helper/schema/resource_timeout.go +++ b/legacy/helper/schema/resource_timeout.go @@ -5,8 +5,8 @@ import ( "log" "time" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" "github.com/mitchellh/copystructure" ) diff --git a/internal/legacy/helper/schema/resource_timeout_test.go 
b/legacy/helper/schema/resource_timeout_test.go similarity index 99% rename from internal/legacy/helper/schema/resource_timeout_test.go rename to legacy/helper/schema/resource_timeout_test.go index f5091755b481..c6f2a326632f 100644 --- a/internal/legacy/helper/schema/resource_timeout_test.go +++ b/legacy/helper/schema/resource_timeout_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/legacy/terraform" ) func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { diff --git a/legacy/helper/schema/schema.go b/legacy/helper/schema/schema.go new file mode 100644 index 000000000000..5077ecfd89c7 --- /dev/null +++ b/legacy/helper/schema/schema.go @@ -0,0 +1,1854 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. 
+package schema + +import ( + "context" + "fmt" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" +) + +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + +// type used for schema package context keys +type contextKey string + +var ( + protoVersionMu sync.Mutex + protoVersion5 = false +) + +func isProto5() bool { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + return protoVersion5 + +} + +// SetProto5 enables a feature flag for any internal changes required required +// to work with the new plugin protocol. This should not be called by +// provider. +func SetProto5() { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + protoVersion5 = true +} + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. 
+ // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. 
If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs or asking for user + // input. It should be relatively short (a few sentences max) and should + // be formatted to fit a CLI. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). + // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. 
Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // PromoteSingle originally allowed for a single element to be assigned + // where a primitive list was expected, but this no longer works from + // Terraform v0.12 onwards (Terraform Core will require a list to be set + // regardless of what this is set to) and so only applies to Terraform v0.11 + // and earlier, and so should be used only to retain this functionality + // for those still using v0.11 with a provider that formerly used this. + PromoteSingle bool + + // The following fields are only valid for a TypeSet type. + // + // Set defines a function to determine the unique ID of an item so that + // a proper set can be built. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // NOTE: This currently does not work. + ComputedWhen []string + + // ConflictsWith is a set of schema keys that conflict with this schema. 
+ // This will only check that they're set in the _config_. This will not + // raise an error for a malfunctioning resource that sets a conflicting + // key. + ConflictsWith []string + + // When Deprecated is set, this attribute is deprecated. + // + // A deprecated field still works, but will probably stop working in near + // future. This string is the message shown to the user with instructions on + // how to address the deprecation. + Deprecated string + + // When Removed is set, this attribute has been removed from the schema + // + // Removed attributes can be left in the Schema to generate informative error + // messages for the user when they show up in resource configurations. + // This string is the message shown to the user with instructions on + // what do to about the removed attribute. + Removed string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. + // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + ValidateFunc SchemaValidateFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // logs or regular output. It should be used for passwords or other + // secret fields. Future versions of Terraform may encrypt these + // values. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. 
+type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. +type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. 
+type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. + if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. 
+ // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. 
+func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. +func (m schemaMap) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. 
+ if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Input implements the terraform.ResourceProvider method by asking +// for input for required configuration keys that don't have a value. 
+func (m schemaMap) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := m[k] + + // Skip things that don't require config, if that is even valid + // for a provider schema. + // Required XOR Optional must always be true to validate, so we only + // need to check one. + if v.Optional { + continue + } + + // Deprecated fields should never prompt + if v.Deprecated != "" { + continue + } + + // Skip things that have a value of some sort already + if _, ok := c.Raw[k]; ok { + continue + } + + // Skip if it has a default value + defaultValue, err := v.DefaultValue() + if err != nil { + return nil, fmt.Errorf("%s: error loading default: %s", k, err) + } + if defaultValue != nil { + continue + } + + var value interface{} + switch v.Type { + case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: + continue + case TypeString: + value, err = m.inputString(input, k, v) + default: + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) + } + + if err != nil { + return nil, fmt.Errorf( + "%s: %s", k, err) + } + + c.Config[k] = value + } + + return c, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { + return m.validateObject("", m, c) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. 
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
+ if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + for _, key := range v.ConflictsWith { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for _, part := range parts { + // Skip index fields + if _, err := strconv.Atoi(part); err == nil { + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s) at part (%s)", k, key, part) + } + + if subResource, ok := target.Elem.(*Resource); ok { + sm = schemaMap(subResource.Schema) + } + } + if target == nil { + return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) + } + if target.Required { + return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) + } + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + 
case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + // Computed-only field + if v.Computed && !v.Optional { + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + } + + if v.ValidateFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) + } + } + + if v.Deprecated == "" && v.Removed == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. 
+type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. + if !all { + continue + } + + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. 
+ if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). 
+ t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." + + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. 
+ if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. 
Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. + if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. 
+ codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already set up. 
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + + return nil +} + +func (m schemaMap) inputString( + input terraform.UIInput, + k string, + schema *Schema) (interface{}, error) { + result, err := input.Input(context.Background(), &terraform.InputOpts{ + Id: k, + Query: k, + Description: schema.Description, + Default: schema.InputDefault, + }) + + return result, err +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return nil, []error{fmt.Errorf( + "%q, error loading default: %s", k, err)} + } + + // We're okay as long as we had a value set + ok = raw != nil + } + if !ok { + if schema.Required { + return nil, []error{fmt.Errorf( + "%q: required field is not set", k)} + } + + return nil, nil + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return nil, []error{fmt.Errorf( + "%q: this field cannot be set", k)} + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. 
+ if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil + } + return nil, nil + } + + err := m.validateConflictingAttributes(k, schema, c) + if err != nil { + return nil, []error{err} + } + + return m.validateType(k, raw, schema, c) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func (m schemaMap) validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. + continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + // If we support promotion and the raw value isn't a slice, wrap + // it in []interface{} and check again. 
+ if schema.PromoteSingle && rawV.Kind() != reflect.Slice { + raw = []interface{}{raw} + rawV = reflect.ValueOf(raw) + } + + if rawV.Kind() != reflect.Slice { + return nil, []error{fmt.Errorf( + "%s: should be a list", k)} + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. + if isProto5() && !isWhollyKnown(raw) { + return nil, nil + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + var ws []string + var es []error + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + var ws2 []string + var es2 []error + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + ws2, es2 = m.validateObject(key, t.Schema, c) + case *Schema: + ws2, es2 = m.validateType(key, raw, t, c) + } + + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) 
+ } + } + + return ws, es +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. + reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + // Otherwise it's likely raw is an interpolation. + return nil, nil + case reflect.Map: + case reflect.Slice: + default: + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + if schema.ValidateFunc != nil { + return schema.ValidateFunc(mapIface, k) + } + return nil, nil + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return nil, []error{fmt.Errorf( + "%s: should be a map", k)} + } + mapIface := v.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + } + + if schema.ValidateFunc != nil { + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + 
validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + + return nil, nil +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { + for key, raw := range m { + valueType, err := getValueType(k, schema) + if err != nil { + return nil, []error{err} + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return nil, nil +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. 
+ if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return nil, []error{fmt.Errorf( + "%s: expected object, got %s", + k, reflect.ValueOf(raw).Kind())} + } + + var ws []string + var es []error + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + + ws2, es2 := m.validate(key, s, c) + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk, _ := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + es = append(es, fmt.Errorf( + "%s: invalid or unknown key: %s", k, subk)) + } + } + } + + return ws, es +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return nil, nil + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. 
+ switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a list", k), + } + case reflect.Map: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a map", k), + } + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return nil, nil + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeInt: + switch { + case isProto5(): + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. + + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} + } + default: + // Verify that we can parse this as an int + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + } + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + if schema.ValidateFunc != nil { + return schema.ValidateFunc(decoded, k) + } + + return nil, nil +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + var ws 
[]string + var es []error + switch schema.Type { + case TypeSet, TypeList: + ws, es = m.validateList(k, raw, schema, c) + case TypeMap: + ws, es = m.validateMap(k, raw, schema, c) + default: + ws, es = m.validatePrimitive(k, raw, schema, c) + } + + if schema.Deprecated != "" { + ws = append(ws, fmt.Sprintf( + "%q: [DEPRECATED] %s", k, schema.Deprecated)) + } + + if schema.Removed != "" { + es = append(es, fmt.Errorf( + "%q: [REMOVED] %s", k, schema.Removed)) + } + + return ws, es +} + +// Zero returns the zero value for a type. +func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/legacy/helper/schema/schema_test.go b/legacy/helper/schema/schema_test.go new file mode 100644 index 000000000000..02158f1cbe49 --- /dev/null +++ b/legacy/helper/schema/schema_test.go @@ -0,0 +1,5558 @@ +package schema + +import ( + "bytes" + "errors" + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/helper/hashcode" + "github.com/hashicorp/terraform/legacy/terraform" +) + +func TestEnvDefaultFunc(t *testing.T) { + key := "TF_TEST_ENV_DEFAULT_FUNC" + defer os.Unsetenv(key) + + f := EnvDefaultFunc(key, "42") + if err := os.Setenv(key, "foo"); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := f() + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != "foo" { + t.Fatalf("bad: %#v", actual) + } + + if err := os.Unsetenv(key); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err = f() + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != 
"42" { + t.Fatalf("bad: %#v", actual) + } +} + +func TestMultiEnvDefaultFunc(t *testing.T) { + keys := []string{ + "TF_TEST_MULTI_ENV_DEFAULT_FUNC1", + "TF_TEST_MULTI_ENV_DEFAULT_FUNC2", + } + defer func() { + for _, k := range keys { + os.Unsetenv(k) + } + }() + + // Test that the first key is returned first + f := MultiEnvDefaultFunc(keys, "42") + if err := os.Setenv(keys[0], "foo"); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := f() + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != "foo" { + t.Fatalf("bad: %#v", actual) + } + + if err := os.Unsetenv(keys[0]); err != nil { + t.Fatalf("err: %s", err) + } + + // Test that the second key is returned if the first one is empty + f = MultiEnvDefaultFunc(keys, "42") + if err := os.Setenv(keys[1], "foo"); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err = f() + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != "foo" { + t.Fatalf("bad: %#v", actual) + } + + if err := os.Unsetenv(keys[1]); err != nil { + t.Fatalf("err: %s", err) + } + + // Test that the default value is returned when no keys are set + actual, err = f() + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != "42" { + t.Fatalf("bad: %#v", actual) + } +} + +func TestValueType_Zero(t *testing.T) { + cases := []struct { + Type ValueType + Value interface{} + }{ + {TypeBool, false}, + {TypeInt, 0}, + {TypeFloat, 0.0}, + {TypeString, ""}, + {TypeList, []interface{}{}}, + {TypeMap, map[string]interface{}{}}, + {TypeSet, new(Set)}, + } + + for i, tc := range cases { + actual := tc.Type.Zero() + if !reflect.DeepEqual(actual, tc.Value) { + t.Fatalf("%d: %#v != %#v", i, actual, tc.Value) + } + } +} + +func TestSchemaMap_Diff(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *terraform.InstanceState + Config map[string]interface{} + CustomizeDiff CustomizeDiffFunc + Diff *terraform.InstanceDiff + Err bool + }{ + { + Schema: map[string]*Schema{ + "availability_zone": 
&Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + ID: "foo", + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Computed, but set in config", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Default", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Default: "foo", + }, + }, + + State: nil, + + Config: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": 
&terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, configuration set", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "String with StateFunc", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + return a.(string) + "!" 
+ }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo!", + NewExtra: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "StateFunc not called with nil value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + t.Fatalf("should not get here!") + return "" + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Variable computed", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Int decode", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": 27, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "port": &terraform.ResourceAttrDiff{ + Old: "", + New: "27", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "bool decode", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + 
Config: map[string]interface{}{ + "port": false, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "port": &terraform.ResourceAttrDiff{ + Old: "", + New: "false", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bool", + Schema: map[string]*Schema{ + "delete": &Schema{ + Type: TypeBool, + Optional: true, + Default: false, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "delete": "false", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "List decode", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "List decode with promotion", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": "5", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ports.0": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "List decode with promotion with list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + 
"ports": []interface{}{"5"}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ports.0": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, 5}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: 
map[string]string{ + "ports.#": "2", + "ports.0": "1", + "ports.1": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "3", + RequiresNew: true, + }, + "ports.0": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + RequiresNew: true, + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "2", + RequiresNew: true, + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "List with computed set", + Schema: map[string]*Schema{ + "config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "name": { + Type: TypeString, + Required: true, + }, + + "rules": { + Type: TypeSet, + Computed: true, + Elem: &Schema{Type: TypeString}, + Set: HashString, + }, + }, + }, + }, + }, + + State: nil, + + Config: 
map[string]interface{}{ + "config": []interface{}{ + map[string]interface{}{ + "name": "hello", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + + "config.0.name": &terraform.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Computed: true, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + 
Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{"2", "5", 1}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, "5"}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.1": "1", + "ports.2": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": 
&terraform.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.1": "1", + "ports.2": "2", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + NewRemoved: true, + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "2", + New: "0", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + ps := m["ports"].([]interface{}) + result := 0 + for _, p := range ps { + result += p.(int) + } + return result + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "2", + "ingress.80.ports.#": "1", + "ingress.80.ports.0": "80", + "ingress.443.ports.#": "1", + "ingress.443.ports.0": "443", + }, + }, + + Config: map[string]interface{}{ + 
"ingress": []interface{}{ + map[string]interface{}{ + "ports": []interface{}{443}, + }, + map[string]interface{}{ + "ports": []interface{}{80}, + }, + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "List of structure decode", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": 8080, + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ingress.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ingress.0.from": &terraform.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Err: false, + }, + + { + Name: "ComputedWhen", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + /* TODO + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: 
true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 8080, + }, + + Diff: &terraform.ResourceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "foo", + NewComputed: true, + }, + "port": &terraform.ResourceAttrDiff{ + Old: "80", + New: "8080", + }, + }, + }, + + Err: false, + }, + */ + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.%": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + + "config_vars.bar": &terraform.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.foo": &terraform.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.bar": &terraform.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "vars.foo": "bar", + }, + }, + + Config: 
map[string]interface{}{ + "vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "vars.foo": &terraform.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + "vars.bar": &terraform.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "vars.foo": "bar", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.0.foo": &terraform.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.0.bar": &terraform.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "baz", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config_vars.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.%": &terraform.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + "config_vars.0.foo": &terraform.ResourceAttrDiff{ + Old: 
"bar", + NewRemoved: true, + }, + "config_vars.0.bar": &terraform.ResourceAttrDiff{ + Old: "baz", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "ForceNews", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "address": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "address": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + + "address": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + + "ports.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + Computed: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + 
}, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "instances.#": "0", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{hcl2shim.UnknownVariableValue}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "instances.#": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": hcl2shim.UnknownVariableValue, + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "route.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway": &terraform.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": 
[]interface{}{ + hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "route.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway.#": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: nil, + + Config: nil, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "vars.%": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "vars.%": "0", + }, + }, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "vars.%": &terraform.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: " - Empty", + Schema: map[string]*Schema{}, + + State: &terraform.InstanceState{}, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Float", + Schema: map[string]*Schema{ + "some_threshold": &Schema{ + Type: TypeFloat, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "some_threshold": "567.8", + }, + }, + + Config: map[string]interface{}{ + "some_threshold": 12.34, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "some_threshold": &terraform.ResourceAttrDiff{ + Old: "567.8", + New: "12.34", + }, + }, + }, + + Err: false, + }, + + { + Name: 
"https://github.com/hashicorp/terraform/issues/824", + Schema: map[string]*Schema{ + "block_device": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "device_name": &Schema{ + Type: TypeString, + Required: true, + }, + "delete_on_termination": &Schema{ + Type: TypeBool, + Optional: true, + Default: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) + return hashcode.String(buf.String()) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "block_device.#": "2", + "block_device.616397234.delete_on_termination": "true", + "block_device.616397234.device_name": "/dev/sda1", + "block_device.2801811477.delete_on_termination": "true", + "block_device.2801811477.device_name": "/dev/sdx", + }, + }, + + Config: map[string]interface{}{ + "block_device": []interface{}{ + map[string]interface{}{ + "device_name": "/dev/sda1", + }, + map[string]interface{}{ + "device_name": "/dev/sdx", + }, + }, + }, + Diff: nil, + Err: false, + }, + + { + Name: "Zero value in state shouldn't result in diff", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "port": "false", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Same as prev, but for sets", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(v 
interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "route.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "A set computed element shouldn't cause a diff", + Schema: map[string]*Schema{ + "active": &Schema{ + Type: TypeBool, + Computed: true, + ForceNew: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "active": "true", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "An empty set should show up in the diff", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "instances.#": "1", + "instances.3": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "instances.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "instances.3": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Map with empty value", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "foo": "", + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "vars.%": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "vars.foo": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + }, + }, + }, + + Err: false, + }, + + { + Name: "Unset bool, not in state", + Schema: map[string]*Schema{ + "force": &Schema{ + Type: TypeBool, + Optional: true, + 
ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset set, not in state", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeSet, + Optional: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(interface{}) int { return 0 }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset list in state, should not show up computed", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "metadata_keys.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set element computed element", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed map without config that's known to be empty does not generate diff", + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + Config: nil, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "tags.%": "0", + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set with hyphen keys", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + 
"gateway-name": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway-name": "hello", + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "route.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.1.index": &terraform.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.1.gateway-name": &terraform.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + }, + }, + + Err: false, + }, + + { + Name: ": StateFunc in nested set (#1759)", + Schema: map[string]*Schema{ + "service_account": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "scopes": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{ + Type: TypeString, + StateFunc: func(v interface{}) string { + return v.(string) + "!" 
+ }, + }, + Set: func(v interface{}) int { + i, err := strconv.Atoi(v.(string)) + if err != nil { + t.Fatalf("err: %s", err) + } + return i + }, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "service_account": []interface{}{ + map[string]interface{}{ + "scopes": []interface{}{"123"}, + }, + }, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "service_account.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.123": &terraform.ResourceAttrDiff{ + Old: "", + New: "123!", + NewExtra: "123", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Removing set elements", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "instances.#": "2", + "instances.3": "333", + "instances.2": "22", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{"333", "4444"}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "instances.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "instances.2": &terraform.ResourceAttrDiff{ + Old: "22", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + "instances.3": &terraform.ResourceAttrDiff{ + Old: "333", + New: "333", + }, + "instances.4": &terraform.ResourceAttrDiff{ + Old: "", + New: "4444", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bools can be set with 0/1 in config, still get true/false", + Schema: map[string]*Schema{ + "one": &Schema{ + Type: TypeBool, + Optional: true, + }, + "two": &Schema{ + Type: TypeBool, + Optional: true, + }, 
+ "three": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "one": "false", + "two": "true", + "three": "true", + }, + }, + + Config: map[string]interface{}{ + "one": "1", + "two": "0", + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "one": &terraform.ResourceAttrDiff{ + Old: "false", + New: "true", + }, + "two": &terraform.ResourceAttrDiff{ + Old: "true", + New: "false", + }, + "three": &terraform.ResourceAttrDiff{ + Old: "true", + New: "false", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "tainted in state w/ no attr changes is still a replacement", + Schema: map[string]*Schema{}, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{}, + DestroyTainted: true, + }, + + Err: false, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "3", + New: "3", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &terraform.ResourceAttrDiff{ + Old: "4", + New: "0", + 
NewRemoved: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "removed optional items should trigger ForceNew", + Schema: map[string]*Schema{ + "description": &Schema{ + Type: TypeString, + ForceNew: true, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "description": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "description": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "", + RequiresNew: true, + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + // GH-7715 + { + Name: "computed value for boolean field", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeBool, + ForceNew: true, + Computed: true, + Optional: true, + }, + }, + + State: &terraform.InstanceState{}, + + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + Old: "", + New: "false", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew marks count as ForceNew if computed", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{hcl2shim.UnknownVariableValue, 2, 1}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "3", + New: "", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "List with computed schema and ForceNew", + Schema: map[string]*Schema{ + 
"config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "config.#": "2", + "config.0": "a", + "config.1": "b", + }, + }, + + Config: map[string]interface{}{ + "config": []interface{}{hcl2shim.UnknownVariableValue, hcl2shim.UnknownVariableValue}, + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "config.#": &terraform.ResourceAttrDiff{ + Old: "2", + New: "", + RequiresNew: true, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "overridden diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + // NOTE: This case is technically impossible in the current + // implementation, because optional+computed values never show up in the + // diff. In the event behavior changes this test should ensure that the + // intended diff still shows up. 
+ Name: "overridden removed attribute diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + + Name: "overridden diff with a CustomizeDiff function, ForceNew in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "required field with computed diff added with CustomizeDiff function", + Schema: map[string]*Schema{ + "ami_id": &Schema{ + Type: TypeString, + Required: true, + }, + "instance_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ami_id": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("instance_id", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: 
map[string]*terraform.ResourceAttrDiff{ + "ami_id": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "instance_id": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew - CustomizeDiffFunc edition", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 6}, + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("ports", []interface{}{5, 2, 1}); err != nil { + return err + } + if err := d.ForceNew("ports"); err != nil { + return err + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "ports.#": &terraform.ResourceAttrDiff{ + Old: "3", + New: "3", + }, + "ports.1": &terraform.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &terraform.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &terraform.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &terraform.ResourceAttrDiff{ + Old: "4", + New: "0", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "tainted resource does not run CustomizeDiffFunc", + Schema: map[string]*Schema{}, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return errors.New("diff customization should not have run") + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{}, + DestroyTainted: true, 
+ }, + + Err: false, + }, + + { + Name: "NewComputed based on a conditional with CustomizeDiffFunc", + Schema: map[string]*Schema{ + "etag": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + "version_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "etag": "foo", + "version_id": "1", + }, + }, + + Config: map[string]interface{}{ + "etag": "bar", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if d.HasChange("etag") { + d.SetNewComputed("version_id") + } + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "etag": &terraform.ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "version_id": &terraform.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "NewComputed should always propagate with CustomizeDiff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "foo": "", + }, + ID: "pre-existing", + }, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + d.SetNewComputed("foo") + return nil + }, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "vetoing a diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "foo": "baz", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return fmt.Errorf("diff vetoed") + }, + + Err: true, + }, + + // A lot of resources currently depended on using the 
empty string as a + // nil/unset value. + // FIXME: We want this to eventually produce a diff, since there + // technically is a new value in the config. + { + Name: "optional, computed, empty string", + Schema: map[string]*Schema{ + "attr": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "attr": "bar", + }, + }, + + Config: map[string]interface{}{ + "attr": "", + }, + }, + + { + Name: "optional, computed, empty string should not crash in CustomizeDiff", + Schema: map[string]*Schema{ + "unrelated_set": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + "stream_enabled": { + Type: TypeBool, + Optional: true, + }, + "stream_view_type": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "unrelated_set.#": "0", + "stream_enabled": "true", + "stream_view_type": "KEYS_ONLY", + }, + }, + Config: map[string]interface{}{ + "stream_enabled": false, + "stream_view_type": "", + }, + CustomizeDiff: func(diff *ResourceDiff, v interface{}) error { + v, ok := diff.GetOk("unrelated_set") + if ok { + return fmt.Errorf("Didn't expect unrelated_set: %#v", v) + } + return nil + }, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "stream_enabled": { + Old: "true", + New: "false", + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := terraform.NewResourceConfigRaw(tc.Config) + + d, err := schemaMap(tc.Schema).Diff(tc.State, c, tc.CustomizeDiff, nil, true) + if err != nil != tc.Err { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(tc.Diff, d) { + t.Fatalf("expected:\n%#v\n\ngot:\n%#v", tc.Diff, d) + } + }) + } +} + +func TestSchemaMap_Input(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + Config map[string]interface{} + Input 
map[string]string + Result map[string]interface{} + Err bool + }{ + /* + * String decode + */ + + "no input on optional field with no config": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Input: map[string]string{}, + Result: map[string]interface{}{}, + Err: false, + }, + + "input ignored when config has a value": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Input: map[string]string{ + "availability_zone": "foo", + }, + + Result: map[string]interface{}{}, + + Err: false, + }, + + "input ignored when schema has a default": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Default: "foo", + Optional: true, + }, + }, + + Input: map[string]string{ + "availability_zone": "bar", + }, + + Result: map[string]interface{}{}, + + Err: false, + }, + + "input ignored when default function returns a value": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + Optional: true, + }, + }, + + Input: map[string]string{ + "availability_zone": "bar", + }, + + Result: map[string]interface{}{}, + + Err: false, + }, + + "input ignored when default function returns an empty string": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Default: "", + Optional: true, + }, + }, + + Input: map[string]string{ + "availability_zone": "bar", + }, + + Result: map[string]interface{}{}, + + Err: false, + }, + + "input used when default function returns nil": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + DefaultFunc: func() (interface{}, error) { + return nil, nil + }, + Required: true, + }, + }, + + Input: map[string]string{ + "availability_zone": "bar", + }, + + Result: 
map[string]interface{}{ + "availability_zone": "bar", + }, + + Err: false, + }, + + "input not used when optional default function returns nil": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + DefaultFunc: func() (interface{}, error) { + return nil, nil + }, + Optional: true, + }, + }, + + Input: map[string]string{}, + Result: map[string]interface{}{}, + Err: false, + }, + } + + for i, tc := range cases { + if tc.Config == nil { + tc.Config = make(map[string]interface{}) + } + + input := new(terraform.MockUIInput) + input.InputReturnMap = tc.Input + + rc := terraform.NewResourceConfigRaw(tc.Config) + rc.Config = make(map[string]interface{}) + + actual, err := schemaMap(tc.Schema).Input(input, rc) + if err != nil != tc.Err { + t.Fatalf("#%v err: %s", i, err) + } + + if !reflect.DeepEqual(tc.Result, actual.Config) { + t.Fatalf("#%v: bad:\n\ngot: %#v\nexpected: %#v", i, actual.Config, tc.Result) + } + } +} + +func TestSchemaMap_InputDefault(t *testing.T) { + emptyConfig := make(map[string]interface{}) + rc := terraform.NewResourceConfigRaw(emptyConfig) + rc.Config = make(map[string]interface{}) + + input := new(terraform.MockUIInput) + input.InputFn = func(opts *terraform.InputOpts) (string, error) { + t.Fatalf("InputFn should not be called on: %#v", opts) + return "", nil + } + + schema := map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Default: "foo", + Optional: true, + }, + } + actual, err := schemaMap(schema).Input(input, rc) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{} + + if !reflect.DeepEqual(expected, actual.Config) { + t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected) + } +} + +func TestSchemaMap_InputDeprecated(t *testing.T) { + emptyConfig := make(map[string]interface{}) + rc := terraform.NewResourceConfigRaw(emptyConfig) + rc.Config = make(map[string]interface{}) + + input := new(terraform.MockUIInput) + input.InputFn = func(opts 
*terraform.InputOpts) (string, error) { + t.Fatalf("InputFn should not be called on: %#v", opts) + return "", nil + } + + schema := map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Deprecated: "long gone", + Optional: true, + }, + } + actual, err := schemaMap(schema).Input(input, rc) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{} + + if !reflect.DeepEqual(expected, actual.Config) { + t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected) + } +} + +func TestSchemaMap_InternalValidate(t *testing.T) { + cases := map[string]struct { + In map[string]*Schema + Err bool + }{ + "nothing": { + nil, + false, + }, + + "Both optional and required": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Required: true, + }, + }, + true, + }, + + "No optional and no required": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + }, + }, + true, + }, + + "Missing Type": { + map[string]*Schema{ + "foo": &Schema{ + Required: true, + }, + }, + true, + }, + + "Required but computed": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Required: true, + Computed: true, + }, + }, + true, + }, + + "Looks good": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Required: true, + }, + }, + false, + }, + + "Computed but has default": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + Default: "foo", + }, + }, + true, + }, + + "Required but has default": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Required: true, + Default: "foo", + }, + }, + true, + }, + + "List element not set": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + }, + }, + true, + }, + + "List default": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + Default: "foo", + }, + }, + true, + }, + + "List element computed": { + map[string]*Schema{ + "foo": &Schema{ + 
Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeInt, + Computed: true, + }, + }, + }, + true, + }, + + "List element with Set set": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + Set: func(interface{}) int { return 0 }, + Optional: true, + }, + }, + true, + }, + + "Set element with no Set set": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Optional: true, + }, + }, + false, + }, + + "Required but computedWhen": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Required: true, + ComputedWhen: []string{"foo"}, + }, + }, + true, + }, + + "Conflicting attributes cannot be required": { + map[string]*Schema{ + "a": &Schema{ + Type: TypeBool, + Required: true, + }, + "b": &Schema{ + Type: TypeBool, + Optional: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "Attribute with conflicts cannot be required": { + map[string]*Schema{ + "b": &Schema{ + Type: TypeBool, + Required: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "ConflictsWith cannot be used w/ ComputedWhen": { + map[string]*Schema{ + "a": &Schema{ + Type: TypeBool, + ComputedWhen: []string{"foor"}, + }, + "b": &Schema{ + Type: TypeBool, + Required: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "Sub-resource invalid": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "foo": new(Schema), + }, + }, + }, + }, + true, + }, + + "Sub-resource valid": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + }, + }, + false, + }, + + "ValidateFunc on non-primitive": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeSet, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, 
+ true, + }, + + "computed-only field with validateFunc": { + map[string]*Schema{ + "string": &Schema{ + Type: TypeString, + Computed: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("this is not fine")) + return + }, + }, + }, + true, + }, + + "computed-only field with diffSuppressFunc": { + map[string]*Schema{ + "string": &Schema{ + Type: TypeString, + Computed: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + return false + }, + }, + }, + true, + }, + + "invalid field name format #1": { + map[string]*Schema{ + "with space": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + true, + }, + + "invalid field name format #2": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + true, + }, + + "invalid field name format of a Deprecated field": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "Use with_underscores instead", + }, + }, + false, + }, + + "invalid field name format of a Removed field": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + Removed: "Use with_underscores instead", + }, + }, + false, + }, + + "ConfigModeBlock with Elem *Resource": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + Elem: &Resource{}, + }, + }, + false, + }, + + "ConfigModeBlock Computed with Elem *Resource": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Computed: true, + Elem: &Resource{}, + }, + }, + true, // ConfigMode of block cannot be used for computed schema + }, + + "ConfigModeBlock with Elem *Schema": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + true, + }, + + 
"ConfigModeBlock with no Elem": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeString, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + }, + }, + true, + }, + + "ConfigModeBlock inside ConfigModeAttr": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeAttr, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "sub": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Elem: &Resource{}, + }, + }, + }, + }, + }, + true, // ConfigMode of block cannot be used in child of schema with ConfigMode of attribute + }, + + "ConfigModeAuto with *Resource inside ConfigModeAttr": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeAttr, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "sub": &Schema{ + Type: TypeList, + Elem: &Resource{}, + }, + }, + }, + }, + }, + true, // in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + err := schemaMap(tc.In).InternalValidate(nil) + if err != nil != tc.Err { + if tc.Err { + t.Fatalf("%q: Expected error did not occur:\n\n%#v", tn, tc.In) + } + t.Fatalf("%q: Unexpected error occurred: %s\n\n%#v", tn, err, tc.In) + } + }) + } + +} + +func TestSchemaMap_DiffSuppress(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *terraform.InstanceState + Config map[string]interface{} + ExpectedDiff *terraform.InstanceDiff + Err bool + }{ + "#0 - Suppress otherwise valid diff by returning true": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + return true + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + ExpectedDiff: nil, + + Err: false, + }, + + "#1 - Don't 
suppress diff by returning false": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + return false + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + ExpectedDiff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + "Default with suppress makes no diff": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Default: "foo", + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + return true + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + ExpectedDiff: nil, + + Err: false, + }, + + "Default with false suppress makes diff": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Default: "foo", + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + return false + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + ExpectedDiff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + "Complex structure with set of computed string should mark root set as computed": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "outer_str": &Schema{ + Type: TypeString, + Optional: true, + }, + "inner": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "inner_str": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + return 2 + }, + }, + }, + }, + Set: func(v interface{}) int { + return 1 + }, + }, + }, + + State: nil, + + Config: 
map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "outer_str": "foo", + "inner": []interface{}{ + map[string]interface{}{ + "inner_str": hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + }, + + ExpectedDiff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "outer.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.outer_str": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "outer.~1.inner.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.inner.~2.inner_str": &terraform.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + "Complex structure with complex list of computed string should mark root set as computed": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "outer_str": &Schema{ + Type: TypeString, + Optional: true, + }, + "inner": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "inner_str": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + Set: func(v interface{}) int { + return 1 + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "outer_str": "foo", + "inner": []interface{}{ + map[string]interface{}{ + "inner_str": hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + }, + + ExpectedDiff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "outer.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.outer_str": &terraform.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "outer.~1.inner.#": &terraform.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.inner.0.inner_str": &terraform.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, 
+ + Err: false, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + c := terraform.NewResourceConfigRaw(tc.Config) + + d, err := schemaMap(tc.Schema).Diff(tc.State, c, nil, nil, true) + if err != nil != tc.Err { + t.Fatalf("#%q err: %s", tn, err) + } + + if !reflect.DeepEqual(tc.ExpectedDiff, d) { + t.Fatalf("#%q:\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.ExpectedDiff, d) + } + }) + } +} + +func TestSchemaMap_Validate(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + Config map[string]interface{} + Err bool + Errors []error + Warnings []string + }{ + "Good": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + }, + + "Good, because the var is not set and that error will come elsewhere": { + Schema: map[string]*Schema{ + "size": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "size": hcl2shim.UnknownVariableValue, + }, + }, + + "Required field not set": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{}, + + Err: true, + }, + + "Invalid basic type": { + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "port": "I am invalid", + }, + + Err: true, + }, + + "Invalid complex type": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + Err: true, + }, + + "Bad type": { + Schema: map[string]*Schema{ + "size": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "size": "nope", + }, + + Err: true, + }, + + "Required but has 
DefaultFunc": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + Config: nil, + }, + + "Required but has DefaultFunc return nil": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return nil, nil + }, + }, + }, + + Config: nil, + + Err: true, + }, + + "List with promotion": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "ingress": "5", + }, + + Err: false, + }, + + "List with promotion set as list": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{"5"}, + }, + + Err: false, + }, + + "Optional sub-resource": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{}, + + Err: false, + }, + + "Sub-resource is the wrong type": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{"foo"}, + }, + + Err: true, + }, + + "Not a list nested block": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": "foo", + }, + + Err: true, + 
Errors: []error{ + fmt.Errorf(`ingress: should be a list`), + }, + }, + + "Not a list primitive": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + Config: map[string]interface{}{ + "strings": "foo", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`strings: should be a list`), + }, + }, + + "Unknown list": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + Config: map[string]interface{}{ + "strings": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "Unknown + Deprecation": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Config: map[string]interface{}{ + "old_news": hcl2shim.UnknownVariableValue, + }, + + Warnings: []string{ + "\"old_news\": [DEPRECATED] please use 'new_news' instead", + }, + }, + + "Required sub-resource field": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{}, + }, + }, + + Err: true, + }, + + "Good sub-resource": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": 80, + }, + }, + }, + + Err: false, + }, + + "Good sub-resource, computed value": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Optional: true, + }, + 
}, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": hcl2shim.UnknownVariableValue, + }, + }, + }, + + Err: false, + }, + + "Invalid/unknown field": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "foo": "bar", + }, + + Err: true, + }, + + "Invalid/unknown field with computed value": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + + Err: true, + }, + + "Computed field set": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Err: true, + }, + + "Not a set": { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + Config: map[string]interface{}{ + "ports": "foo", + }, + + Err: true, + }, + + "Maps": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": "foo", + }, + + Err: true, + }, + + "Good map: data surrounded by extra slice": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + + "Good map": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "Map with type specified as value type": { + Schema: 
map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + Elem: TypeBool, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "not_a_bool", + }, + }, + + Err: true, + }, + + "Map with type specified as nested Schema": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + Elem: &Schema{Type: TypeBool}, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "not_a_bool", + }, + }, + + Err: true, + }, + + "Bad map: just a slice": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + "foo", + }, + }, + + Err: true, + }, + + "Good set: config has slice with single interpolated value": { + Schema: map[string]*Schema{ + "security_groups": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeString}, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + Config: map[string]interface{}{ + "security_groups": []interface{}{"${var.foo}"}, + }, + + Err: false, + }, + + "Bad set: config has single interpolated value": { + Schema: map[string]*Schema{ + "security_groups": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeString}, + }, + }, + + Config: map[string]interface{}{ + "security_groups": "${var.foo}", + }, + + Err: true, + }, + + "Bad, subresource should not allow unknown elements": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "port": 80, + "other": "yes", + }, + }, + }, + + Err: true, + }, + + "Bad, subresource should not allow 
invalid types": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "port": "bad", + }, + }, + }, + + Err: true, + }, + + "Bad, should not allow lists to be assigned to string attributes": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": []interface{}{"foo", "bar", "baz"}, + }, + + Err: true, + }, + + "Bad, should not allow maps to be assigned to string attributes": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": map[string]interface{}{"foo": "bar", "baz": "thing"}, + }, + + Err: true, + }, + + "Deprecated attribute usage generates warning, but not error": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Config: map[string]interface{}{ + "old_news": "extra extra!", + }, + + Err: false, + + Warnings: []string{ + "\"old_news\": [DEPRECATED] please use 'new_news' instead", + }, + }, + + "Deprecated generates no warnings if attr not used": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Err: false, + + Warnings: nil, + }, + + "Removed attribute usage generates error": { + Schema: map[string]*Schema{ + "long_gone": &Schema{ + Type: TypeString, + Optional: true, + Removed: "no longer supported by Cloud API", + }, + }, + + Config: map[string]interface{}{ + "long_gone": "still here!", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"long_gone\": [REMOVED] no longer 
supported by Cloud API"), + }, + }, + + "Removed generates no errors if attr not used": { + Schema: map[string]*Schema{ + "long_gone": &Schema{ + Type: TypeString, + Optional: true, + Removed: "no longer supported by Cloud API", + }, + }, + + Err: false, + }, + + "Conflicting attributes generate error": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": "b-val", + "a": "a-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"a\": conflicts with b"), + }, + }, + + "Conflicting attributes okay when unknown 1": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": "b-val", + "a": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "Conflicting attributes okay when unknown 2": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": hcl2shim.UnknownVariableValue, + "a": "a-val", + }, + + Err: false, + }, + + "Conflicting attributes generate error even if one is unknown": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"a", "c"}, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b", "c"}, + }, + "c": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b", "a"}, + }, + }, + + Config: map[string]interface{}{ + "b": hcl2shim.UnknownVariableValue, + "a": "a-val", + "c": "c-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"a\": conflicts with c"), + fmt.Errorf("\"c\": conflicts with a"), + }, + }, + + "Required 
attribute & undefined conflicting optional are good": { + Schema: map[string]*Schema{ + "required_att": &Schema{ + Type: TypeString, + Required: true, + }, + "optional_att": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"required_att"}, + }, + }, + + Config: map[string]interface{}{ + "required_att": "required-val", + }, + + Err: false, + }, + + "Required conflicting attribute & defined optional generate error": { + Schema: map[string]*Schema{ + "required_att": &Schema{ + Type: TypeString, + Required: true, + }, + "optional_att": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"required_att"}, + }, + }, + + Config: map[string]interface{}{ + "required_att": "required-val", + "optional_att": "optional-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`"optional_att": conflicts with required_att`), + }, + }, + + "Computed + Optional fields conflicting with each other": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{ + "foo_att": "foo-val", + "bar_att": "bar-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`"foo_att": conflicts with bar_att`), + fmt.Errorf(`"bar_att": conflicts with foo_att`), + }, + }, + + "Computed + Optional fields NOT conflicting with each other": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{ + "foo_att": "foo-val", + }, + + Err: false, + }, + + "Computed + Optional fields that conflict with none set": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ 
+ Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{}, + + Err: false, + }, + + "Good with ValidateFunc": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": "valid", + }, + Err: false, + }, + + "Bad with ValidateFunc": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("something is not right here")) + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": "invalid", + }, + Err: true, + Errors: []error{ + fmt.Errorf(`something is not right here`), + }, + }, + + "ValidateFunc not called when type does not match": { + Schema: map[string]*Schema{ + "number": &Schema{ + Type: TypeInt, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + t.Fatalf("Should not have gotten validate call") + return + }, + }, + }, + Config: map[string]interface{}{ + "number": "NaN", + }, + Err: true, + }, + + "ValidateFunc gets decoded type": { + Schema: map[string]*Schema{ + "maybe": &Schema{ + Type: TypeBool, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + if _, ok := v.(bool); !ok { + t.Fatalf("Expected bool, got: %#v", v) + } + return + }, + }, + }, + Config: map[string]interface{}{ + "maybe": "true", + }, + }, + + "ValidateFunc is not called with a computed value": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = 
append(es, fmt.Errorf("something is not right here")) + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "special timeouts field": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + TimeoutsConfigKey: "bar", + }, + + Err: false, + }, + + "invalid bool field": { + Schema: map[string]*Schema{ + "bool_field": { + Type: TypeBool, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "bool_field": "abcdef", + }, + Err: true, + }, + "invalid integer field": { + Schema: map[string]*Schema{ + "integer_field": { + Type: TypeInt, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "integer_field": "abcdef", + }, + Err: true, + }, + "invalid float field": { + Schema: map[string]*Schema{ + "float_field": { + Type: TypeFloat, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "float_field": "abcdef", + }, + Err: true, + }, + + // Invalid map values + "invalid bool map value": { + Schema: map[string]*Schema{ + "boolMap": &Schema{ + Type: TypeMap, + Elem: TypeBool, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "boolMap": map[string]interface{}{ + "boolField": "notbool", + }, + }, + Err: true, + }, + "invalid int map value": { + Schema: map[string]*Schema{ + "intMap": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "intMap": map[string]interface{}{ + "intField": "notInt", + }, + }, + Err: true, + }, + "invalid float map value": { + Schema: map[string]*Schema{ + "floatMap": &Schema{ + Type: TypeMap, + Elem: TypeFloat, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "floatMap": map[string]interface{}{ + "floatField": "notFloat", + }, + }, + Err: true, + }, + + "map with positive validate function": { + Schema: map[string]*Schema{ + 
"floatInt": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, + Config: map[string]interface{}{ + "floatInt": map[string]interface{}{ + "rightAnswer": "42", + "tooMuch": "43", + }, + }, + Err: false, + }, + "map with negative validate function": { + Schema: map[string]*Schema{ + "floatInt": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("this is not fine")) + return + }, + }, + }, + Config: map[string]interface{}{ + "floatInt": map[string]interface{}{ + "rightAnswer": "42", + "tooMuch": "43", + }, + }, + Err: true, + }, + + // The Validation function should not see interpolation strings from + // non-computed values. + "set with partially computed list and map": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + if strings.HasPrefix(v.(string), "${") { + es = append(es, fmt.Errorf("should not have interpolations")) + } + return + }, + }, + }, + }, + }, + }, + }, + Config: map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "list": []interface{}{"A", hcl2shim.UnknownVariableValue, "c"}, + }, + }, + }, + Err: false, + }, + "unexpected nils values": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + "block": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "int": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "strings": []interface{}{"1", nil}, + "block": 
[]interface{}{map[string]interface{}{ + "int": nil, + }, + nil, + }, + }, + Err: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + c := terraform.NewResourceConfigRaw(tc.Config) + + ws, es := schemaMap(tc.Schema).Validate(c) + if len(es) > 0 != tc.Err { + if len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + t.FailNow() + } + + if !reflect.DeepEqual(ws, tc.Warnings) { + t.Fatalf("%q: warnings:\n\nexpected: %#v\ngot:%#v", tn, tc.Warnings, ws) + } + + if tc.Errors != nil { + sort.Sort(errorSort(es)) + sort.Sort(errorSort(tc.Errors)) + + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: errors:\n\nexpected: %q\ngot: %q", tn, tc.Errors, es) + } + } + }) + + } +} + +func TestSchemaSet_ValidateMaxItems(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *terraform.InstanceState + Config map[string]interface{} + ConfigVariables map[string]string + Diff *terraform.InstanceDiff + Err bool + Errors []error + }{ + "#0": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MaxItems: 1, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: true, + Errors: []error{ + fmt.Errorf("aliases: attribute supports 1 item maximum, config has 2 declared"), + }, + }, + "#1": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#2": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MaxItems: 1, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo"}, + }, + Diff: nil, + Err: false, + 
Errors: nil, + }, + } + + for tn, tc := range cases { + c := terraform.NewResourceConfigRaw(tc.Config) + _, es := schemaMap(tc.Schema).Validate(c) + + if len(es) > 0 != tc.Err { + if len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + t.FailNow() + } + + if tc.Errors != nil { + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) + } + } + } +} + +func TestSchemaSet_ValidateMinItems(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *terraform.InstanceState + Config map[string]interface{} + ConfigVariables map[string]string + Diff *terraform.InstanceDiff + Err bool + Errors []error + }{ + "#0": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MinItems: 2, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#1": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#2": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MinItems: 2, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo"}, + }, + Diff: nil, + Err: true, + Errors: []error{ + fmt.Errorf("aliases: attribute supports 2 item as a minimum, config has 1 declared"), + }, + }, + } + + for tn, tc := range cases { + c := terraform.NewResourceConfigRaw(tc.Config) + _, es := schemaMap(tc.Schema).Validate(c) + + if len(es) > 0 != tc.Err { + if len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + 
t.FailNow() + } + + if tc.Errors != nil { + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) + } + } + } +} + +// errorSort implements sort.Interface to sort errors by their error message +type errorSort []error + +func (e errorSort) Len() int { return len(e) } +func (e errorSort) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e errorSort) Less(i, j int) bool { + return e[i].Error() < e[j].Error() +} + +func TestSchemaMapDeepCopy(t *testing.T) { + schema := map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + }, + } + source := schemaMap(schema) + dest := source.DeepCopy() + dest["foo"].ForceNew = true + if reflect.DeepEqual(source, dest) { + t.Fatalf("source and dest should not match") + } +} diff --git a/internal/legacy/helper/schema/serialize.go b/legacy/helper/schema/serialize.go similarity index 100% rename from internal/legacy/helper/schema/serialize.go rename to legacy/helper/schema/serialize.go diff --git a/internal/legacy/helper/schema/serialize_test.go b/legacy/helper/schema/serialize_test.go similarity index 100% rename from internal/legacy/helper/schema/serialize_test.go rename to legacy/helper/schema/serialize_test.go diff --git a/legacy/helper/schema/set.go b/legacy/helper/schema/set.go new file mode 100644 index 000000000000..df2693dcbfb6 --- /dev/null +++ b/legacy/helper/schema/set.go @@ -0,0 +1,250 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform/legacy/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. 
+func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. 
Given the same +// set, the order of this will always be the same. +func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. +func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + return reflect.DeepEqual(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. 
+func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + + if isProto5() { + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. 
+ if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) index(item interface{}) int { + return sort.SearchStrings(s.listCode(), s.hash(item)) +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/internal/legacy/helper/schema/set_test.go b/legacy/helper/schema/set_test.go similarity index 100% rename from internal/legacy/helper/schema/set_test.go rename to legacy/helper/schema/set_test.go diff --git a/internal/legacy/helper/schema/shims.go b/legacy/helper/schema/shims.go similarity index 95% rename from internal/legacy/helper/schema/shims.go rename to legacy/helper/schema/shims.go index 3f9e2e9ffbb8..64ac729e68a6 100644 --- a/internal/legacy/helper/schema/shims.go +++ b/legacy/helper/schema/shims.go @@ -6,9 +6,9 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/terraform" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/terraform" ) // DiffFromValues takes the current state and desired state as cty.Values and diff --git a/internal/legacy/helper/schema/shims_test.go b/legacy/helper/schema/shims_test.go similarity index 99% rename from internal/legacy/helper/schema/shims_test.go rename to legacy/helper/schema/shims_test.go index 91e24069ddfa..f2c31a3c17c4 100644 --- a/internal/legacy/helper/schema/shims_test.go +++ b/legacy/helper/schema/shims_test.go @@ -11,12 +11,12 @@ import ( 
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" - "github.com/hashicorp/terraform/internal/legacy/terraform" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/legacy/helper/hashcode" + "github.com/hashicorp/terraform/legacy/terraform" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/legacy/helper/schema/testing.go b/legacy/helper/schema/testing.go new file mode 100644 index 000000000000..2e62efd61181 --- /dev/null +++ b/legacy/helper/schema/testing.go @@ -0,0 +1,28 @@ +package schema + +import ( + "testing" + + "github.com/hashicorp/terraform/legacy/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. 
+func TestResourceDataRaw( + t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/internal/legacy/helper/schema/valuetype.go b/legacy/helper/schema/valuetype.go similarity index 100% rename from internal/legacy/helper/schema/valuetype.go rename to legacy/helper/schema/valuetype.go diff --git a/internal/legacy/helper/schema/valuetype_string.go b/legacy/helper/schema/valuetype_string.go similarity index 100% rename from internal/legacy/helper/schema/valuetype_string.go rename to legacy/helper/schema/valuetype_string.go diff --git a/internal/legacy/terraform/context_components.go b/legacy/terraform/context_components.go similarity index 91% rename from internal/legacy/terraform/context_components.go rename to legacy/terraform/context_components.go index 31494efb2faf..c893a16b4823 100644 --- a/internal/legacy/terraform/context_components.go +++ b/legacy/terraform/context_components.go @@ -3,9 +3,9 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/provisioners" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" ) // contextComponentFactory is the interface that Context uses diff --git a/legacy/terraform/diff.go b/legacy/terraform/diff.go new file mode 100644 index 000000000000..4e834204d22c --- /dev/null +++ b/legacy/terraform/diff.go @@ -0,0 +1,1451 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform/addrs" + 
"github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. 
+ // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("diff cannot represent modules with count or for_each keys") + } + + legacyPath[i] = step.Name + } + + m := &ModuleDiff{Path: legacyPath} + m.init() + d.Modules = append(d.Modules, m) + return m +} + +// ModuleByPath is used to lookup the module diff for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { + if d == nil { + return nil + } + for _, mod := range d.Modules { + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// RootModule returns the ModuleState for the root module +func (d *Diff) RootModule() *ModuleDiff { + root := d.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Empty returns true if the diff has no changes. +func (d *Diff) Empty() bool { + if d == nil { + return true + } + + for _, m := range d.Modules { + if !m.Empty() { + return false + } + } + + return true +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. 
+func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + addr := normalizeModulePath(m.Path) + key := addr.String() + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +func (d *Diff) init() { + if d.Modules == nil { + rootDiff := &ModuleDiff{Path: rootModulePath} + d.Modules = []*ModuleDiff{rootDiff} + } + for _, m := range d.Modules { + m.init() + } +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. 
+type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. +// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffCreate for a module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. +func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff. 
+func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. 
+ // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather than applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. 
+ result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. 
+ if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 
&& keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. 
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. 
+ if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for mismatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." 
+ idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. 
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. 
+type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. 
+func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). 
+func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within the terraform package. +// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, 
v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." 
+ prefix := k[:len(k)-1] + for k2, _ := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. + if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k, _ := range d.Attributes { + checkOld[k] = struct{}{} + } + for k, _ := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k, _ := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). 
+ continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2, _ := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. 
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2, _ := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2, _ := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). + } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr, _ := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} + +// moduleDiffSort implements sort.Interface to sort module diffs by path. 
+type moduleDiffSort []*ModuleDiff + +func (s moduleDiffSort) Len() int { return len(s) } +func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s moduleDiffSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} diff --git a/legacy/terraform/diff_test.go b/legacy/terraform/diff_test.go new file mode 100644 index 000000000000..e7ee0d818542 --- /dev/null +++ b/legacy/terraform/diff_test.go @@ -0,0 +1,1252 @@ +package terraform + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" +) + +func TestDiffEmpty(t *testing.T) { + var diff *Diff + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff = new(Diff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestDiffEmpty_taintedIsNotEmpty(t *testing.T) { + diff := new(Diff) + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + DestroyTainted: true, + } + + if diff.Empty() { + t.Fatal("should not be empty, since DestroyTainted was set") + } +} + +func TestDiffEqual(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + Equal bool + }{ + "nil": { + nil, + new(Diff), + false, + }, + + "empty": { + new(Diff), + new(Diff), + true, + }, + + "different module order": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}}, + &ModuleDiff{Path: []string{"root", "bar"}}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "bar"}}, 
+ &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + true, + }, + + "different module diff destroys": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: false}, + }, + }, + true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + actual := tc.D1.Equal(tc.D2) + if actual != tc.Equal { + t.Fatalf("expected: %v\n\n%#v\n\n%#v", tc.Equal, tc.D1, tc.D2) + } + }) + } +} + +func TestDiffPrune(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + }{ + "nil": { + nil, + nil, + }, + + "empty": { + new(Diff), + new(Diff), + }, + + "empty module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + &Diff{}, + }, + + "destroy module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + tc.D1.Prune() + if !tc.D1.Equal(tc.D2) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.D1, tc.D2) + } + }) + } +} + +func TestModuleDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *ModuleDiff + Result DiffChangeType + }{ + { + &ModuleDiff{}, + DiffNone, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{Destroy: true}, + }, + }, + DiffDestroy, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + }, + }, + DiffUpdate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + 
DiffCreate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + DiffUpdate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestDiff_DeepCopy(t *testing.T) { + cases := map[string]*Diff{ + "empty": &Diff{}, + + "basic diff": &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{ + Path: []string{"root"}, + Resources: map[string]*InstanceDiff{ + "aws_instance.foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "num": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + dup := tc.DeepCopy() + if !reflect.DeepEqual(dup, tc) { + t.Fatalf("\n%#v\n\n%#v", dup, tc) + } + }) + } +} + +func TestModuleDiff_Empty(t *testing.T) { + diff := new(ModuleDiff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources = map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{}, + } + + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources["nodeA"].Attributes = map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } + + diff.Resources["nodeA"].Attributes = nil + diff.Resources["nodeA"].Destroy = true + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestModuleDiff_String(t *testing.T) { + diff := &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "bar": &ResourceAttrDiff{ + Old: "foo", + NewComputed: true, + }, + "longfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + RequiresNew: true, + }, + 
"secretfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + Sensitive: true, + }, + }, + }, + }, + } + + actual := strings.TrimSpace(diff.String()) + expected := strings.TrimSpace(moduleDiffStrBasic) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } +} + +func TestInstanceDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *InstanceDiff + Result DiffChangeType + }{ + { + &InstanceDiff{}, + DiffNone, + }, + { + &InstanceDiff{Destroy: true}, + DiffDestroy, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + DiffUpdate, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffCreate, + }, + { + &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_Empty(t *testing.T) { + var rd *InstanceDiff + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = new(InstanceDiff) + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = &InstanceDiff{Destroy: true} + + if rd.Empty() { + t.Fatal("should not be empty") + } + + rd = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + New: "bar", + }, + }, + } + + if rd.Empty() { + t.Fatal("should not be empty") + } +} + +func TestModuleDiff_Instances(t *testing.T) { + yesDiff := &InstanceDiff{Destroy: true} + noDiff := &InstanceDiff{Destroy: true, DestroyTainted: true} + + cases 
:= []struct { + Diff *ModuleDiff + Id string + Result []*InstanceDiff + }{ + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "foo_bar": noDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + } + + for i, tc := range cases { + actual := tc.Diff.Instances(tc.Id) + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_RequiresNew(t *testing.T) { + rd := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + } + + if rd.RequiresNew() { + t.Fatal("should not require new") + } + + rd.Attributes["foo"].RequiresNew = true + + if !rd.RequiresNew() { + t.Fatal("should require new") + } +} + +func TestInstanceDiff_RequiresNew_nil(t *testing.T) { + var rd *InstanceDiff + + if rd.RequiresNew() { + t.Fatal("should not require new") + } +} + +func TestInstanceDiffSame(t *testing.T) { + cases := []struct { + One, Two *InstanceDiff + Same bool + Reason string + }{ + { + &InstanceDiff{}, + &InstanceDiff{}, + true, + "", + }, + + { + nil, + nil, + true, + "", + }, + + { + &InstanceDiff{Destroy: false}, + &InstanceDiff{Destroy: true}, + false, + "diff: Destroy; old: false, new: true", + }, + + { + &InstanceDiff{Destroy: true}, + &InstanceDiff{Destroy: true}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + 
"bar": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + false, + "attribute mismatch: bar", + }, + + // Extra attributes + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + "bar": &ResourceAttrDiff{}, + }, + }, + false, + "extra attributes: bar", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: true}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: false}, + }, + }, + false, + "diff RequiresNew; old: true, new: false", + }, + + // NewComputed on primitive + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + }, + }, + true, + "", + }, + + // NewComputed on primitive, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // NewComputed on set, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.1": &ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + }, + "foo.2": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{NewComputed: true}, + }, + }, + &InstanceDiff{ + Attributes: 
map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.0": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change RequiresNew by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by elements + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "foo.12": &ResourceAttrDiff{ + Old: "4", + New: "12", + RequiresNew: true, + }, + }, + + Destroy: true, + }, + true, + "", + }, + + // Computed sets may 
not contain all fields in the original diff, and + // because multiple entries for the same set can compute to the same + // hash before the values are computed or interpolated, the overall + // count can change as well. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.baz": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + // Computed values in maps will fail the "Same" check as well + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "0", + New: "1", + NewComputed: false, + }, + "foo.val": &ResourceAttrDiff{ + Old: "", + New: "something", + }, + }, + }, + true, + "", + }, + + // In a DESTROY/CREATE scenario, the plan diff will be run against the + // state of the old instance, while the apply diff will be run against an + // empty state (because the state is cleared when the destroy runs.) + // For complex attributes, this can result in keys that seem to disappear + // between the two diffs, when in reality everything is working just fine. + // + // Same() needs to take into account this scenario by analyzing NewRemoved + // and treating as "Same" a diff that does indeed have that key removed. 
+ { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + true, + "", + }, + + // Another thing that can occur in DESTROY/CREATE scenarios is that list + // values that are going to zero have diffs that show up at plan time but + // are gone at apply time. The NewRemoved handling catches the fields and + // treats them as OK, but it also needs to treat the .# field itself as + // okay to be present in the old diff but not in the new one. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + // Innner computed set should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: 
"", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.~2.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.42.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // Innner computed list should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // When removing all collection items, the diff is allowed to contain + // nothing when re-creating the resource. This should be the "Same" + // since we said we were going from 1 to 0. 
+ { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.bar": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.0": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + // Make sure that DestroyTainted diffs pass as well, especially when diff + // two works off of no state. + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + }, + }, + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + true, + "", + }, + // RequiresNew in different attribute + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "baz", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + same, reason := tc.One.Same(tc.Two) + if same != tc.Same { + t.Fatalf("%d: expected same: %t, got %t (%s)\n\n one: %#v\n\ntwo: %#v", + i, tc.Same, same, reason, tc.One, tc.Two) + } + if reason != tc.Reason { + t.Fatalf( + "%d: bad reason\n\nexpected: %#v\n\ngot: %#v", i, tc.Reason, reason) + } + }) + } +} + +const moduleDiffStrBasic = ` +CREATE: nodeA + bar: "foo" => "" + foo: "foo" => 
"bar" + longfoo: "foo" => "bar" (forces new resource) + secretfoo: "" => "" (attribute changed) +` + +func TestCountFlatmapContainerValues(t *testing.T) { + for i, tc := range []struct { + attrs map[string]string + key string + count string + }{ + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.2.list.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"map.#": "3", "map.a": "b", "map.a.#": "0", "map.b": "4"}, + key: "map.#", + count: "2", + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + count := countFlatmapContainerValues(tc.key, tc.attrs) + if count != tc.count { + t.Fatalf("expected %q, got %q", tc.count, count) + } + }) + } +} diff --git a/internal/legacy/terraform/features.go b/legacy/terraform/features.go similarity index 100% rename from internal/legacy/terraform/features.go rename to legacy/terraform/features.go diff --git a/internal/legacy/terraform/instancetype.go b/legacy/terraform/instancetype.go similarity index 100% rename from internal/legacy/terraform/instancetype.go rename to legacy/terraform/instancetype.go diff --git a/internal/legacy/terraform/instancetype_string.go b/legacy/terraform/instancetype_string.go similarity index 100% rename from internal/legacy/terraform/instancetype_string.go rename to legacy/terraform/instancetype_string.go diff --git a/legacy/terraform/provider_mock.go b/legacy/terraform/provider_mock.go new file mode 100644 index 000000000000..9603e4377961 --- /dev/null +++ b/legacy/terraform/provider_mock.go @@ -0,0 +1,363 @@ +package terraform + +import ( + "encoding/json" + "sync" + + 
"github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/providers" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetProviderSchemaResponse for compatibility with old tests + + ValidateProviderConfigCalled bool + ValidateProviderConfigResponse providers.ValidateProviderConfigResponse + ValidateProviderConfigRequest providers.ValidateProviderConfigRequest + ValidateProviderConfigFn func(providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse + + ValidateResourceConfigCalled bool + ValidateResourceConfigTypeName string + ValidateResourceConfigResponse providers.ValidateResourceConfigResponse + ValidateResourceConfigRequest providers.ValidateResourceConfigRequest + ValidateResourceConfigFn func(providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse + + ValidateDataResourceConfigCalled bool + ValidateDataResourceConfigTypeName string + ValidateDataResourceConfigResponse providers.ValidateDataResourceConfigResponse + ValidateDataResourceConfigRequest providers.ValidateDataResourceConfigRequest + ValidateDataResourceConfigFn func(providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + 
ConfigureProviderCalled bool + ConfigureProviderResponse providers.ConfigureProviderResponse + ConfigureProviderRequest providers.ConfigureProviderRequest + ConfigureProviderFn func(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error +} + +func (p *MockProvider) GetProviderSchema() providers.GetProviderSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() 
providers.GetProviderSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetProviderSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProviderConfigCalled = true + p.ValidateProviderConfigRequest = r + if p.ValidateProviderConfigFn != nil { + return p.ValidateProviderConfigFn(r) + } + return p.ValidateProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceConfigCalled = true + p.ValidateResourceConfigRequest = r + + if p.ValidateResourceConfigFn != nil { + return p.ValidateResourceConfigFn(r) + } + + return p.ValidateResourceConfigResponse +} + +func (p *MockProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataResourceConfigCalled = true + p.ValidateDataResourceConfigRequest = r + + if p.ValidateDataResourceConfigFn != nil { + return p.ValidateDataResourceConfigFn(r) + } + + return 
p.ValidateDataResourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() + defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + } + return resp +} + +func (p *MockProvider) ConfigureProvider(r providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + p.Lock() + defer p.Unlock() + + p.ConfigureProviderCalled = true + p.ConfigureProviderRequest = r + + if p.ConfigureProviderFn != nil { + return p.ConfigureProviderFn(r) + } + + return p.ConfigureProviderResponse +} + +func (p *MockProvider) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provider itself is responsible for handling + // any concurrency concerns in this case. 
+ + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadResourceCalled = true + p.ReadResourceRequest = r + + if p.ReadResourceFn != nil { + return p.ReadResourceFn(r) + } + + resp := p.ReadResourceResponse + if resp.NewState != cty.NilVal { + // make sure the NewState fits the schema + // This isn't always the case for the existing tests + newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState) + if err != nil { + panic(err) + } + resp.NewState = newState + return resp + } + + // just return the same state we received + resp.NewState = r.PriorState + return resp +} + +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + p.Lock() + defer p.Unlock() + + p.PlanResourceChangeCalled = true + p.PlanResourceChangeRequest = r + + if p.PlanResourceChangeFn != nil { + return p.PlanResourceChangeFn(r) + } + + return p.PlanResourceChangeResponse +} + +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + p.Lock() + p.ApplyResourceChangeCalled = true + p.ApplyResourceChangeRequest = r + p.Unlock() + + if p.ApplyResourceChangeFn != nil { + return p.ApplyResourceChangeFn(r) + } + + return p.ApplyResourceChangeResponse +} + +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + p.Lock() + defer p.Unlock() + + if p.ImportStateReturn != nil { + for _, is := range p.ImportStateReturn { + if is.Attributes == nil { + is.Attributes = make(map[string]string) + } + is.Attributes["id"] = is.ID + + typeName := is.Ephemeral.Type + // Use the requested type if the resource has no type of it's own. 
+ // We still return the empty type, which will error, but this prevents a panic. + if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/legacy/terraform/provisioner_mock.go b/legacy/terraform/provisioner_mock.go new file mode 100644 index 000000000000..2a33235411f8 --- /dev/null +++ b/legacy/terraform/provisioner_mock.go @@ -0,0 +1,104 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { + return p.GetSchemaResponse +} + +func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProvisionerConfigCalled = true + p.ValidateProvisionerConfigRequest = r + if p.ValidateProvisionerConfigFn != nil { + return p.ValidateProvisionerConfigFn(r) + } + return p.ValidateProvisionerConfigResponse +} + +func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { + p.Lock() + defer p.Unlock() + + p.ProvisionResourceCalled = true + p.ProvisionResourceRequest = r + if p.ProvisionResourceFn != nil { + fn := p.ProvisionResourceFn + return fn(r) + } + + return p.ProvisionResourceResponse +} + +func (p *MockProvisioner) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provisioner itself is responsible for handling + // any concurrency concerns in this case. 
+ + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvisioner) Close() error { + p.Lock() + defer p.Unlock() + + p.CloseCalled = true + if p.CloseFn != nil { + return p.CloseFn() + } + + return p.CloseResponse +} diff --git a/legacy/terraform/resource.go b/legacy/terraform/resource.go new file mode 100644 index 000000000000..6273c8ace081 --- /dev/null +++ b/legacy/terraform/resource.go @@ -0,0 +1,516 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +// Resource is a legacy way to identify a particular resource instance. +// +// New code should use addrs.ResourceInstance instead. This is still here +// only for codepaths that haven't been updated yet. +type Resource struct { + // These are all used by the new EvalNode stuff. + Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Flags ResourceFlag +} + +// NewResource constructs a legacy Resource object from an +// addrs.ResourceInstance value. +// +// This is provided to shim to old codepaths that haven't been updated away +// from this type yet. Since this old type is not able to represent instances +// that have string keys, this function will panic if given a resource address +// that has a string key. 
+func NewResource(addr addrs.ResourceInstance) *Resource { + ret := &Resource{ + Name: addr.Resource.Name, + Type: addr.Resource.Type, + } + + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + ret.CountIndex = int(tk) + default: + panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) + } + } + + return ret +} + +// ResourceKind specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. +type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string +} + +// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. +// +// InstanceInfo is a legacy type, and uses of it should be gradually replaced +// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as +// appropriate. +// +// The legacy InstanceInfo type cannot represent module instances with instance +// keys, so this function will panic if given such a path. Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. 
+func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name + } + + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." + id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } + } + + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } +} + +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. 
+ id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. + panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. 
+func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. 
We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." + } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. 
+func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// CheckSet checks that the given list of configuration keys is +// properly set. If not, errors are returned for each unset key. +// +// This is useful to be called in the Validate method of a ResourceProvider. +func (c *ResourceConfig) CheckSet(keys []string) []error { + var errs []error + + for _, k := range keys { + if !c.IsSet(k) { + errs = append(errs, fmt.Errorf("%s must be set", k)) + } + } + + return errs +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. 
If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. 
+func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. 
+ actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/internal/legacy/terraform/resource_address.go b/legacy/terraform/resource_address.go similarity index 99% rename from internal/legacy/terraform/resource_address.go rename to legacy/terraform/resource_address.go index 9ab24f9db56d..5f3ea4f8c340 100644 --- a/internal/legacy/terraform/resource_address.go +++ b/legacy/terraform/resource_address.go @@ -7,8 +7,8 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // ResourceAddress is a way of identifying an individual resource (or, diff --git a/internal/legacy/terraform/resource_address_test.go b/legacy/terraform/resource_address_test.go similarity index 99% rename from internal/legacy/terraform/resource_address_test.go rename to legacy/terraform/resource_address_test.go index e0e8ed0061be..3bb5f2082d24 100644 --- a/internal/legacy/terraform/resource_address_test.go +++ b/legacy/terraform/resource_address_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) func TestParseResourceAddressInternal(t *testing.T) { diff --git a/internal/legacy/terraform/resource_mode.go b/legacy/terraform/resource_mode.go similarity index 100% rename from 
internal/legacy/terraform/resource_mode.go rename to legacy/terraform/resource_mode.go diff --git a/internal/legacy/terraform/resource_mode_string.go b/legacy/terraform/resource_mode_string.go similarity index 100% rename from internal/legacy/terraform/resource_mode_string.go rename to legacy/terraform/resource_mode_string.go diff --git a/internal/legacy/terraform/resource_provider.go b/legacy/terraform/resource_provider.go similarity index 100% rename from internal/legacy/terraform/resource_provider.go rename to legacy/terraform/resource_provider.go diff --git a/internal/legacy/terraform/resource_provider_mock.go b/legacy/terraform/resource_provider_mock.go similarity index 100% rename from internal/legacy/terraform/resource_provider_mock.go rename to legacy/terraform/resource_provider_mock.go diff --git a/legacy/terraform/resource_provisioner.go b/legacy/terraform/resource_provisioner.go new file mode 100644 index 000000000000..d5f707880362 --- /dev/null +++ b/legacy/terraform/resource_provisioner.go @@ -0,0 +1,69 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" +) + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. +type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. 
+ // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns an error. + // Instead of a diff, the ResourceConfig is provided since provisioners + // only run after a resource has been newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. 
+type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. +type ProvisionerFactory = provisioners.Factory diff --git a/internal/legacy/terraform/resource_provisioner_mock.go b/legacy/terraform/resource_provisioner_mock.go similarity index 96% rename from internal/legacy/terraform/resource_provisioner_mock.go rename to legacy/terraform/resource_provisioner_mock.go index 27c07b7dc6b2..7b88cf733391 100644 --- a/internal/legacy/terraform/resource_provisioner_mock.go +++ b/legacy/terraform/resource_provisioner_mock.go @@ -3,7 +3,7 @@ package terraform import ( "sync" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) // MockResourceProvisioner implements ResourceProvisioner but mocks out all the diff --git a/legacy/terraform/resource_test.go b/legacy/terraform/resource_test.go new file mode 100644 index 000000000000..835163c4a2ae --- /dev/null +++ b/legacy/terraform/resource_test.go @@ -0,0 +1,674 @@ +package terraform + +import ( + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/mitchellh/reflectwalk" +) + +func TestResourceConfigGet(t *testing.T) { + fooStringSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + fooListSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.List(cty.Number), Optional: true}, + }, + } + + cases := []struct { + Config cty.Value + Schema *configschema.Block + Key string + Value interface{} + }{ + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: fooStringSchema, + Key: "foo", + Value: "bar", + }, + + { + Config: 
cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + Schema: fooStringSchema, + Key: "foo", + Value: hcl2shim.UnknownVariableValue, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.0", + Value: 1, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.5", + Value: nil, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.-1", + Value: nil, + }, + + // get from map + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key", + Value: 1, + }, + + // get from map with dot in key + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + + // get from map with overlapping key names + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.2": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + 
"mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name.2", + Value: 2, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.foo": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "listkey": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(3), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.List(cty.Map(cty.Number)))), Optional: true}, + }, + }, + Key: "mapname.0.listkey.0.key", + Value: 3, + }, + } + + for i, tc := range cases { + rc := NewResourceConfigShimmed(tc.Config, tc.Schema) + + // Test getting a key + t.Run(fmt.Sprintf("get-%d", i), func(t *testing.T) { + v, ok := rc.Get(tc.Key) + if ok && v == nil { + t.Fatal("(nil, true) returned from Get") + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("%d bad: %#v", i, v) + } + }) + + // Test copying and equality + t.Run(fmt.Sprintf("copy-and-equal-%d", i), func(t *testing.T) { + copy := rc.DeepCopy() + if !reflect.DeepEqual(copy, rc) { + t.Fatalf("bad:\n\n%#v\n\n%#v", copy, rc) + } + + if !copy.Equal(rc) { + t.Fatalf("copy != rc:\n\n%#v\n\n%#v", copy, rc) + } + if !rc.Equal(copy) { + t.Fatalf("rc != copy:\n\n%#v\n\n%#v", copy, rc) + } + }) + } +} + +func TestResourceConfigDeepCopy_nil(t *testing.T) { + var nilRc *ResourceConfig + actual := nilRc.DeepCopy() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigDeepCopy_nilComputed(t *testing.T) { + rc := 
&ResourceConfig{} + actual := rc.DeepCopy() + if actual.ComputedKeys != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigEqual_nil(t *testing.T) { + var nilRc *ResourceConfig + notNil := NewResourceConfigShimmed(cty.EmptyObjectVal, &configschema.Block{}) + + if nilRc.Equal(notNil) { + t.Fatal("should not be equal") + } + + if notNil.Equal(nilRc) { + t.Fatal("should not be equal") + } +} + +func TestResourceConfigEqual_computedKeyOrder(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }) + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + rc := NewResourceConfigShimmed(v, schema) + rc2 := NewResourceConfigShimmed(v, schema) + + // Set the computed keys manually to force ordering to differ + rc.ComputedKeys = []string{"foo", "bar"} + rc2.ComputedKeys = []string{"bar", "foo"} + + if !rc.Equal(rc2) { + t.Fatal("should be equal") + } +} + +func TestUnknownCheckWalker(t *testing.T) { + cases := []struct { + Name string + Input interface{} + Result bool + }{ + { + "primitive", + 42, + false, + }, + + { + "primitive computed", + hcl2shim.UnknownVariableValue, + true, + }, + + { + "list", + []interface{}{"foo", hcl2shim.UnknownVariableValue}, + true, + }, + + { + "nested list", + []interface{}{ + "foo", + []interface{}{hcl2shim.UnknownVariableValue}, + }, + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + var w unknownCheckWalker + if err := reflectwalk.Walk(tc.Input, &w); err != nil { + t.Fatalf("err: %s", err) + } + + if w.Unknown != tc.Result { + t.Fatalf("bad: %v", w.Unknown) + } + }) + } +} + +func TestNewResourceConfigShimmed(t *testing.T) { + for _, tc := range []struct { + Name string + Val cty.Value + Schema *configschema.Block + Expected *ResourceConfig + }{ + { + Name: "empty object", + Val: cty.NullVal(cty.EmptyObject), + Schema: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "basic", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{ + "foo": "bar", + }, + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + { + Name: "null string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"foo"}, + Raw: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": 
hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "null collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingList, + }, + "baz": { + Block: configschema.Block{}, + Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown in nested blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "list": cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "list": {Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.baz.0.list"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + }, + }, + { + Name: "unknown in set", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "val": { + Type: cty.String, + Optional: true, + }, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + }, + }, + { + Name: "unknown in attribute sets", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + "baz": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "attr": 
cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "val": cty.String, + })), + }, + "baz": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "obj": cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + }), + })), + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val", "baz.0.obj.attr", "baz.1.obj"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + }, + }, + { + Name: "null blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingMap, + }, + "baz": { + Block: configschema.Block{}, + Nesting: configschema.NestingSingle, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + } { + t.Run(tc.Name, func(*testing.T) { + cfg := NewResourceConfigShimmed(tc.Val, tc.Schema) + if !tc.Expected.Equal(cfg) { + t.Fatalf("expected:\n%#v\ngot:\n%#v", tc.Expected, 
cfg) + } + }) + } +} diff --git a/legacy/terraform/schemas.go b/legacy/terraform/schemas.go new file mode 100644 index 000000000000..0044606536bf --- /dev/null +++ b/legacy/terraform/schemas.go @@ -0,0 +1,285 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. +type Schemas struct { + Providers map[addrs.Provider]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[provider] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { + ps := ss.ProviderSchema(provider) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. 
Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(provider) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. 
+func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[addrs.Provider]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(fqn addrs.Provider) { + name := fqn.String() + + if _, exists := schemas[fqn]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) + provider, err := components.ResourceProvider(fqn) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetProviderSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+ schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), + ) + } + } + + schemas[fqn] = s + + if resp.ProviderMeta.Block != nil { + s.ProviderMeta = resp.ProviderMeta.Block + } + } + + if config != nil { + for _, fqn := range config.ProviderTypes() { + ensure(fqn) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeAddr := range needed { + ensure(typeAddr) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. 
+ for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/legacy/terraform/state.go b/legacy/terraform/state.go new file mode 100644 index 000000000000..73b916404942 --- /dev/null +++ b/legacy/terraform/state.go @@ -0,0 +1,2254 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/copystructure" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. 
+// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. 
+ Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. 
+ // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. 
+// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. 
+ { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) 
+ + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. 
+ + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. 
+// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. 
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future. 
+func (s *State) FromFutureTerraform() bool { + s.Lock() + defer s.Unlock() + + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return tfversion.SemVer.LessThan(v) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = StateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } else { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. 
+ for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// Empty returns true if BackendState has no state. +func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Config decodes the type-specific configuration object using the provided +// schema and returns the result as a cty.Value. +// +// An error is returned if the stored configuration does not conform to the +// given schema. 
+func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { + ty := schema.ImpliedType() + if s == nil { + return cty.NullVal(ty), nil + } + return ctyjson.Unmarshal(s.ConfigRaw, ty) +} + +// SetConfig replaces (in-place) the type-specific configuration object using +// the provided value and associated schema. +// +// An error is returned if the given value does not conform to the implied +// type of the schema. +func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { + ty := schema.ImpliedType() + buf, err := ctyjson.Marshal(val, ty) + if err != nil { + return err + } + s.ConfigRaw = buf + return nil +} + +// ForPlan produces an alternative representation of the reciever that is +// suitable for storing in a plan. The current workspace must additionally +// be provided, to be stored alongside the backend configuration. +// +// The backend configuration schema is required in order to properly +// encode the backend-specific configuration settings. +func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { + if s == nil { + return nil, nil + } + + configVal, err := s.Config(schema) + if err != nil { + return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) + } + return plans.NewBackend(s.Type, configVal, schema, workspaceName) +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. 
type RemoteState struct {
	// Type controls the client we use for the remote state.
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific.
	Config map[string]string `json:"config"`

	// mu guards Type and Config; RemoteState must not be copied by value.
	mu sync.Mutex
}

// Lock acquires the remote state's internal mutex.
func (r *RemoteState) Lock() { r.mu.Lock() }

// Unlock releases the remote state's internal mutex.
func (r *RemoteState) Unlock() { r.mu.Unlock() }

// init ensures the Config map is non-nil so callers can assign into it.
func (r *RemoteState) init() {
	r.Lock()
	defer r.Unlock()

	if r.Config == nil {
		r.Config = make(map[string]string)
	}
}

// deepcopy returns an independent copy of the receiver, duplicating the
// configuration map so later mutations do not alias.
func (r *RemoteState) deepcopy() *RemoteState {
	r.Lock()
	defer r.Unlock()

	confCopy := make(map[string]string, len(r.Config))
	for k, v := range r.Config {
		confCopy[k] = v
	}
	return &RemoteState{
		Type:   r.Type,
		Config: confCopy,
	}
}

// Empty returns true if there is no remote state configured. A nil
// receiver is considered empty.
func (r *RemoteState) Empty() bool {
	if r == nil {
		return true
	}
	r.Lock()
	defer r.Unlock()

	return r.Type == ""
}

// Equals returns true if the receiver has the same type and configuration
// as other.
//
// NOTE(review): other is not locked here and a nil other panics; this
// matches the original behavior, so callers must pass a non-nil argument.
func (r *RemoteState) Equals(other *RemoteState) bool {
	r.Lock()
	defer r.Unlock()

	if r.Type != other.Type {
		return false
	}
	if len(r.Config) != len(other.Config) {
		return false
	}
	for k, v := range r.Config {
		if other.Config[k] != v {
			return false
		}
	}
	return true
}

// OutputState is used to track the state relevant to a single output.
type OutputState struct {
	// Sensitive describes whether the output is considered sensitive,
	// which may lead to masking the value on screen in some cases.
	Sensitive bool `json:"sensitive"`
	// Type describes the structure of Value. Valid values are "string",
	// "map" and "list".
	Type string `json:"type"`
	// Value contains the value of the output, in the structure described
	// by the Type field.
	Value interface{} `json:"value"`

	// mu guards the fields above; OutputState must not be copied by value.
	mu sync.Mutex
}

// Lock acquires the output state's internal mutex.
func (s *OutputState) Lock() { s.mu.Lock() }

// Unlock releases the output state's internal mutex.
func (s *OutputState) Unlock() { s.mu.Unlock() }

// String returns a Go-syntax representation of the output's value.
func (s *OutputState) String() string {
	return fmt.Sprintf("%#v", s.Value)
}

// Equal compares two OutputState structures for equality. nil values are
// considered equal.
func (s *OutputState) Equal(other *OutputState) bool {
	// Two nil outputs are equal; one nil and one non-nil are not.
	if s == nil && other == nil {
		return true
	}

	if s == nil || other == nil {
		return false
	}
	s.Lock()
	defer s.Unlock()

	if s.Type != other.Type {
		return false
	}

	if s.Sensitive != other.Sensitive {
		return false
	}

	// Value is an arbitrary JSON-shaped interface{}, so deep equality is
	// the only general comparison available.
	if !reflect.DeepEqual(s.Value, other.Value) {
		return false
	}

	return true
}

// deepcopy returns an independent copy of the receiver, or nil for a nil
// receiver. Lock: true makes copystructure use the Lock/Unlock methods
// while copying.
func (s *OutputState) deepcopy() *OutputState {
	if s == nil {
		return nil
	}

	stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
	if err != nil {
		// Copy failure would indicate an unsupported type in the
		// structure, i.e. a programming error.
		panic(fmt.Errorf("Error copying output value: %s", err))
	}

	return stateCopy.(*OutputState)
}

// ModuleState is used to track all the state relevant to a single
// module. Previous to Terraform 0.3, all state belonged to the "root"
// module.
type ModuleState struct {
	// Path is the import path from the root module. Modules imports are
	// always disjoint, so the path represents a module tree.
	Path []string `json:"path"`

	// Locals are kept only transiently in-memory, because we can always
	// re-compute them.
	Locals map[string]interface{} `json:"-"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]*OutputState `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*ResourceState `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	// mu guards the fields above; ModuleState must not be copied by value.
	mu sync.Mutex
}

// Lock acquires the module state's internal mutex.
func (s *ModuleState) Lock() { s.mu.Lock() }

// Unlock releases the module state's internal mutex.
func (s *ModuleState) Unlock() { s.mu.Unlock() }

// Equal tests whether one module state is equal to another.
func (m *ModuleState) Equal(other *ModuleState) bool {
	m.Lock()
	defer m.Unlock()

	// Paths must be equal
	if !reflect.DeepEqual(m.Path, other.Path) {
		return false
	}

	// Outputs must be equal
	if len(m.Outputs) != len(other.Outputs) {
		return false
	}
	for k, v := range m.Outputs {
		// A key missing from other yields a nil *OutputState, which
		// OutputState.Equal treats as unequal to a non-nil v.
		if !other.Outputs[k].Equal(v) {
			return false
		}
	}

	// Dependencies must be equal. This sorts these in place but
	// this shouldn't cause any problems.
	sort.Strings(m.Dependencies)
	sort.Strings(other.Dependencies)
	if len(m.Dependencies) != len(other.Dependencies) {
		return false
	}
	for i, d := range m.Dependencies {
		if other.Dependencies[i] != d {
			return false
		}
	}

	// Resources must be equal
	if len(m.Resources) != len(other.Resources) {
		return false
	}
	for k, r := range m.Resources {
		otherR, ok := other.Resources[k]
		if !ok {
			return false
		}

		if !r.Equal(otherR) {
			return false
		}
	}

	return true
}

// IsRoot says whether or not this module diff is for the root module.
func (m *ModuleState) IsRoot() bool {
	m.Lock()
	defer m.Unlock()
	return reflect.DeepEqual(m.Path, rootModulePath)
}

// IsDescendent returns true if other is a descendant of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans. +func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { + m.Lock() + defer m.Unlock() + + inConfig := make(map[string]struct{}) + if c != nil { + for _, r := range c.ManagedResources { + inConfig[r.Addr().String()] = struct{}{} + } + for _, r := range c.DataResources { + inConfig[r.Addr().String()] = struct{}{} + } + } + + var result []addrs.ResourceInstance + for k := range m.Resources { + // Since we've not yet updated state to use our new address format, + // we need to do some shimming here. + legacyAddr, err := parseResourceAddressInternal(k) + if err != nil { + // Suggests that the user tampered with the state, since we always + // generate valid internal addresses. + log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) + continue + } + + addr := legacyAddr.AbsResourceInstanceAddr().Resource + compareKey := addr.Resource.String() // compare by resource address, ignoring instance key + if _, exists := inConfig[compareKey]; !exists { + result = append(result, addr) + } + } + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { + if outputs == nil { + // If we got no output map at all then we'll just treat our set of + // configured outputs as empty, since that suggests that they've all + // been removed by removing their containing module. 
+ outputs = make(map[string]*configs.Output) + } + + s.Lock() + defer s.Unlock() + + var ret []addrs.OutputValue + for n := range s.Outputs { + if _, declared := outputs[n]; !declared { + ret = append(ret, addrs.OutputValue{ + Name: n, + }) + } + } + + return ret +} + +// View returns a view with the given resource prefix. +func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + 
names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + 
mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. 
+ parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. 
+ Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. +func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. 
+func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +// ProviderAddr returns the provider address for the receiver, by parsing the +// string representation saved in state. An error can be returned if the +// value in state is corrupt. +func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { + var diags tfdiags.Diagnostics + + str := s.Provider + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + return addrs.AbsProviderConfig{}, diags.Err() + } + + addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags.Err() +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +func (s *ResourceState) deepcopy() *ResourceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*ResourceState) +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state 
information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. 
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. 
+ if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. 
+ sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. +func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for 
transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %v", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to Terraform 0.3. Please upgrade\n" + + "this state file using Terraform 0.6.16 prior to using it with\n" + + "Terraform 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. 
+func ReadState(src io.Reader) (*State, error) { + // check for a nil file specifically, since that produces a platform + // specific error if we try to use it in a bufio.Reader. + if f, ok := src.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + buf := bufio.NewReader(src) + + if _, err := buf.Peek(1); err != nil { + if err == io.EOF { + return nil, ErrNoState + } + return nil, err + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice. + // This is suboptimal, but will work for now. + jsonBytes, err := ioutil.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("Reading state file failed: %v", err) + } + + versionIdentifier := &jsonStateVersionIdentifier{} + if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { + return nil, fmt.Errorf("Decoding state file version failed: %v", err) + } + + var result *State + switch versionIdentifier.Version { + case 0: + return nil, fmt.Errorf("State version 0 is not supported as JSON.") + case 1: + v1State, err := ReadStateV1(jsonBytes) + if err != nil { + return nil, err + } + + v2State, err := upgradeStateV1ToV2(v1State) + if err != nil { + return nil, err + } + + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + // increment the Serial whenever we upgrade state + v3State.Serial++ + result = v3State + case 2: + v2State, err := ReadStateV2(jsonBytes) + if err != nil { + return nil, err + } + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + v3State.Serial++ + result = v3State + case 3: + v3State, err := ReadStateV3(jsonBytes) + if err != nil { + return nil, err + } + + result = v3State + default: + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), versionIdentifier.Version) + } + + // If we reached this place we must have a result set + if result == nil { + 
panic("resulting state in load not set, assertion failed") + } + + // Prune the state when read it. Its possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state for example). + result.prune() + + // Validate the state file is valid + if err := result.Validate(); err != nil { + return nil, err + } + + return result, nil +} + +func ReadStateV1(jsonBytes []byte) (*stateV1, error) { + v1State := &stateV1{} + if err := json.Unmarshal(jsonBytes, v1State); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + if v1State.Version != 1 { + return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ + "read %d, expected 1", v1State.Version) + } + + return v1State, nil +} + +func ReadStateV2(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. 
Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + return state, nil +} + +func ReadStateV3(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + // Now we write the state back out to detect any changes in normaliztion. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. incrementing serial number") + state.Serial++ + } + + return state, nil +} + +// WriteState writes a state somewhere in a binary format. +func WriteState(d *State, dst io.Writer) error { + // writing a nil state is a noop. 
+ if d == nil { + return nil + } + + // make sure we have no uninitialized fields + d.init() + + // Make sure it is sorted + d.sort() + + // Ensure the version is set + d.Version = StateVersion + + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The Terraform version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + + // Encode the data in a human-friendly way + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode state: %s", err) + } + + // We append a newline to the data because MarshalIndent doesn't + data = append(data, '\n') + + // Write the data out to the dst + if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { + return fmt.Errorf("Failed to write state: %v", err) + } + + return nil +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. 
+` diff --git a/internal/legacy/terraform/state_filter.go b/legacy/terraform/state_filter.go similarity index 100% rename from internal/legacy/terraform/state_filter.go rename to legacy/terraform/state_filter.go diff --git a/legacy/terraform/state_test.go b/legacy/terraform/state_test.go new file mode 100644 index 000000000000..beac79705266 --- /dev/null +++ b/legacy/terraform/state_test.go @@ -0,0 +1,1894 @@ +package terraform + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +func TestStateValidate(t *testing.T) { + cases := map[string]struct { + In *State + Err bool + }{ + "empty state": { + &State{}, + false, + }, + + "multiple modules": { + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root", "foo"}, + }, + &ModuleState{ + Path: []string{"root", "foo"}, + }, + }, + }, + true, + }, + } + + for name, tc := range cases { + // Init the state + tc.In.init() + + err := tc.In.Validate() + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + } +} + +func TestStateAddModule(t *testing.T) { + cases := []struct { + In []addrs.ModuleInstance + Out [][]string + }{ + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("child", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "child"}, + }, + }, + + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "foo", "bar"}, + }, + }, + // Same last element, different middle element + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", 
addrs.NoKey).Child("bar", addrs.NoKey), // This one should sort after... + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey).Child("bar", addrs.NoKey), // ...this one. + addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "bar", "bar"}, + []string{"root", "foo", "bar"}, + }, + }, + } + + for _, tc := range cases { + s := new(State) + for _, p := range tc.In { + s.AddModule(p) + } + + actual := make([][]string, 0, len(tc.In)) + for _, m := range s.Modules { + actual = append(actual, m.Path) + } + + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("wrong result\ninput: %sgot: %#v\nwant: %#v", spew.Sdump(tc.In), actual, tc.Out) + } + } +} + +func TestStateOutputTypeRoundTrip(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + Outputs: map[string]*OutputState{ + "string_output": &OutputState{ + Value: "String Value", + Type: "string", + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(state, roundTripped) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", roundTripped) + } +} + +func TestStateDeepCopy(t *testing.T) { + cases := []struct { + State *State + }{ + // Nil + {nil}, + + // Version + { + &State{Version: 5}, + }, + // TFVersion + { + &State{TFVersion: "5"}, + }, + // Modules + { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + }, + }, + }, + }, + }, + }, + // Deposed + // The nil values shouldn't be there if the 
State was properly init'ed, + // but the Copy should still work anyway. + { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + Deposed: []*InstanceState{ + {ID: "test"}, + nil, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("copy-%d", i), func(t *testing.T) { + actual := tc.State.DeepCopy() + expected := tc.State + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Expected: %#v\nRecevied: %#v\n", expected, actual) + } + }) + } +} + +func TestStateEqual(t *testing.T) { + cases := []struct { + Name string + Result bool + One, Two *State + }{ + // Nils + { + "one nil", + false, + nil, + &State{Version: 2}, + }, + + { + "both nil", + true, + nil, + nil, + }, + + // Different versions + { + "different state versions", + false, + &State{Version: 5}, + &State{Version: 2}, + }, + + // Different modules + { + "different module states", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{}, + }, + + { + "same module states", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + }, + + // Meta differs + { + "differing meta values with primitives", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + 
"schema_version": "2", + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with complex types + { + "same meta with complex types", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with complex types that have been altered during serialization + { + "same meta with complex types that have been json-ified", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": int(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": float64(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + }) + } +} + +func TestStateCompareAges(t *testing.T) { + 
cases := []struct { + Result StateAgeComparison + Err bool + One, Two *State + }{ + { + StateAgeEqual, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeReceiverOlder, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 3, + }, + }, + { + StateAgeReceiverNewer, false, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + } + + for i, tc := range cases { + result, err := tc.One.CompareAges(tc.Two) + + if err != nil && !tc.Err { + t.Errorf( + "%d: got error, but want success\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if err == nil && tc.Err { + t.Errorf( + "%d: got success, but want error\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if result != tc.Result { + t.Errorf( + "%d: got result %d, but want %d\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateSameLineage(t *testing.T) { + cases := []struct { + Result bool + One, Two *State + }{ + { + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "", + }, + }, + { + false, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "2", + }, + }, + } + + for i, tc := range cases { + result := tc.One.SameLineage(tc.Two) + + if result != tc.Result { + t.Errorf( + "%d: got %v, but want %v\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateMarshalEqual(t *testing.T) { + tests := map[string]struct { 
+ S1, S2 *State + Want bool + }{ + "both nil": { + nil, + nil, + true, + }, + "first zero, second nil": { + &State{}, + nil, + false, + }, + "first nil, second zero": { + nil, + &State{}, + false, + }, + "both zero": { + // These are not equal because they both implicitly init with + // different lineage. + &State{}, + &State{}, + false, + }, + "both set, same lineage": { + &State{ + Lineage: "abc123", + }, + &State{ + Lineage: "abc123", + }, + true, + }, + "both set, same lineage, different serial": { + &State{ + Lineage: "abc123", + Serial: 1, + }, + &State{ + Lineage: "abc123", + Serial: 2, + }, + false, + }, + "both set, same lineage, same serial, same resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + true, + }, + "both set, same lineage, same serial, different resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "pizza_crust.tasty": {}, + }, + }, + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.S1.MarshalEqual(test.S2) + if got != test.Want { + t.Errorf("wrong result %#v; want %#v", got, test.Want) + s1Buf := &bytes.Buffer{} + s2Buf := &bytes.Buffer{} + _ = WriteState(test.S1, s1Buf) + _ = WriteState(test.S2, s2Buf) + t.Logf("\nState 1: %s\nState 2: %s", s1Buf.Bytes(), s2Buf.Bytes()) + } + }) + } +} + +func TestStateRemove(t *testing.T) { + cases := map[string]struct { + Address string + One, 
Two *State + }{ + "simple resource": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single instance": { + "test_instance.foo.primary", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "single instance in multi-count": { + "test_instance.foo[0]", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single resource, multi-count": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: 
map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "full module": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "module and children": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo", "bar"}, + Resources: 
map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + for k, tc := range cases { + if err := tc.One.Remove(tc.Address); err != nil { + t.Fatalf("bad: %s\n\n%s", k, err) + } + + if !tc.One.Equal(tc.Two) { + t.Fatalf("Bad: %s\n\n%s\n\n%s", k, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *ResourceState + }{ + // Different types + { + false, + &ResourceState{Type: "foo"}, + &ResourceState{Type: "bar"}, + }, + + // Different dependencies + { + false, + &ResourceState{Dependencies: []string{"foo"}}, + &ResourceState{Dependencies: []string{"bar"}}, + }, + + { + false, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + &ResourceState{Dependencies: []string{"foo"}}, + }, + + { + true, + &ResourceState{Dependencies: []string{"bar", "foo"}}, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + }, + + // Different primaries + { + false, + &ResourceState{Primary: nil}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + { + true, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + // Different tainted + { + false, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + { + true, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: 
"foo", + Tainted: true, + }, + }, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateTaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + Output *ResourceState + }{ + "no primary": { + &ResourceState{}, + &ResourceState{}, + }, + + "primary, not tainted": { + &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + "primary, tainted": { + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + } + + for k, tc := range cases { + tc.Input.Taint() + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.Output, tc.Input) + } + } +} + +func TestResourceStateUntaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + ExpectedOutput *ResourceState + }{ + "no primary, err": { + Input: &ResourceState{}, + ExpectedOutput: &ResourceState{}, + }, + + "primary, not tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + "primary, tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + } + + for k, tc := range cases { + tc.Input.Untaint() + if !reflect.DeepEqual(tc.Input, tc.ExpectedOutput) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.ExpectedOutput, tc.Input) + } + } +} + +func TestInstanceStateEmpty(t *testing.T) { + cases := map[string]struct { + In 
*InstanceState + Result bool + }{ + "nil is empty": { + nil, + true, + }, + "non-nil but without ID is empty": { + &InstanceState{}, + true, + }, + "with ID is not empty": { + &InstanceState{ + ID: "i-abc123", + }, + false, + }, + } + + for tn, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("%q expected %#v to be empty: %#v", tn, tc.In, tc.Result) + } + } +} + +func TestInstanceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *InstanceState + }{ + // Nils + { + false, + nil, + &InstanceState{}, + }, + + { + false, + &InstanceState{}, + nil, + }, + + // Different IDs + { + false, + &InstanceState{ID: "foo"}, + &InstanceState{ID: "bar"}, + }, + + // Different Attributes + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"foo": "baz"}}, + }, + + // Different Attribute keys + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"bar": "baz"}}, + }, + + { + false, + &InstanceState{Attributes: map[string]string{"bar": "baz"}}, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestStateEmpty(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + true, + }, + { + &State{}, + true, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + } + + for i, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateHasResources(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + false, + }, 
+ { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + &ModuleState{ + Resources: map[string]*ResourceState{ + "foo.foo": &ResourceState{}, + }, + }, + }, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.HasResources() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateFromFutureTerraform(t *testing.T) { + cases := []struct { + In string + Result bool + }{ + { + "", + false, + }, + { + "0.1", + false, + }, + { + "999.15.1", + true, + }, + } + + for _, tc := range cases { + state := &State{TFVersion: tc.In} + actual := state.FromFutureTerraform() + if actual != tc.Result { + t.Fatalf("%s: bad: %v", tc.In, actual) + } + } +} + +func TestStateIsRemote(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.IsRemote() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestInstanceState_MergeDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + "port": "8000", + }, + } + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "baz": &ResourceAttrDiff{ + Old: "", + New: "foo", + NewComputed: true, + }, + "port": &ResourceAttrDiff{ + NewRemoved: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + "bar": "foo", + "baz": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +// GH-12183. 
This tests that a list with a computed set generates the +// right partial state. This never failed but is put here for completion +// of the test case for GH-12183. +func TestInstanceState_MergeDiff_computedSet(t *testing.T) { + is := InstanceState{} + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "config.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + + "config.0.name": &ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "config.#": "1", + "config.0.name": "hello", + "config.0.rules.#": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nil(t *testing.T) { + var is *InstanceState + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + }, + } + + is2 := is.MergeDiff(nil) + + expected := map[string]string{ + "foo": "bar", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestReadWriteState(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": 
&ResourceState{ + Primary: &InstanceState{ + ID: "bar", + Ephemeral: EphemeralState{ + ConnInfo: map[string]string{ + "type": "ssh", + "user": "root", + "password": "supersecret", + }, + }, + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify that the version and serial are set + if state.Version != StateVersion { + t.Fatalf("bad version number: %d", state.Version) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // ReadState should not restore sensitive information! + mod := state.RootModule() + mod.Resources["foo"].Primary.Ephemeral = EphemeralState{} + mod.Resources["foo"].Primary.Ephemeral.init() + + if !reflect.DeepEqual(actual, state) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadStateNewVersion(t *testing.T) { + type out struct { + Version int + } + + buf, err := json.Marshal(&out{StateVersion + 1}) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if s != nil { + t.Fatalf("unexpected: %#v", s) + } + if !strings.Contains(err.Error(), "does not support state version") { + t.Fatalf("err: %v", err) + } +} + +func TestReadStateEmptyOrNilFile(t *testing.T) { + var emptyState bytes.Buffer + _, err := ReadState(&emptyState) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } + + var nilFile *os.File + _, err = ReadState(nilFile) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } +} + +func TestReadStateTFVersion(t *testing.T) { + type tfVersion struct { + Version int `json:"version"` + TFVersion string `json:"terraform_version"` + } + + cases := []struct { + Written string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + buf, err := json.Marshal(&tfVersion{ + 
Version: 2, + TFVersion: tc.Written, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Written, err) + } + if err != nil { + continue + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Written, s.TFVersion) + } + } +} + +func TestWriteStateTFVersion(t *testing.T) { + cases := []struct { + Write string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + var buf bytes.Buffer + err := WriteState(&State{TFVersion: tc.Write}, &buf) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Write, err) + } + if err != nil { + continue + } + + s, err := ReadState(&buf) + if err != nil { + t.Fatalf("%s: err: %s", tc.Write, err) + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Write, s.TFVersion) + } + } +} + +func TestParseResourceStateKey(t *testing.T) { + cases := []struct { + Input string + Expected *ResourceStateKey + ExpectedErr bool + }{ + { + Input: "aws_instance.foo.3", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 3, + }, + }, + { + Input: "aws_instance.foo.0", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 0, + }, + }, + { + Input: "aws_instance.foo", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: -1, + }, + }, + { + Input: "data.aws_ami.foo", + Expected: &ResourceStateKey{ + Mode: DataResourceMode, + Type: "aws_ami", + Name: "foo", + Index: -1, + }, + }, + { + Input: "aws_instance.foo.malformed", + ExpectedErr: true, + }, + { + Input: "aws_instance.foo.malformedwithnumber.123", + ExpectedErr: true, + }, + { + Input: "malformed", + ExpectedErr: true, + }, + } + for _, tc := range cases { + rsk, err := 
ParseResourceStateKey(tc.Input) + if rsk != nil && tc.Expected != nil && !rsk.Equal(tc.Expected) { + t.Fatalf("%s: expected %s, got %s", tc.Input, tc.Expected, rsk) + } + if (err != nil) != tc.ExpectedErr { + t.Fatalf("%s: expected err: %t, got %s", tc.Input, tc.ExpectedErr, err) + } + } +} + +func TestReadState_prune(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{Path: rootModulePath}, + nil, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &State{ + Version: state.Version, + Lineage: state.Lineage, + } + expected.init() + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadState_pruneDependencies(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": &ResourceState{ + Dependencies: []string{ + "aws_instance.baz", + "aws_instance.baz", + }, + Primary: &InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // make sure the duplicate Dependencies are filtered + modDeps := actual.Modules[0].Dependencies + resourceDeps := actual.Modules[0].Resources["foo"].Dependencies + + if len(modDeps) > 1 || modDeps[0] != "aws_instance.bar" { + t.Fatalf("expected 1 module depends_on entry, got %q", modDeps) + } + + if len(resourceDeps) > 1 || resourceDeps[0] != 
"aws_instance.baz" { + t.Fatalf("expected 1 resource depends_on entry, got %q", resourceDeps) + } +} + +func TestReadState_bigHash(t *testing.T) { + expected := uint64(14885267135666261723) + s := strings.NewReader(`{"version": 3, "backend":{"hash":14885267135666261723}}`) + + actual, err := ReadState(s) + if err != nil { + t.Fatal(err) + } + + if actual.Backend.Hash != expected { + t.Fatalf("expected backend hash %d, got %d", expected, actual.Backend.Hash) + } +} + +func TestResourceNameSort(t *testing.T) { + names := []string{ + "a", + "b", + "a.0", + "a.c", + "a.d", + "c", + "a.b.0", + "a.b.1", + "a.b.10", + "a.b.2", + } + + sort.Sort(resourceNameSort(names)) + + expected := []string{ + "a", + "a.0", + "a.b.0", + "a.b.1", + "a.b.2", + "a.b.10", + "a.c", + "a.d", + "b", + "c", + } + + if !reflect.DeepEqual(names, expected) { + t.Fatalf("got: %q\nexpected: %q\n", names, expected) + } +} diff --git a/internal/legacy/terraform/state_upgrade_v1_to_v2.go b/legacy/terraform/state_upgrade_v1_to_v2.go similarity index 100% rename from internal/legacy/terraform/state_upgrade_v1_to_v2.go rename to legacy/terraform/state_upgrade_v1_to_v2.go diff --git a/internal/legacy/terraform/state_upgrade_v2_to_v3.go b/legacy/terraform/state_upgrade_v2_to_v3.go similarity index 100% rename from internal/legacy/terraform/state_upgrade_v2_to_v3.go rename to legacy/terraform/state_upgrade_v2_to_v3.go diff --git a/internal/legacy/terraform/state_v1.go b/legacy/terraform/state_v1.go similarity index 100% rename from internal/legacy/terraform/state_v1.go rename to legacy/terraform/state_v1.go diff --git a/internal/legacy/terraform/testing.go b/legacy/terraform/testing.go similarity index 100% rename from internal/legacy/terraform/testing.go rename to legacy/terraform/testing.go diff --git a/internal/legacy/terraform/ui_input.go b/legacy/terraform/ui_input.go similarity index 100% rename from internal/legacy/terraform/ui_input.go rename to legacy/terraform/ui_input.go diff --git 
a/internal/legacy/terraform/ui_input_mock.go b/legacy/terraform/ui_input_mock.go similarity index 100% rename from internal/legacy/terraform/ui_input_mock.go rename to legacy/terraform/ui_input_mock.go diff --git a/internal/legacy/terraform/ui_input_prefix.go b/legacy/terraform/ui_input_prefix.go similarity index 100% rename from internal/legacy/terraform/ui_input_prefix.go rename to legacy/terraform/ui_input_prefix.go diff --git a/internal/legacy/terraform/ui_input_prefix_test.go b/legacy/terraform/ui_input_prefix_test.go similarity index 100% rename from internal/legacy/terraform/ui_input_prefix_test.go rename to legacy/terraform/ui_input_prefix_test.go diff --git a/internal/legacy/terraform/ui_output.go b/legacy/terraform/ui_output.go similarity index 100% rename from internal/legacy/terraform/ui_output.go rename to legacy/terraform/ui_output.go diff --git a/internal/legacy/terraform/ui_output_callback.go b/legacy/terraform/ui_output_callback.go similarity index 100% rename from internal/legacy/terraform/ui_output_callback.go rename to legacy/terraform/ui_output_callback.go diff --git a/internal/legacy/terraform/ui_output_callback_test.go b/legacy/terraform/ui_output_callback_test.go similarity index 100% rename from internal/legacy/terraform/ui_output_callback_test.go rename to legacy/terraform/ui_output_callback_test.go diff --git a/internal/legacy/terraform/ui_output_mock.go b/legacy/terraform/ui_output_mock.go similarity index 100% rename from internal/legacy/terraform/ui_output_mock.go rename to legacy/terraform/ui_output_mock.go diff --git a/internal/legacy/terraform/ui_output_mock_test.go b/legacy/terraform/ui_output_mock_test.go similarity index 100% rename from internal/legacy/terraform/ui_output_mock_test.go rename to legacy/terraform/ui_output_mock_test.go diff --git a/internal/legacy/terraform/upgrade_state_v1_test.go b/legacy/terraform/upgrade_state_v1_test.go similarity index 100% rename from internal/legacy/terraform/upgrade_state_v1_test.go 
rename to legacy/terraform/upgrade_state_v1_test.go diff --git a/internal/legacy/terraform/upgrade_state_v2_test.go b/legacy/terraform/upgrade_state_v2_test.go similarity index 100% rename from internal/legacy/terraform/upgrade_state_v2_test.go rename to legacy/terraform/upgrade_state_v2_test.go diff --git a/internal/legacy/terraform/util.go b/legacy/terraform/util.go similarity index 100% rename from internal/legacy/terraform/util.go rename to legacy/terraform/util.go diff --git a/internal/legacy/terraform/util_test.go b/legacy/terraform/util_test.go similarity index 100% rename from internal/legacy/terraform/util_test.go rename to legacy/terraform/util_test.go diff --git a/internal/legacy/terraform/version.go b/legacy/terraform/version.go similarity index 100% rename from internal/legacy/terraform/version.go rename to legacy/terraform/version.go diff --git a/legacy/terraform/version_required.go b/legacy/terraform/version_required.go new file mode 100644 index 000000000000..4c9cb34a4133 --- /dev/null +++ b/legacy/terraform/version_required.go @@ -0,0 +1,62 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/configs" + + tfversion "github.com/hashicorp/terraform/version" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. 
+func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { + return nil + } + + var diags tfdiags.Diagnostics + module := config.Module + + for _, constraint := range module.CoreVersionConstraints { + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(config.Path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. 
Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + } + } + } + + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) + } + + return diags +} diff --git a/internal/logging/indent.go b/logging/indent.go similarity index 100% rename from internal/logging/indent.go rename to logging/indent.go diff --git a/internal/logging/indent_test.go b/logging/indent_test.go similarity index 100% rename from internal/logging/indent_test.go rename to logging/indent_test.go diff --git a/internal/logging/logging.go b/logging/logging.go similarity index 100% rename from internal/logging/logging.go rename to logging/logging.go diff --git a/internal/logging/panic.go b/logging/panic.go similarity index 100% rename from internal/logging/panic.go rename to logging/panic.go diff --git a/internal/logging/panic_test.go b/logging/panic_test.go similarity index 100% rename from internal/logging/panic_test.go rename to logging/panic_test.go diff --git a/main.go b/main.go index c807bb5a40da..385e05b7829a 100644 --- a/main.go +++ b/main.go @@ -12,19 +12,19 @@ import ( "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/cliconfig" - "github.com/hashicorp/terraform/internal/command/format" - "github.com/hashicorp/terraform/internal/didyoumean" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/didyoumean" + 
"github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/terminal" "github.com/hashicorp/terraform/version" "github.com/mattn/go-shellwords" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" - backendInit "github.com/hashicorp/terraform/internal/backend/init" + backendInit "github.com/hashicorp/terraform/backend/init" ) const ( diff --git a/internal/modsdir/doc.go b/modsdir/doc.go similarity index 100% rename from internal/modsdir/doc.go rename to modsdir/doc.go diff --git a/internal/modsdir/manifest.go b/modsdir/manifest.go similarity index 99% rename from internal/modsdir/manifest.go rename to modsdir/manifest.go index fbb35919cc79..c72b220889c6 100644 --- a/internal/modsdir/manifest.go +++ b/modsdir/manifest.go @@ -13,7 +13,7 @@ import ( version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // Record represents some metadata about an installed module, as part diff --git a/internal/modsdir/paths.go b/modsdir/paths.go similarity index 100% rename from internal/modsdir/paths.go rename to modsdir/paths.go diff --git a/internal/moduledeps/dependencies.go b/moduledeps/dependencies.go similarity index 93% rename from internal/moduledeps/dependencies.go rename to moduledeps/dependencies.go index 6de7aff0ba59..dd21a0a2555f 100644 --- a/internal/moduledeps/dependencies.go +++ b/moduledeps/dependencies.go @@ -1,8 +1,8 @@ package moduledeps import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/plugin/discovery" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plugin/discovery" ) // Providers describes a set of provider dependencies for a given module. 
diff --git a/internal/moduledeps/doc.go b/moduledeps/doc.go similarity index 100% rename from internal/moduledeps/doc.go rename to moduledeps/doc.go diff --git a/moduledeps/module.go b/moduledeps/module.go new file mode 100644 index 000000000000..4db9b3f20576 --- /dev/null +++ b/moduledeps/module.go @@ -0,0 +1,199 @@ +package moduledeps + +import ( + "sort" + "strings" + + "github.com/hashicorp/terraform/plugin/discovery" +) + +// Module represents the dependencies of a single module, as well being +// a node in a tree of such structures representing the dependencies of +// an entire configuration. +type Module struct { + Name string + Providers Providers + Children []*Module +} + +// WalkFunc is a callback type for use with Module.WalkTree +type WalkFunc func(path []string, parent *Module, current *Module) error + +// WalkTree calls the given callback once for the receiver and then +// once for each descendent, in an order such that parents are called +// before their children and siblings are called in the order they +// appear in the Children slice. +// +// When calling the callback, parent will be nil for the first call +// for the receiving module, and then set to the direct parent of +// each module for the subsequent calls. +// +// The path given to the callback is valid only until the callback +// returns, after which it will be mutated and reused. Callbacks must +// therefore copy the path slice if they wish to retain it. +// +// If the given callback returns an error, the walk will be aborted at +// that point and that error returned to the caller. +// +// This function is not thread-safe for concurrent modifications of the +// data structure, so it's the caller's responsibility to arrange for that +// should it be needed. +// +// It is safe for a callback to modify the descendents of the "current" +// module, including the ordering of the Children slice itself, but the +// callback MUST NOT modify the parent module. 
+func (m *Module) WalkTree(cb WalkFunc) error { + return walkModuleTree(make([]string, 0, 1), nil, m, cb) +} + +func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { + path = append(path, current.Name) + err := cb(path, parent, current) + if err != nil { + return err + } + + for _, child := range current.Children { + err := walkModuleTree(path, current, child, cb) + if err != nil { + return err + } + } + return nil +} + +// SortChildren sorts the Children slice into lexicographic order by +// name, in-place. +// +// This is primarily useful prior to calling WalkTree so that the walk +// will proceed in a consistent order. +func (m *Module) SortChildren() { + sort.Sort(sortModules{m.Children}) +} + +// SortDescendents is a convenience wrapper for calling SortChildren on +// the receiver and all of its descendent modules. +func (m *Module) SortDescendents() { + m.WalkTree(func(path []string, parent *Module, current *Module) error { + current.SortChildren() + return nil + }) +} + +type sortModules struct { + modules []*Module +} + +func (s sortModules) Len() int { + return len(s.modules) +} + +func (s sortModules) Less(i, j int) bool { + cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) + return cmp < 0 +} + +func (s sortModules) Swap(i, j int) { + s.modules[i], s.modules[j] = s.modules[j], s.modules[i] +} + +// ProviderRequirements produces a PluginRequirements structure that can +// be used with discovery.PluginMetaSet.ConstrainVersions to identify +// suitable plugins to satisfy the module's provider dependencies. +// +// This method only considers the direct requirements of the receiver. +// Use AllPluginRequirements to flatten the dependencies for the +// entire tree of modules. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. 
+func (m *Module) ProviderRequirements() discovery.PluginRequirements { + ret := make(discovery.PluginRequirements) + for pFqn, dep := range m.Providers { + providerType := pFqn.Type + if existing, exists := ret[providerType]; exists { + ret[providerType].Versions = existing.Versions.Append(dep.Constraints) + } else { + ret[providerType] = &discovery.PluginConstraints{ + Versions: dep.Constraints, + } + } + } + return ret +} + +// AllProviderRequirements calls ProviderRequirements for the receiver and all +// of its descendents, and merges the result into a single PluginRequirements +// structure that would satisfy all of the modules together. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) AllProviderRequirements() discovery.PluginRequirements { + var ret discovery.PluginRequirements + m.WalkTree(func(path []string, parent *Module, current *Module) error { + ret = ret.Merge(current.ProviderRequirements()) + return nil + }) + return ret +} + +// Equal returns true if the receiver is the root of an identical tree +// to the other given Module. This is a deep comparison that considers +// the equality of all downstream modules too. +// +// The children are considered to be ordered, so callers may wish to use +// SortDescendents first to normalize the order of the slices of child nodes. +// +// The implementation of this function is not optimized since it is provided +// primarily for use in tests. 
+func (m *Module) Equal(other *Module) bool { + // take care of nils first + if m == nil && other == nil { + return true + } else if (m == nil && other != nil) || (m != nil && other == nil) { + return false + } + + if m.Name != other.Name { + return false + } + + if len(m.Providers) != len(other.Providers) { + return false + } + if len(m.Children) != len(other.Children) { + return false + } + + // Can't use reflect.DeepEqual on this provider structure because + // the nested Constraints objects contain function pointers that + // never compare as equal. So we'll need to walk it the long way. + for inst, dep := range m.Providers { + if _, exists := other.Providers[inst]; !exists { + return false + } + + if dep.Reason != other.Providers[inst].Reason { + return false + } + + // Constraints are not too easy to compare robustly, so + // we'll just use their string representations as a proxy + // for now. + if dep.Constraints.String() != other.Providers[inst].Constraints.String() { + return false + } + } + + // Above we already checked that we have the same number of children + // in each module, so now we just need to check that they are + // recursively equal. 
+ for i := range m.Children { + if !m.Children[i].Equal(other.Children[i]) { + return false + } + } + + // If we fall out here then they are equal + return true +} diff --git a/moduledeps/module_test.go b/moduledeps/module_test.go new file mode 100644 index 000000000000..d835e526750a --- /dev/null +++ b/moduledeps/module_test.go @@ -0,0 +1,214 @@ +package moduledeps + +import ( + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plugin/discovery" +) + +func TestModuleWalkTree(t *testing.T) { + type walkStep struct { + Path []string + ParentName string + } + + tests := []struct { + Root *Module + WalkOrder []walkStep + }{ + { + &Module{ + Name: "root", + Children: nil, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child", + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child"}, + ParentName: "root", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child", + Children: []*Module{ + { + Name: "grandchild", + }, + }, + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child"}, + ParentName: "root", + }, + { + Path: []string{"root", "child", "grandchild"}, + ParentName: "child", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child1", + Children: []*Module{ + { + Name: "grandchild1", + }, + }, + }, + { + Name: "child2", + Children: []*Module{ + { + Name: "grandchild2", + }, + }, + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child1"}, + ParentName: "root", + }, + { + Path: []string{"root", "child1", "grandchild1"}, + ParentName: "child1", + }, + { + Path: []string{"root", "child2"}, + ParentName: "root", + }, + { + Path: []string{"root", "child2", "grandchild2"}, 
+ ParentName: "child2", + }, + }, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { + wo := test.WalkOrder + test.Root.WalkTree(func(path []string, parent *Module, current *Module) error { + if len(wo) == 0 { + t.Fatalf("ran out of walk steps while expecting one for %#v", path) + } + step := wo[0] + wo = wo[1:] + if got, want := path, step.Path; !reflect.DeepEqual(got, want) { + t.Errorf("wrong path %#v; want %#v", got, want) + } + parentName := "" + if parent != nil { + parentName = parent.Name + } + if got, want := parentName, step.ParentName; got != want { + t.Errorf("wrong parent name %q; want %q", got, want) + } + + if got, want := current.Name, path[len(path)-1]; got != want { + t.Errorf("mismatching current.Name %q and final path element %q", got, want) + } + return nil + }) + }) + } +} + +func TestModuleSortChildren(t *testing.T) { + m := &Module{ + Name: "root", + Children: []*Module{ + { + Name: "apple", + }, + { + Name: "zebra", + }, + { + Name: "xylophone", + }, + { + Name: "pig", + }, + }, + } + + m.SortChildren() + + want := []string{"apple", "pig", "xylophone", "zebra"} + var got []string + for _, c := range m.Children { + got = append(got, c.Name) + } + + if !reflect.DeepEqual(want, got) { + t.Errorf("wrong order %#v; want %#v", want, got) + } +} + +func TestModuleProviderRequirements(t *testing.T) { + m := &Module{ + Name: "root", + Providers: Providers{ + addrs.NewDefaultProvider("foo"): ProviderDependency{ + Constraints: discovery.ConstraintStr(">=1.0.0").MustParse(), + }, + addrs.NewDefaultProvider("baz"): ProviderDependency{ + Constraints: discovery.ConstraintStr(">=3.0.0").MustParse(), + }, + }, + } + + reqd := m.ProviderRequirements() + if len(reqd) != 2 { + t.Errorf("wrong number of elements in %#v; want 2", reqd) + } + if got, want := reqd["foo"].Versions.String(), ">=1.0.0"; got != want { + t.Errorf("wrong combination of versions for 'foo' %q; want %q", got, want) + } + if got, want := 
reqd["baz"].Versions.String(), ">=3.0.0"; got != want { + t.Errorf("wrong combination of versions for 'baz' %q; want %q", got, want) + } +} diff --git a/internal/moduletest/assertion.go b/moduletest/assertion.go similarity index 97% rename from internal/moduletest/assertion.go rename to moduletest/assertion.go index 1bacbfac92b4..007772ed6cec 100644 --- a/internal/moduletest/assertion.go +++ b/moduletest/assertion.go @@ -1,7 +1,7 @@ package moduletest import ( - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) // Assertion is the description of a single test assertion, whether diff --git a/internal/moduletest/doc.go b/moduletest/doc.go similarity index 100% rename from internal/moduletest/doc.go rename to moduletest/doc.go diff --git a/moduletest/provider.go b/moduletest/provider.go new file mode 100644 index 000000000000..bd40893fd642 --- /dev/null +++ b/moduletest/provider.go @@ -0,0 +1,575 @@ +package moduletest + +import ( + "fmt" + "log" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/tfdiags" +) + +// Provider is an implementation of providers.Interface which we're +// using as a likely-only-temporary vehicle for research on an opinionated +// module testing workflow in Terraform. +// +// We expose this to configuration as "terraform.io/builtin/test", but +// any attempt to configure it will emit a warning that it is experimental +// and likely to change or be removed entirely in future Terraform CLI +// releases. +// +// The testing provider exists to gather up test results during a Terraform +// apply operation. 
Its "test_results" managed resource type doesn't have any +// user-visible effect on its own, but when used in conjunction with the +// "terraform test" experimental command it is the intermediary that holds +// the test results while the test runs, so that the test command can then +// report them. +// +// For correct behavior of the assertion tracking, the "terraform test" +// command must be sure to use the same instance of Provider for both the +// plan and apply steps, so that the assertions that were planned can still +// be tracked during apply. For other commands that don't explicitly support +// test assertions, the provider will still succeed but the assertions data +// may not be complete if the apply step fails. +type Provider struct { + // components tracks all of the "component" names that have been + // used in test assertions resources so far. Each resource must have + // a unique component name. + components map[string]*Component + + // Must lock mutex in order to interact with the components map, because + // test assertions can potentially run concurrently. + mutex sync.RWMutex +} + +var _ providers.Interface = (*Provider)(nil) + +// NewProvider returns a new instance of the test provider. +func NewProvider() *Provider { + return &Provider{ + components: make(map[string]*Component), + } +} + +// TestResults returns the current record of test results tracked inside the +// provider. +// +// The result is a direct reference to the internal state of the provider, +// so the caller mustn't modify it nor store it across calls to provider +// operations. +func (p *Provider) TestResults() map[string]*Component { + return p.components +} + +// Reset returns the receiving provider back to its original state, with no +// recorded test results. +// +// It additionally detaches the instance from any data structure previously +// returned by method TestResults, freeing the caller from the constraints +// in its documentation about mutability and storage. 
+// +// For convenience in the presumed common case of resetting as part of +// capturing the results for storage, this method also returns the result +// that method TestResults would've returned if called prior to the call +// to Reset. +func (p *Provider) Reset() map[string]*Component { + p.mutex.Lock() + log.Print("[TRACE] moduletest.Provider: Reset") + ret := p.components + p.components = make(map[string]*Component) + p.mutex.Unlock() + return ret +} + +// GetProviderSchema returns the complete schema for the provider. +func (p *Provider) GetProviderSchema() providers.GetProviderSchemaResponse { + return providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_assertions": testAssertionsSchema, + }, + } +} + +// ValidateProviderConfig validates the provider configuration. +func (p *Provider) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + // This provider has no configurable settings, so nothing to validate. + var res providers.ValidateProviderConfigResponse + return res +} + +// ConfigureProvider configures and initializes the provider. +func (p *Provider) ConfigureProvider(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + // This provider has no configurable settings, but we use the configure + // request as an opportunity to generate a warning about it being + // experimental. 
+ var res providers.ConfigureProviderResponse + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Warning, + "The test provider is experimental", + "The Terraform team is using the test provider (terraform.io/builtin/test) as part of ongoing research about declarative testing of Terraform modules.\n\nThe availability and behavior of this provider is expected to change significantly even in patch releases, so we recommend using this provider only in test configurations and constraining your test configurations to an exact Terraform version.", + nil, + )) + return res +} + +// ValidateResourceConfig is used to validate configuration values for a resource. +func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + log.Print("[TRACE] moduletest.Provider: ValidateResourceConfig") + + var res providers.ValidateResourceConfigResponse + if req.TypeName != "test_assertions" { // we only have one resource type + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) + return res + } + + config := req.Config + if !config.GetAttr("component").IsKnown() { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid component expression", + "The component name must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", + cty.GetAttrPath("component"), + )) + } + if !hclsyntax.ValidIdentifier(config.GetAttr("component").AsString()) { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid component name", + "The component name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", + cty.GetAttrPath("component"), + )) + } + for it := config.GetAttr("equal").ElementIterator(); it.Next(); { + k, obj := it.Element() + if 
!hclsyntax.ValidIdentifier(k.AsString()) { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid assertion name", + "An assertion name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", + cty.GetAttrPath("equal").Index(k), + )) + } + if !obj.GetAttr("description").IsKnown() { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid description expression", + "The description must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", + cty.GetAttrPath("equal").Index(k).GetAttr("description"), + )) + } + } + for it := config.GetAttr("check").ElementIterator(); it.Next(); { + k, obj := it.Element() + if !hclsyntax.ValidIdentifier(k.AsString()) { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid assertion name", + "An assertion name must be a valid identifier, starting with a letter followed by zero or more letters, digits, and underscores.", + cty.GetAttrPath("check").Index(k), + )) + } + if !obj.GetAttr("description").IsKnown() { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid description expression", + "The description must be a static value given in the configuration, and may not be derived from a resource type attribute that will only be known during the apply step.", + cty.GetAttrPath("check").Index(k).GetAttr("description"), + )) + } + } + + return res +} + +// ReadResource refreshes a resource and returns its current state. 
+func (p *Provider) ReadResource(req providers.ReadResourceRequest) providers.ReadResourceResponse { + log.Print("[TRACE] moduletest.Provider: ReadResource") + + var res providers.ReadResourceResponse + if req.TypeName != "test_assertions" { // we only have one resource type + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) + return res + } + // Test assertions are not a real remote object, so there isn't actually + // anything to refresh here. + res.NewState = req.PriorState + return res +} + +// UpgradeResourceState is called to allow the provider to adapt the raw value +// stored in the state in case the schema has changed since it was originally +// written. +func (p *Provider) UpgradeResourceState(req providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + log.Print("[TRACE] moduletest.Provider: UpgradeResourceState") + + var res providers.UpgradeResourceStateResponse + if req.TypeName != "test_assertions" { // we only have one resource type + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) + return res + } + + // We assume here that there can never be a flatmap version of this + // resource type's data, because this provider was never included in a + // version of Terraform that used flatmap and this provider's schema + // contains attributes that are not flatmap-compatible anyway. 
+ if len(req.RawStateFlatmap) != 0 { + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("can't upgrade a flatmap state for %q", req.TypeName)) + return res + } + if req.Version != 0 { + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("the state for this %s was created by a newer version of the provider", req.TypeName)) + return res + } + + v, err := ctyjson.Unmarshal(req.RawStateJSON, testAssertionsSchema.Block.ImpliedType()) + if err != nil { + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("failed to decode state for %s: %s", req.TypeName, err)) + return res + } + + res.UpgradedState = v + return res +} + +// PlanResourceChange takes the current state and proposed state of a +// resource, and returns the planned final state. +func (p *Provider) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + log.Print("[TRACE] moduletest.Provider: PlanResourceChange") + + // this is a destroy plan, + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + var res providers.PlanResourceChangeResponse + if req.TypeName != "test_assertions" { // we only have one resource type + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) + return res + } + + // During planning, our job is to gather up all of the planned test + // assertions marked as pending, which will then allow us to include + // all of them in test results even if there's a failure during apply + // that prevents the full completion of the graph walk. + // + // In a sense our plan phase is similar to the compile step for a + // test program written in another language. 
Planning itself can fail, + // which means we won't be able to form a complete test plan at all, + // but if we succeed in planning then subsequent problems can be treated + // as test failures at "runtime", while still keeping a full manifest + // of all of the tests that ought to have run if the apply had run to + // completion. + + proposed := req.ProposedNewState + res.PlannedState = proposed + componentName := proposed.GetAttr("component").AsString() // proven known during validate + p.mutex.Lock() + defer p.mutex.Unlock() + // NOTE: Ideally we'd do something here to verify if two assertions + // resources in the configuration attempt to declare the same component, + // but we can't actually do that because Terraform calls PlanResourceChange + // during both plan and apply, and so the second one would always fail. + // Since this is just providing a temporary pseudo-syntax for writing tests + // anyway, we'll live with this for now and aim to solve it with a future + // iteration of testing that's better integrated into the Terraform + // language. + /* + if _, exists := p.components[componentName]; exists { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Duplicate test component", + fmt.Sprintf("Another test_assertions resource already declared assertions for the component name %q.", componentName), + cty.GetAttrPath("component"), + )) + return res + } + */ + + component := Component{ + Assertions: make(map[string]*Assertion), + } + + for it := proposed.GetAttr("equal").ElementIterator(); it.Next(); { + k, obj := it.Element() + name := k.AsString() + if _, exists := component.Assertions[name]; exists { + // We can't actually get here in practice because so far we've + // only been pulling keys from one map, and so any duplicates + // would've been caught during config decoding, but this is here + // just to make these two blocks symmetrical to avoid mishaps in + // future refactoring/reorganization. 
+ res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Duplicate test assertion", + fmt.Sprintf("Another assertion block in this resource already declared an assertion named %q.", name), + cty.GetAttrPath("equal").Index(k), + )) + continue + } + + var desc string + descVal := obj.GetAttr("description") + if descVal.IsNull() { + descVal = cty.StringVal("") + } + err := gocty.FromCtyValue(descVal, &desc) + if err != nil { + // We shouldn't get here because we've already validated everything + // that would make FromCtyValue fail above and during validate. + res.Diagnostics = res.Diagnostics.Append(err) + } + + component.Assertions[name] = &Assertion{ + Outcome: Pending, + Description: desc, + } + } + + for it := proposed.GetAttr("check").ElementIterator(); it.Next(); { + k, obj := it.Element() + name := k.AsString() + if _, exists := component.Assertions[name]; exists { + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Duplicate test assertion", + fmt.Sprintf("Another assertion block in this resource already declared an assertion named %q.", name), + cty.GetAttrPath("check").Index(k), + )) + continue + } + + var desc string + descVal := obj.GetAttr("description") + if descVal.IsNull() { + descVal = cty.StringVal("") + } + err := gocty.FromCtyValue(descVal, &desc) + if err != nil { + // We shouldn't get here because we've already validated everything + // that would make FromCtyValue fail above and during validate. + res.Diagnostics = res.Diagnostics.Append(err) + } + + component.Assertions[name] = &Assertion{ + Outcome: Pending, + Description: desc, + } + } + + p.components[componentName] = &component + return res +} + +// ApplyResourceChange takes the planned state for a resource, which may +// yet contain unknown computed values, and applies the changes returning +// the final state. 
+func (p *Provider) ApplyResourceChange(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + log.Print("[TRACE] moduletest.Provider: ApplyResourceChange") + + var res providers.ApplyResourceChangeResponse + if req.TypeName != "test_assertions" { // we only have one resource type + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported resource type %s", req.TypeName)) + return res + } + + // During apply we actually check the assertions and record the results. + // An assertion failure isn't reflected as an error from the apply call + // because if possible we'd like to continue exercising other objects + // downstream in case that allows us to gather more information to report. + // (If something downstream returns an error then that could prevent us + // from completing other assertions, though.) + + planned := req.PlannedState + res.NewState = planned + if res.NewState.IsNull() { + // If we're destroying then we'll just quickly return success to + // allow the test process to clean up after itself. + return res + } + componentName := planned.GetAttr("component").AsString() // proven known during validate + + p.mutex.Lock() + defer p.mutex.Unlock() + component := p.components[componentName] + if component == nil { + // We might get here when using this provider outside of the + // "terraform test" command, where there won't be any mechanism to + // preserve the test provider instance between the plan and apply + // phases. In that case, we assume that nobody will come looking to + // collect the results anyway, and so we can just silently skip + // checking. 
+ return res + } + + for it := planned.GetAttr("equal").ElementIterator(); it.Next(); { + k, obj := it.Element() + name := k.AsString() + var desc string + if plan, exists := component.Assertions[name]; exists { + desc = plan.Description + } + assert := &Assertion{ + Outcome: Pending, + Description: desc, + } + + gotVal := obj.GetAttr("got") + wantVal := obj.GetAttr("want") + switch { + case wantVal.RawEquals(gotVal): + assert.Outcome = Passed + gotStr := repl.FormatValue(gotVal, 4) + assert.Message = fmt.Sprintf("correct value\n got: %s\n", gotStr) + default: + assert.Outcome = Failed + gotStr := repl.FormatValue(gotVal, 4) + wantStr := repl.FormatValue(wantVal, 4) + assert.Message = fmt.Sprintf("wrong value\n got: %s\n want: %s\n", gotStr, wantStr) + } + + component.Assertions[name] = assert + } + + for it := planned.GetAttr("check").ElementIterator(); it.Next(); { + k, obj := it.Element() + name := k.AsString() + var desc string + if plan, exists := component.Assertions[name]; exists { + desc = plan.Description + } + assert := &Assertion{ + Outcome: Pending, + Description: desc, + } + + condVal := obj.GetAttr("condition") + switch { + case condVal.IsNull(): + res.Diagnostics = res.Diagnostics.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid check condition", + "The condition value must be a boolean expression, not null.", + cty.GetAttrPath("check").Index(k).GetAttr("condition"), + )) + continue + case condVal.True(): + assert.Outcome = Passed + assert.Message = "condition passed" + default: + assert.Outcome = Failed + // For "check" we can't really return a decent error message + // because we've lost all of the context by the time we get here. + // "equal" will be better for most tests for that reason, and also + // this is one reason why in the long run it would be better for + // test assertions to be a first-class language feature rather than + // just a provider-based concept. 
+ assert.Message = "condition failed" + } + + component.Assertions[name] = assert + } + + return res +} + +// ImportResourceState requests that the given resource be imported. +func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + var res providers.ImportResourceStateResponse + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("%s is not importable", req.TypeName)) + return res +} + +// ValidateDataResourceConfig is used to to validate the resource configuration values. +func (p *Provider) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + // This provider has no data resouce types at all. + var res providers.ValidateDataResourceConfigResponse + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported data source %s", req.TypeName)) + return res +} + +// ReadDataSource returns the data source's current state. +func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // This provider has no data resouce types at all. + var res providers.ReadDataSourceResponse + res.Diagnostics = res.Diagnostics.Append(fmt.Errorf("unsupported data source %s", req.TypeName)) + return res +} + +// Stop is called when the provider should halt any in-flight actions. +func (p *Provider) Stop() error { + // This provider doesn't do anything that can be cancelled. + return nil +} + +// Close is a noop for this provider, since it's run in-process. +func (p *Provider) Close() error { + return nil +} + +var testAssertionsSchema = providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "component": { + Type: cty.String, + Description: "The name of the component being tested. 
This is just for namespacing assertions in a result report.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "equal": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "description": { + Type: cty.String, + Description: "An optional human-readable description of what's being tested by this assertion.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + "got": { + Type: cty.DynamicPseudoType, + Description: "The actual result value generated by the relevant component.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + "want": { + Type: cty.DynamicPseudoType, + Description: "The value that the component is expected to have generated.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + }, + }, + }, + "check": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "description": { + Type: cty.String, + Description: "An optional (but strongly recommended) human-readable description of what's being tested by this assertion.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + "condition": { + Type: cty.Bool, + Description: "An expression that must be true in order for the test to pass.", + DescriptionKind: configschema.StringPlain, + Required: true, + }, + }, + }, + }, + }, + }, +} diff --git a/moduletest/provider_test.go b/moduletest/provider_test.go new file mode 100644 index 000000000000..33891c534262 --- /dev/null +++ b/moduletest/provider_test.go @@ -0,0 +1,155 @@ +package moduletest + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty-debug/ctydebug" + "github.com/zclconf/go-cty/cty" +) + +func TestProvider(t *testing.T) { + + assertionConfig := cty.ObjectVal(map[string]cty.Value{ + "component": 
cty.StringVal("spline_reticulator"), + "equal": cty.MapVal(map[string]cty.Value{ + "match": cty.ObjectVal(map[string]cty.Value{ + "description": cty.StringVal("this should match"), + "got": cty.StringVal("a"), + "want": cty.StringVal("a"), + }), + "unmatch": cty.ObjectVal(map[string]cty.Value{ + "description": cty.StringVal("this should not match"), + "got": cty.StringVal("a"), + "want": cty.StringVal("b"), + }), + }), + "check": cty.MapVal(map[string]cty.Value{ + "pass": cty.ObjectVal(map[string]cty.Value{ + "description": cty.StringVal("this should pass"), + "condition": cty.True, + }), + "fail": cty.ObjectVal(map[string]cty.Value{ + "description": cty.StringVal("this should fail"), + "condition": cty.False, + }), + }), + }) + + // The provider code expects to receive an object that was decoded from + // HCL using the schema, so to make sure we're testing a more realistic + // situation here we'll require the config to conform to the schema. If + // this fails, it's a bug in the configuration definition above rather + // than in the provider itself. 
+ for _, err := range assertionConfig.Type().TestConformance(testAssertionsSchema.Block.ImpliedType()) { + t.Error(err) + } + + p := NewProvider() + + configureResp := p.ConfigureProvider(providers.ConfigureProviderRequest{ + Config: cty.EmptyObjectVal, + }) + if got, want := len(configureResp.Diagnostics), 1; got != want { + t.Fatalf("got %d Configure diagnostics, but want %d", got, want) + } + if got, want := configureResp.Diagnostics[0].Description().Summary, "The test provider is experimental"; got != want { + t.Fatalf("wrong diagnostic message\ngot: %s\nwant: %s", got, want) + } + + validateResp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ + TypeName: "test_assertions", + Config: assertionConfig, + }) + if got, want := len(validateResp.Diagnostics), 0; got != want { + t.Fatalf("got %d ValidateResourceTypeConfig diagnostics, but want %d", got, want) + } + + planResp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "test_assertions", + Config: assertionConfig, + PriorState: cty.NullVal(assertionConfig.Type()), + ProposedNewState: assertionConfig, + }) + if got, want := len(planResp.Diagnostics), 0; got != want { + t.Fatalf("got %d PlanResourceChange diagnostics, but want %d", got, want) + } + planned := planResp.PlannedState + if got, want := planned, assertionConfig; !want.RawEquals(got) { + t.Fatalf("wrong planned new value\n%s", ctydebug.DiffValues(want, got)) + } + + gotComponents := p.TestResults() + wantComponents := map[string]*Component{ + "spline_reticulator": { + Assertions: map[string]*Assertion{ + "pass": { + Outcome: Pending, + Description: "this should pass", + }, + "fail": { + Outcome: Pending, + Description: "this should fail", + }, + "match": { + Outcome: Pending, + Description: "this should match", + }, + "unmatch": { + Outcome: Pending, + Description: "this should not match", + }, + }, + }, + } + if diff := cmp.Diff(wantComponents, gotComponents); diff != "" { + t.Fatalf("wrong test results after 
planning\n%s", diff) + } + + applyResp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "test_assertions", + Config: assertionConfig, + PriorState: cty.NullVal(assertionConfig.Type()), + PlannedState: planned, + }) + if got, want := len(applyResp.Diagnostics), 0; got != want { + t.Fatalf("got %d ApplyResourceChange diagnostics, but want %d", got, want) + } + final := applyResp.NewState + if got, want := final, assertionConfig; !want.RawEquals(got) { + t.Fatalf("wrong new value\n%s", ctydebug.DiffValues(want, got)) + } + + gotComponents = p.TestResults() + wantComponents = map[string]*Component{ + "spline_reticulator": { + Assertions: map[string]*Assertion{ + "pass": { + Outcome: Passed, + Description: "this should pass", + Message: "condition passed", + }, + "fail": { + Outcome: Failed, + Description: "this should fail", + Message: "condition failed", + }, + "match": { + Outcome: Passed, + Description: "this should match", + Message: "correct value\n got: \"a\"\n", + }, + "unmatch": { + Outcome: Failed, + Description: "this should not match", + Message: "wrong value\n got: \"a\"\n want: \"b\"\n", + }, + }, + }, + } + if diff := cmp.Diff(wantComponents, gotComponents); diff != "" { + t.Fatalf("wrong test results after applying\n%s", diff) + } + +} diff --git a/internal/moduletest/status_string.go b/moduletest/status_string.go similarity index 100% rename from internal/moduletest/status_string.go rename to moduletest/status_string.go diff --git a/internal/moduletest/suite.go b/moduletest/suite.go similarity index 100% rename from internal/moduletest/suite.go rename to moduletest/suite.go diff --git a/internal/plans/action.go b/plans/action.go similarity index 100% rename from internal/plans/action.go rename to plans/action.go diff --git a/internal/plans/action_string.go b/plans/action_string.go similarity index 100% rename from internal/plans/action_string.go rename to plans/action_string.go diff --git a/internal/plans/changes.go 
b/plans/changes.go similarity index 99% rename from internal/plans/changes.go rename to plans/changes.go index 7c54928331e6..0fa7309ef628 100644 --- a/internal/plans/changes.go +++ b/plans/changes.go @@ -3,8 +3,8 @@ package plans import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) // Changes describes various actions that Terraform will attempt to take if diff --git a/internal/plans/changes_src.go b/plans/changes_src.go similarity index 98% rename from internal/plans/changes_src.go rename to plans/changes_src.go index 396493956771..37462350f8fa 100644 --- a/internal/plans/changes_src.go +++ b/plans/changes_src.go @@ -3,8 +3,8 @@ package plans import ( "fmt" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/changes_state.go b/plans/changes_state.go similarity index 90% rename from internal/plans/changes_state.go rename to plans/changes_state.go index 8446e9be66c8..543e6c2bd89d 100644 --- a/internal/plans/changes_state.go +++ b/plans/changes_state.go @@ -1,7 +1,7 @@ package plans import ( - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/states" ) // PlannedState merges the set of changes described by the receiver into the diff --git a/internal/plans/changes_sync.go b/plans/changes_sync.go similarity index 98% rename from internal/plans/changes_sync.go rename to plans/changes_sync.go index 2b4254b04494..fae09145d1d1 100644 --- a/internal/plans/changes_sync.go +++ b/plans/changes_sync.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + 
"github.com/hashicorp/terraform/states" ) // ChangesSync is a wrapper around a Changes that provides a concurrency-safe diff --git a/internal/plans/changes_test.go b/plans/changes_test.go similarity index 97% rename from internal/plans/changes_test.go rename to plans/changes_test.go index 5dbe10f08a93..110119389373 100644 --- a/internal/plans/changes_test.go +++ b/plans/changes_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/doc.go b/plans/doc.go similarity index 100% rename from internal/plans/doc.go rename to plans/doc.go diff --git a/internal/plans/dynamic_value.go b/plans/dynamic_value.go similarity index 100% rename from internal/plans/dynamic_value.go rename to plans/dynamic_value.go diff --git a/internal/plans/internal/planproto/doc.go b/plans/internal/planproto/doc.go similarity index 100% rename from internal/plans/internal/planproto/doc.go rename to plans/internal/planproto/doc.go diff --git a/internal/plans/internal/planproto/planfile.pb.go b/plans/internal/planproto/planfile.pb.go similarity index 100% rename from internal/plans/internal/planproto/planfile.pb.go rename to plans/internal/planproto/planfile.pb.go diff --git a/internal/plans/internal/planproto/planfile.proto b/plans/internal/planproto/planfile.proto similarity index 100% rename from internal/plans/internal/planproto/planfile.proto rename to plans/internal/planproto/planfile.proto diff --git a/internal/plans/mode.go b/plans/mode.go similarity index 100% rename from internal/plans/mode.go rename to plans/mode.go diff --git a/internal/plans/mode_string.go b/plans/mode_string.go similarity index 100% rename from internal/plans/mode_string.go rename to plans/mode_string.go diff --git a/plans/objchange/action.go b/plans/objchange/action.go new 
file mode 100644 index 000000000000..2ca32097cf78 --- /dev/null +++ b/plans/objchange/action.go @@ -0,0 +1,40 @@ +package objchange + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/plans" +) + +// ActionForChange determines which plans.Action value best describes a +// change from the value given in before to the value given in after. +// +// Because it has no context aside from the values, it can only return the +// basic actions NoOp, Create, Update, and Delete. Other codepaths with +// additional information might make this decision differently, such as by +// using the Replace action instead of the Update action where that makes +// sense. +// +// If the after value is unknown then the action can't be properly decided, and +// so ActionForChange will conservatively return either Create or Update +// depending on whether the before value is null. The before value must always +// be fully known; ActionForChange will panic if it contains any unknown values. 
+func ActionForChange(before, after cty.Value) plans.Action { + switch { + case !after.IsKnown(): + if before.IsNull() { + return plans.Create + } + return plans.Update + case after.IsNull() && before.IsNull(): + return plans.NoOp + case after.IsNull() && !before.IsNull(): + return plans.Delete + case before.IsNull() && !after.IsNull(): + return plans.Create + case after.RawEquals(before): + return plans.NoOp + default: + return plans.Update + } +} diff --git a/internal/plans/objchange/compatible.go b/plans/objchange/compatible.go similarity index 99% rename from internal/plans/objchange/compatible.go rename to plans/objchange/compatible.go index ca5026380115..8ead63fa0af6 100644 --- a/internal/plans/objchange/compatible.go +++ b/plans/objchange/compatible.go @@ -7,8 +7,8 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" ) // AssertObjectCompatible checks whether the given "actual" value is a valid diff --git a/internal/plans/objchange/compatible_test.go b/plans/objchange/compatible_test.go similarity index 99% rename from internal/plans/objchange/compatible_test.go rename to plans/objchange/compatible_test.go index 213d3e103fb9..65390345a7b9 100644 --- a/internal/plans/objchange/compatible_test.go +++ b/plans/objchange/compatible_test.go @@ -7,9 +7,9 @@ import ( "github.com/apparentlymart/go-dump/dump" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/tfdiags" ) func TestAssertObjectCompatible(t *testing.T) { diff --git 
a/internal/plans/objchange/doc.go b/plans/objchange/doc.go similarity index 100% rename from internal/plans/objchange/doc.go rename to plans/objchange/doc.go diff --git a/internal/plans/objchange/lcs.go b/plans/objchange/lcs.go similarity index 100% rename from internal/plans/objchange/lcs.go rename to plans/objchange/lcs.go diff --git a/internal/plans/objchange/lcs_test.go b/plans/objchange/lcs_test.go similarity index 98% rename from internal/plans/objchange/lcs_test.go rename to plans/objchange/lcs_test.go index ea31ac226f59..5af7e7c950f9 100644 --- a/internal/plans/objchange/lcs_test.go +++ b/plans/objchange/lcs_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/objchange/normalize_obj.go b/plans/objchange/normalize_obj.go similarity index 98% rename from internal/plans/objchange/normalize_obj.go rename to plans/objchange/normalize_obj.go index 3db3f66f58c3..47229b4748d2 100644 --- a/internal/plans/objchange/normalize_obj.go +++ b/plans/objchange/normalize_obj.go @@ -1,7 +1,7 @@ package objchange import ( - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/objchange/normalize_obj_test.go b/plans/objchange/normalize_obj_test.go similarity index 99% rename from internal/plans/objchange/normalize_obj_test.go rename to plans/objchange/normalize_obj_test.go index e350e181c48f..abc9c3770971 100644 --- a/internal/plans/objchange/normalize_obj_test.go +++ b/plans/objchange/normalize_obj_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/apparentlymart/go-dump/dump" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/objchange/objchange.go 
b/plans/objchange/objchange.go similarity index 99% rename from internal/plans/objchange/objchange.go rename to plans/objchange/objchange.go index 88f044edac65..44b83ce58cad 100644 --- a/internal/plans/objchange/objchange.go +++ b/plans/objchange/objchange.go @@ -6,7 +6,7 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) // ProposedNew constructs a proposed new object value by combining the diff --git a/internal/plans/objchange/objchange_test.go b/plans/objchange/objchange_test.go similarity index 99% rename from internal/plans/objchange/objchange_test.go rename to plans/objchange/objchange_test.go index 27b79d5379c4..99b20b99ebd1 100644 --- a/internal/plans/objchange/objchange_test.go +++ b/plans/objchange/objchange_test.go @@ -6,7 +6,7 @@ import ( "github.com/apparentlymart/go-dump/dump" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) func TestProposedNew(t *testing.T) { diff --git a/internal/plans/objchange/plan_valid.go b/plans/objchange/plan_valid.go similarity index 99% rename from internal/plans/objchange/plan_valid.go rename to plans/objchange/plan_valid.go index 305bc10b18f9..b9c22ce490a3 100644 --- a/internal/plans/objchange/plan_valid.go +++ b/plans/objchange/plan_valid.go @@ -5,7 +5,7 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/configs/configschema" ) // AssertPlanValid checks checks whether a planned new state returned by a diff --git a/internal/plans/objchange/plan_valid_test.go b/plans/objchange/plan_valid_test.go similarity index 99% rename from internal/plans/objchange/plan_valid_test.go rename to plans/objchange/plan_valid_test.go index 00a1602ac2bc..714751a28a77 100644 --- a/internal/plans/objchange/plan_valid_test.go +++ 
b/plans/objchange/plan_valid_test.go @@ -6,8 +6,8 @@ import ( "github.com/apparentlymart/go-dump/dump" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" ) func TestAssertPlanValid(t *testing.T) { diff --git a/plans/plan.go b/plans/plan.go new file mode 100644 index 000000000000..469901caa5a7 --- /dev/null +++ b/plans/plan.go @@ -0,0 +1,197 @@ +package plans + +import ( + "sort" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/globalref" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +// Plan is the top-level type representing a planned set of changes. +// +// A plan is a summary of the set of changes required to move from a current +// state to a goal state derived from configuration. The described changes +// are not applied directly, but contain an approximation of the final +// result that will be completed during apply by resolving any values that +// cannot be predicted. +// +// A plan must always be accompanied by the configuration it was built from, +// since the plan does not itself include all of the information required to +// make the changes indicated. +type Plan struct { + // Mode is the mode under which this plan was created. + // + // This is only recorded to allow for UI differences when presenting plans + // to the end-user, and so it must not be used to influence apply-time + // behavior. The actions during apply must be described entirely by + // the Changes field, regardless of how the plan was created. + // + // FIXME: destroy operations still rely on DestroyMode being set, because + // there is no other source of this information in the plan. 
New behavior + // should not be added based on this flag, and changing the flag should be + // checked carefully against existing destroy behaviors. + UIMode Mode + + VariableValues map[string]DynamicValue + Changes *Changes + DriftedResources []*ResourceInstanceChangeSrc + TargetAddrs []addrs.Targetable + ForceReplaceAddrs []addrs.AbsResourceInstance + Backend Backend + + // Errored is true if the Changes information is incomplete because + // the planning operation failed. An errored plan cannot be applied, + // but can be cautiously inspected for debugging purposes. + Errored bool + + // Checks captures a snapshot of the (probably-incomplete) check results + // at the end of the planning process. + // + // If this plan is applyable (that is, if the planning process completed + // without errors) then the set of checks here should be complete even + // though some of them will likely have StatusUnknown where the check + // condition depends on values we won't know until the apply step. + Checks *states.CheckResults + + // RelevantAttributes is a set of resource instance addresses and + // attributes that are either directly affected by proposed changes or may + // have indirectly contributed to them via references in expressions. + // + // This is the result of a heuristic and is intended only as a hint to + // the UI layer in case it wants to emphasize or de-emphasize certain + // resources. Don't use this to drive any non-cosmetic behavior, especially + // including anything that would be subject to compatibility constraints. + RelevantAttributes []globalref.ResourceAttr + + // PrevRunState and PriorState both describe the situation that the plan + // was derived from: + // + // PrevRunState is a representation of the outcome of the previous + // Terraform operation, without any updates from the remote system but + // potentially including some changes that resulted from state upgrade + // actions. 
+ // + // PriorState is a representation of the current state of remote objects, + // which will differ from PrevRunState if the "refresh" step returned + // different data, which might reflect drift. + // + // PriorState is the main snapshot we use for actions during apply. + // PrevRunState is only here so that we can diff PriorState against it in + // order to report to the user any out-of-band changes we've detected. + PrevRunState *states.State + PriorState *states.State +} + +// CanApply returns true if and only if the receiving plan includes content +// that would make sense to apply. If it returns false, the plan operation +// should indicate that there's nothing to do and Terraform should exit +// without prompting the user to confirm the changes. +// +// This function represents our main business logic for making the decision +// about whether a given plan represents meaningful "changes", and so its +// exact definition may change over time; the intent is just to centralize the +// rules for that rather than duplicating different versions of it at various +// locations in the UI code. +func (p *Plan) CanApply() bool { + switch { + case p.Errored: + // An errored plan can never be applied, because it is incomplete. + // Such a plan is only useful for describing the subset of actions + // planned so far in case they are useful for understanding the + // causes of the errors. + return false + + case !p.Changes.Empty(): + // "Empty" means that everything in the changes is a "NoOp", so if + // not empty then there's at least one non-NoOp change. + return true + + case !p.PriorState.ManagedResourcesEqual(p.PrevRunState): + // If there are no changes planned but we detected some + // outside-Terraform changes while refreshing then we consider + // that applyable in isolation only if this was a refresh-only + // plan where we expect updating the state to include these + // changes was the intended goal. 
+ // + // (We don't treat a "refresh only" plan as applyable in normal + // planning mode because historically the refresh result wasn't + // considered part of a plan at all, and so it would be + // a disruptive breaking change if refreshing alone suddenly + // became applyable in the normal case and an existing configuration + // was relying on ignore_changes in order to be convergent in spite + // of intentional out-of-band operations.) + return p.UIMode == RefreshOnlyMode + + default: + // Otherwise, there are either no changes to apply or they are changes + // our cases above don't consider as worthy of applying in isolation. + return false + } +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving plan. +// +// The result is de-duplicated so that each distinct address appears only once. +func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { + if p == nil || p.Changes == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, rc := range p.Changes.Resources { + m[rc.ProviderAddr.String()] = rc.ProviderAddr + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// Backend represents the backend-related configuration and other data as it +// existed when a plan was created. +type Backend struct { + // Type is the type of backend that the plan will apply against. + Type string + + // Config is the configuration of the backend, whose schema is decided by + // the backend Type. + Config DynamicValue + + // Workspace is the name of the workspace that was active when the plan + // was created. 
It is illegal to apply a plan created for one workspace + // to the state of another workspace. + // (This constraint is already enforced by the statefile lineage mechanism, + // but storing this explicitly allows us to return a better error message + // in the situation where the user has the wrong workspace selected.) + Workspace string +} + +func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { + dv, err := NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + return nil, err + } + + return &Backend{ + Type: typeName, + Config: dv, + Workspace: workspaceName, + }, nil +} diff --git a/plans/plan_test.go b/plans/plan_test.go new file mode 100644 index 000000000000..b8a0e4501ba0 --- /dev/null +++ b/plans/plan_test.go @@ -0,0 +1,95 @@ +package plans + +import ( + "testing" + + "github.com/go-test/deep" + + "github.com/hashicorp/terraform/addrs" +) + +func TestProviderAddrs(t *testing.T) { + + plan := &Plan{ + VariableValues: map[string]DynamicValue{}, + Changes: &Changes{ + Resources: []*ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + DeposedKey: "foodface", + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "what", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("foo"), + Provider: 
addrs.NewDefaultProvider("test"), + }, + }, + }, + }, + } + + got := plan.ProviderAddrs() + want := []addrs.AbsProviderConfig{ + addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("foo"), + Provider: addrs.NewDefaultProvider("test"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + } + + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +// Module outputs should not affect the result of Empty +func TestModuleOutputChangesEmpty(t *testing.T) { + changes := &Changes{ + Outputs: []*OutputChangeSrc{ + { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), + OutputValue: addrs.OutputValue{ + Name: "output", + }, + }, + ChangeSrc: ChangeSrc{ + Action: Update, + Before: []byte("a"), + After: []byte("b"), + }, + }, + }, + } + + if !changes.Empty() { + t.Fatal("plan has no visible changes") + } +} diff --git a/internal/plans/planfile/config_snapshot.go b/plans/planfile/config_snapshot.go similarity index 99% rename from internal/plans/planfile/config_snapshot.go rename to plans/planfile/config_snapshot.go index 163366d9bcf9..a78a99b31fce 100644 --- a/internal/plans/planfile/config_snapshot.go +++ b/plans/planfile/config_snapshot.go @@ -11,7 +11,7 @@ import ( "time" version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/configs/configload" + "github.com/hashicorp/terraform/configs/configload" ) const configSnapshotPrefix = "tfconfig/" diff --git a/internal/plans/planfile/config_snapshot_test.go b/plans/planfile/config_snapshot_test.go similarity index 94% rename from internal/plans/planfile/config_snapshot_test.go rename to plans/planfile/config_snapshot_test.go index 91069ea38c77..2efd0031f66c 100644 --- a/internal/plans/planfile/config_snapshot_test.go +++ b/plans/planfile/config_snapshot_test.go @@ -9,7 +9,7 @@ import ( "github.com/davecgh/go-spew/spew" - 
"github.com/hashicorp/terraform/internal/configs/configload" + "github.com/hashicorp/terraform/configs/configload" ) func TestConfigSnapshotRoundtrip(t *testing.T) { diff --git a/internal/plans/planfile/doc.go b/plans/planfile/doc.go similarity index 100% rename from internal/plans/planfile/doc.go rename to plans/planfile/doc.go diff --git a/internal/plans/planfile/planfile_test.go b/plans/planfile/planfile_test.go similarity index 92% rename from internal/plans/planfile/planfile_test.go rename to plans/planfile/planfile_test.go index af3615cfcf90..767b608733f0 100644 --- a/internal/plans/planfile/planfile_test.go +++ b/plans/planfile/planfile_test.go @@ -6,13 +6,13 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/internal/plans/planfile/reader.go b/plans/planfile/reader.go similarity index 96% rename from internal/plans/planfile/reader.go rename to plans/planfile/reader.go index ff6e129e0bac..d37546836257 100644 --- a/internal/plans/planfile/reader.go +++ b/plans/planfile/reader.go @@ -6,12 +6,12 @@ import ( "fmt" "io/ioutil" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/depsfile" - 
"github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" ) const tfstateFilename = "tfstate" diff --git a/internal/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf b/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf similarity index 100% rename from internal/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf rename to plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf diff --git a/internal/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf b/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf similarity index 100% rename from internal/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf rename to plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf diff --git a/internal/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf b/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf similarity index 100% rename from internal/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf rename to plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf diff --git a/internal/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf b/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf similarity index 100% rename from internal/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf rename to 
plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf diff --git a/internal/plans/planfile/testdata/test-config/.terraform/modules/modules.json b/plans/planfile/testdata/test-config/.terraform/modules/modules.json similarity index 100% rename from internal/plans/planfile/testdata/test-config/.terraform/modules/modules.json rename to plans/planfile/testdata/test-config/.terraform/modules/modules.json diff --git a/internal/plans/planfile/testdata/test-config/root.tf b/plans/planfile/testdata/test-config/root.tf similarity index 100% rename from internal/plans/planfile/testdata/test-config/root.tf rename to plans/planfile/testdata/test-config/root.tf diff --git a/internal/plans/planfile/tfplan.go b/plans/planfile/tfplan.go similarity index 98% rename from internal/plans/planfile/tfplan.go rename to plans/planfile/tfplan.go index 19866b6714e9..6cea17568fa4 100644 --- a/internal/plans/planfile/tfplan.go +++ b/plans/planfile/tfplan.go @@ -7,13 +7,13 @@ import ( "google.golang.org/protobuf/proto" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/lang/globalref" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/plans/internal/planproto" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" + "github.com/hashicorp/terraform/lang/globalref" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/internal/planproto" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/version" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/plans/planfile/tfplan_test.go b/plans/planfile/tfplan_test.go similarity index 96% rename from internal/plans/planfile/tfplan_test.go rename to 
plans/planfile/tfplan_test.go index 6984ceafdcad..68e1481dd201 100644 --- a/internal/plans/planfile/tfplan_test.go +++ b/plans/planfile/tfplan_test.go @@ -7,12 +7,12 @@ import ( "github.com/go-test/deep" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/lang/globalref" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" + "github.com/hashicorp/terraform/lang/globalref" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" ) func TestTFPlanRoundTrip(t *testing.T) { diff --git a/internal/plans/planfile/writer.go b/plans/planfile/writer.go similarity index 94% rename from internal/plans/planfile/writer.go rename to plans/planfile/writer.go index bdf84c86db44..a33ee24b6e41 100644 --- a/internal/plans/planfile/writer.go +++ b/plans/planfile/writer.go @@ -6,10 +6,10 @@ import ( "os" "time" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/plans" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" ) type CreateArgs struct { diff --git a/internal/plans/resourceinstancechangeactionreason_string.go b/plans/resourceinstancechangeactionreason_string.go similarity index 100% rename from internal/plans/resourceinstancechangeactionreason_string.go rename to plans/resourceinstancechangeactionreason_string.go diff --git a/plugin/convert/diagnostics.go b/plugin/convert/diagnostics.go new file mode 
100644 index 000000000000..8aa9e0fb9b77 --- /dev/null +++ b/plugin/convert/diagnostics.go @@ -0,0 +1,132 @@ +package convert + +import ( + "github.com/hashicorp/terraform/tfdiags" + proto "github.com/hashicorp/terraform/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrorsToProto converts the warnings and errors return by the legacy +// provider to protobuf diagnostics. +func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { + for _, w := range warns { + diags = AppendProtoDiag(diags, w) + } + + for _, e := range errs { + diags = AppendProtoDiag(diags, e) + } + + return diags +} + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. +func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + Attribute: ap, + }) + case error: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + }) + case string: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: d, + }) + case *proto.Diagnostic: + diags = append(diags, d) + case []*proto.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. 
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, d := range ds { + var severity tfdiags.Severity + + switch d.Severity { + case proto.Diagnostic_ERROR: + severity = tfdiags.Error + case proto.Diagnostic_WARNING: + severity = tfdiags.Warning + } + + var newDiag tfdiags.Diagnostic + + // if there's an attribute path, we need to create a AttributeValue diagnostic + if d.Attribute != nil && len(d.Attribute.Steps) > 0 { + path := AttributePathToPath(d.Attribute) + newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) + } else { + newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) + } + + diags = diags.Append(newDiag) + } + + return diags +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *proto.AttributePath) cty.Path { + var p cty.Path + for _, step := range ap.Steps { + switch selector := step.Selector.(type) { + case *proto.AttributePath_Step_AttributeName: + p = p.GetAttr(selector.AttributeName) + case *proto.AttributePath_Step_ElementKeyString: + p = p.Index(cty.StringVal(selector.ElementKeyString)) + case *proto.AttributePath_Step_ElementKeyInt: + p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) + } + } + return p +} + +// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. 
+func PathToAttributePath(p cty.Path) *proto.AttributePath { + ap := &proto.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: selector.Name, + }, + }) + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: key.AsString(), + }, + }) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: v, + }, + }) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/plugin/convert/diagnostics_test.go b/plugin/convert/diagnostics_test.go new file mode 100644 index 000000000000..d700ab699d3e --- /dev/null +++ b/plugin/convert/diagnostics_test.go @@ -0,0 +1,411 @@ +package convert + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/tfdiags" + proto "github.com/hashicorp/terraform/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +var ignoreUnexported = cmpopts.IgnoreUnexported( + proto.Diagnostic{}, + proto.Schema_Block{}, + proto.Schema_NestedBlock{}, + proto.Schema_Attribute{}, +) + +func TestProtoDiagnostics(t *testing.T) { + diags := WarnsAndErrsToProto( + []string{ + "warning 1", + "warning 2", + }, + []error{ + errors.New("error 1"), + errors.New("error 2"), + }, + ) + + expected := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 1", + }, + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 2", + }, + { + 
Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + }, + } + + if !cmp.Equal(expected, diags, ignoreUnexported) { + t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) + } +} + +func TestDiagnostics(t *testing.T) { + type diagFlat struct { + Severity tfdiags.Severity + Attr []interface{} + Summary string + Detail string + } + + tests := map[string]struct { + Cons func([]*proto.Diagnostic) []*proto.Diagnostic + Want []diagFlat + }{ + "nil": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return diags + }, + nil, + }, + "error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + }, + }, + }, + "detailed error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + Detail: "detailed error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + Detail: "detailed error", + }, + }, + }, + "warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + }, + }, + }, + "detailed warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + Detail: "detailed warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + Detail: "detailed warning", + }, + }, + }, + "multi error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, 
+ Summary: "first error", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "second error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "first error", + }, + { + Severity: tfdiags.Error, + Summary: "second error", + }, + }, + }, + "warning and error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "warning", + }, + { + Severity: tfdiags.Error, + Summary: "error", + }, + }, + }, + "attr error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + Detail: "error detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attribute_name", + }, + }, + }, + }, + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error", + Detail: "error detail", + Attr: []interface{}{"attribute_name"}, + }, + }, + }, + "multi attr": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + Detail: "error 2 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + 
AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + Detail: "warning detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: 1, + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 3", + Detail: "error 3 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: "idx", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + ) + + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error 1", + Detail: "error 1 detail", + Attr: []interface{}{"attr"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 2", + Detail: "error 2 detail", + Attr: []interface{}{"attr", "sub"}, + }, + { + Severity: tfdiags.Warning, + Summary: "warning", + Detail: "warning detail", + Attr: []interface{}{"attr", 1, "sub"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 3", + Detail: "error 3 detail", + Attr: []interface{}{"attr", "idx", "sub"}, + }, + }, + }, + } + + flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { + var flat []diagFlat + for _, item := range ds { + desc := item.Description() + + var attr []interface{} + + for _, a := range tfdiags.GetAttribute(item) { + switch step := a.(type) { + case cty.GetAttrStep: + attr = append(attr, step.Name) + case cty.IndexStep: + switch step.Key.Type() { + case cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + attr = append(attr, int(i)) + case 
cty.String: + attr = append(attr, step.Key.AsString()) + } + } + } + + flat = append(flat, diagFlat{ + Severity: item.Severity(), + Attr: attr, + Summary: desc.Summary, + Detail: desc.Detail, + }) + } + return flat + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // we take the + tfDiags := ProtoToDiagnostics(tc.Cons(nil)) + + flat := flattenTFDiags(tfDiags) + + if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that a diagnostic with a present but empty attribute results in a +// whole body diagnostic. We verify this by inspecting the resulting Subject +// from the diagnostic when considered in the context of a config body. +func TestProtoDiagnostics_emptyAttributePath(t *testing.T) { + protoDiags := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + // this slice is intentionally left empty + }, + }, + }, + } + tfDiags := ProtoToDiagnostics(protoDiags) + + testConfig := `provider "test" { + foo = "bar" +}` + f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + t.Fatal(parseDiags) + } + diags := tfDiags.InConfigBody(f.Body, "") + + if len(tfDiags) != 1 { + t.Fatalf("expected 1 diag, got %d", len(tfDiags)) + } + got := diags[0].Source().Subject + want := &tfdiags.SourceRange{ + Filename: "test.tf", + Start: tfdiags.SourcePos{Line: 1, Column: 1}, + End: tfdiags.SourcePos{Line: 1, Column: 1}, + } + + if !cmp.Equal(got, want, typeComparer, valueComparer) { + t.Fatal(cmp.Diff(got, want, typeComparer, valueComparer)) + } +} diff --git a/plugin/convert/schema.go b/plugin/convert/schema.go new file mode 100644 index 000000000000..65d0f14b7c68 --- /dev/null +++ b/plugin/convert/schema.go @@ -0,0 +1,185 @@ +package 
convert + +import ( + "encoding/json" + "reflect" + "sort" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + proto "github.com/hashicorp/terraform/tfplugin5" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. +func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + + attr.Type = ty + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) proto.StringKind { + switch k { + default: + return proto.StringKind_PLAIN + case configschema.StringMarkdown: + return proto.StringKind_MARKDOWN + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + 
nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. +func ProtoToProviderSchema(s *proto.Schema) providers.Schema { + return providers.Schema{ + Version: s.Version, + Block: ProtoToConfigSchema(s.Block), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a terraform *configschema.Block. +func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k proto.StringKind) configschema.StringKind { + switch k { + default: + return configschema.StringPlain + case proto.StringKind_MARKDOWN: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case 
proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: + nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} diff --git a/plugin/convert/schema_test.go b/plugin/convert/schema_test.go new file mode 100644 index 000000000000..0464cf15d5f6 --- /dev/null +++ b/plugin/convert/schema_test.go @@ -0,0 +1,361 @@ +package convert + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/configs/configschema" + proto "github.com/hashicorp/terraform/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +// Test that we can convert configschema to protobuf types and back again. 
+func TestConvertSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Block *proto.Schema_Block + Want *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ProtoToConfigSchema(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that we can convert configschema to protobuf types and back again. 
+func TestConvertProtoSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Want *proto.Schema_Block + Block *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ConfigSchemaToProto(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) + } + }) + } +} diff --git a/internal/plugin/discovery/find.go b/plugin/discovery/find.go similarity index 100% rename from internal/plugin/discovery/find.go rename to plugin/discovery/find.go diff --git a/internal/plugin/discovery/find_test.go b/plugin/discovery/find_test.go similarity index 100% rename from internal/plugin/discovery/find_test.go rename to plugin/discovery/find_test.go diff --git a/internal/plugin/discovery/get_cache.go b/plugin/discovery/get_cache.go similarity index 100% rename from internal/plugin/discovery/get_cache.go rename to 
plugin/discovery/get_cache.go diff --git a/internal/plugin/discovery/get_cache_test.go b/plugin/discovery/get_cache_test.go similarity index 100% rename from internal/plugin/discovery/get_cache_test.go rename to plugin/discovery/get_cache_test.go diff --git a/internal/plugin/discovery/meta.go b/plugin/discovery/meta.go similarity index 100% rename from internal/plugin/discovery/meta.go rename to plugin/discovery/meta.go diff --git a/internal/plugin/discovery/meta_set.go b/plugin/discovery/meta_set.go similarity index 100% rename from internal/plugin/discovery/meta_set.go rename to plugin/discovery/meta_set.go diff --git a/internal/plugin/discovery/meta_set_test.go b/plugin/discovery/meta_set_test.go similarity index 100% rename from internal/plugin/discovery/meta_set_test.go rename to plugin/discovery/meta_set_test.go diff --git a/internal/plugin/discovery/meta_test.go b/plugin/discovery/meta_test.go similarity index 100% rename from internal/plugin/discovery/meta_test.go rename to plugin/discovery/meta_test.go diff --git a/internal/plugin/discovery/requirements.go b/plugin/discovery/requirements.go similarity index 100% rename from internal/plugin/discovery/requirements.go rename to plugin/discovery/requirements.go diff --git a/internal/plugin/discovery/requirements_test.go b/plugin/discovery/requirements_test.go similarity index 100% rename from internal/plugin/discovery/requirements_test.go rename to plugin/discovery/requirements_test.go diff --git a/internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 b/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 similarity index 100% rename from internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 rename to plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 diff --git 
a/internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version b/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version similarity index 100% rename from internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version rename to plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version diff --git a/internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 b/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 similarity index 100% rename from internal/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 rename to plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 diff --git a/internal/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar b/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar similarity index 100% rename from internal/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar rename to plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar diff --git a/internal/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz b/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz similarity index 100% rename from internal/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz rename to plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz diff --git a/internal/plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar b/plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar similarity index 100% rename from internal/plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar rename to plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar diff --git a/internal/plugin/discovery/testdata/not-a-dir 
b/plugin/discovery/testdata/not-a-dir similarity index 100% rename from internal/plugin/discovery/testdata/not-a-dir rename to plugin/discovery/testdata/not-a-dir diff --git a/internal/plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 b/plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 similarity index 100% rename from internal/plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 rename to plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 diff --git a/internal/plugin/discovery/version.go b/plugin/discovery/version.go similarity index 100% rename from internal/plugin/discovery/version.go rename to plugin/discovery/version.go diff --git a/internal/plugin/discovery/version_set.go b/plugin/discovery/version_set.go similarity index 100% rename from internal/plugin/discovery/version_set.go rename to plugin/discovery/version_set.go diff --git a/internal/plugin/discovery/version_set_test.go b/plugin/discovery/version_set_test.go similarity index 100% rename from internal/plugin/discovery/version_set_test.go rename to plugin/discovery/version_set_test.go diff --git a/internal/plugin/discovery/version_test.go b/plugin/discovery/version_test.go similarity index 100% rename from internal/plugin/discovery/version_test.go rename to plugin/discovery/version_test.go diff --git a/plugin/grpc_error.go b/plugin/grpc_error.go new file mode 100644 index 000000000000..99ce8c8b8d68 --- /dev/null +++ b/plugin/grpc_error.go @@ -0,0 +1,74 @@ +package plugin + +import ( + "fmt" + "path" + "runtime" + + "github.com/hashicorp/terraform/tfdiags" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcErr extracts some known error types and formats them into better +// representations for core. This must only be called from plugin methods. 
+// Since we don't use RPC status errors for the plugin protocol, these do not +// contain any useful details, and we can return some text that at least +// indicates the plugin call and possible error condition. +func grpcErr(err error) (diags tfdiags.Diagnostics) { + if err == nil { + return + } + + // extract the method name from the caller. + pc, _, _, ok := runtime.Caller(1) + if !ok { + logger.Error("unknown grpc call", "error", err) + return diags.Append(err) + } + + f := runtime.FuncForPC(pc) + + // Function names will contain the full import path. Take the last + // segment, which will let users know which method was being called. + _, requestName := path.Split(f.Name()) + + // Here we can at least correlate the error in the logs to a particular binary. + logger.Error(requestName, "error", err) + + // TODO: while this expands the error codes into somewhat better messages, + // this still does not easily link the error to an actual user-recognizable + // plugin. The grpc plugin does not know its configured name, and the + // errors are in a list of diagnostics, making it hard for the caller to + // annotate the returned errors. + switch status.Code(err) { + case codes.Unavailable: + // This case is when the plugin has stopped running for some reason, + // and is usually the result of a crash. + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Plugin did not respond", + fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. 
"+ + "The plugin logs may contain more details.", requestName), + )) + case codes.Canceled: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Request cancelled", + fmt.Sprintf("The %s request was cancelled.", requestName), + )) + case codes.Unimplemented: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Unsupported plugin method", + fmt.Sprintf("The %s method is not supported by this plugin.", requestName), + )) + default: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Plugin error", + fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), + )) + } + return +} diff --git a/plugin/grpc_provider.go b/plugin/grpc_provider.go new file mode 100644 index 000000000000..a909952ebad3 --- /dev/null +++ b/plugin/grpc_provider.go @@ -0,0 +1,697 @@ +package plugin + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/providers" + proto "github.com/hashicorp/terraform/tfplugin5" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +var logger = logging.HCLogger() + +// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. +type GRPCProviderPlugin struct { + plugin.Plugin + GRPCProvider func() proto.ProviderServer +} + +func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvider{ + client: proto.NewProviderClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProviderServer(s, p.GRPCProvider()) + return nil +} + +// GRPCProvider handles the client, or core side of the plugin rpc connection. 
// The GRPCProvider methods are mostly a translation layer between the
// terraform providers types and the grpc proto types, directly converting
// between the two.
type GRPCProvider struct {
	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
	// This allows the GRPCProvider a way to shutdown the plugin process.
	PluginClient *plugin.Client

	// TestServer contains a grpc.Server to close when the GRPCProvider is being
	// used in an end to end test of a provider.
	TestServer *grpc.Server

	// Proto client use to make the grpc service calls.
	client proto.ProviderClient

	// this context is created by the plugin package, and is canceled when the
	// plugin process ends.
	ctx context.Context

	// schema stores the schema for this provider. This is used to properly
	// serialize the state for requests.
	// mu guards schemas; it must not be held while GetProviderSchema runs.
	mu      sync.Mutex
	schemas providers.GetProviderSchemaResponse
}

// getSchema is used internally to get the cached provider schema
func (p *GRPCProvider) getSchema() providers.GetProviderSchemaResponse {
	p.mu.Lock()
	// unlock inline in case GetSchema needs to be called
	if p.schemas.Provider.Block != nil {
		p.mu.Unlock()
		return p.schemas
	}
	p.mu.Unlock()

	return p.GetProviderSchema()
}

// GetProviderSchema fetches the provider, resource and data source schemas
// over the wire, caching the result in p.schemas for subsequent calls.
func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) {
	logger.Trace("GRPCProvider: GetProviderSchema")
	p.mu.Lock()
	defer p.mu.Unlock()

	// Another goroutine may have populated the cache between getSchema's
	// unlocked check and this point, so re-check under the lock.
	if p.schemas.Provider.Block != nil {
		return p.schemas
	}

	resp.ResourceTypes = make(map[string]providers.Schema)
	resp.DataSources = make(map[string]providers.Schema)

	// Some providers may generate quite large schemas, and the internal default
	// grpc response size limit is 4MB. 64MB should cover most any use case, and
	// if we get providers nearing that we may want to consider a finer-grained
	// API to fetch individual resource schemas.
	// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
	// this for compatibility, but recent providers all set the max message
	// size much higher on the server side, which is the supported method for
	// determining payload size.
	const maxRecvSize = 64 << 20
	protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}

	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	if resp.Diagnostics.HasErrors() {
		return resp
	}

	if protoResp.Provider == nil {
		resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
		return resp
	}

	resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
	if protoResp.ProviderMeta == nil {
		logger.Debug("No provider meta schema returned")
	} else {
		resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta)
	}

	for name, res := range protoResp.ResourceSchemas {
		resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
	}

	for name, data := range protoResp.DataSourceSchemas {
		resp.DataSources[name] = convert.ProtoToProviderSchema(data)
	}

	if protoResp.ServerCapabilities != nil {
		resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy
	}

	p.schemas = resp

	return resp
}

// ValidateProviderConfig validates the provider configuration via the
// PrepareProviderConfig protocol call, returning the prepared config.
func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
	logger.Trace("GRPCProvider: ValidateProviderConfig")

	schema := p.getSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	ty := schema.Provider.Block.ImpliedType()

	mp, err := msgpack.Marshal(r.Config, ty)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto.PrepareProviderConfig_Request{
		Config:
&proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + config, err := decodeDynamicValue(protoResp.PreparedConfig, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PreparedConfig = config + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + logger.Trace("GRPCProvider: ValidateResourceConfig") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resourceSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + logger.Trace("GRPCProvider: ValidateDataResourceConfig") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + 
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + logger.Trace("GRPCProvider: UpgradeResourceState") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + ty := resSchema.Block.ImpliedType() + resp.UpgradedState = cty.NullVal(ty) + if protoResp.UpgradedState == nil { + return resp + } + + state, err := decodeDynamicValue(protoResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = state + + return resp +} + +func (p *GRPCProvider) 
ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + logger.Trace("GRPCProvider: ConfigureProvider") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + logger.Trace("GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + logger.Trace("GRPCProvider: ReadResource") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type " + r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + if 
metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + logger.Trace("GRPCProvider: PlanResourceChange") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + capabilities := schema.ServerCapabilities + + // If the provider doesn't support planning a destroy operation, we can + // return immediately. 
+ if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { + resp.PlannedState = r.ProposedNewState + resp.PlannedPrivate = r.PriorPrivate + return resp + } + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + logger.Trace("GRPCProvider: ApplyResourceChange") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + 
resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + logger.Trace("GRPCProvider: ImportResourceState") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + protoReq := &proto.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + continue + } + + state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + logger.Trace("GRPCProvider: ReadDataSource") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + schema.Diagnostics = schema.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + } + + metaSchema := schema.ProviderMeta + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = 
resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + logger.Trace("GRPCProvider: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + logger.Debug("provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} + +// Decode a DynamicValue from either the JSON or MsgPack encoding. 
+func decodeDynamicValue(v *proto.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} diff --git a/plugin/grpc_provider_test.go b/plugin/grpc_provider_test.go new file mode 100644 index 000000000000..9f02af341a8a --- /dev/null +++ b/plugin/grpc_provider_test.go @@ -0,0 +1,777 @@ +package plugin + +import ( + "bytes" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + mockproto "github.com/hashicorp/terraform/plugin/mock_proto" + proto "github.com/hashicorp/terraform/tfplugin5" +) + +var _ providers.Interface = (*GRPCProvider)(nil) + +func mockProviderClient(t *testing.T) *mockproto.MockProviderClient { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + // we always need a GetSchema method + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(providerProtoSchema(), nil) + + return client +} + +func checkDiags(t *testing.T, d tfdiags.Diagnostics) { + t.Helper() + if d.HasErrors() { + t.Fatal(d.Err()) + } +} + +// checkDiagsHasError ensures error diagnostics are present or fails the test. 
func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) {
	t.Helper()

	if !d.HasErrors() {
		t.Fatal("expected error diagnostics")
	}
}

// providerProtoSchema returns the wire-format schema fixture shared by these
// tests: a provider block, one managed resource type ("resource") and one
// data source ("data"), each with a single required string attribute "attr".
func providerProtoSchema() *proto.GetProviderSchema_Response {
	return &proto.GetProviderSchema_Response{
		Provider: &proto.Schema{
			Block: &proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name:     "attr",
						Type:     []byte(`"string"`),
						Required: true,
					},
				},
			},
		},
		ResourceSchemas: map[string]*proto.Schema{
			"resource": &proto.Schema{
				Version: 1,
				Block: &proto.Schema_Block{
					Attributes: []*proto.Schema_Attribute{
						{
							Name:     "attr",
							Type:     []byte(`"string"`),
							Required: true,
						},
					},
				},
			},
		},
		DataSourceSchemas: map[string]*proto.Schema{
			"data": &proto.Schema{
				Version: 1,
				Block: &proto.Schema_Block{
					Attributes: []*proto.Schema_Attribute{
						{
							Name:     "attr",
							Type:     []byte(`"string"`),
							Required: true,
						},
					},
				},
			},
		},
	}
}

func TestGRPCProvider_GetSchema(t *testing.T) {
	p := &GRPCProvider{
		client: mockProviderClient(t),
	}

	resp := p.GetProviderSchema()
	checkDiags(t, resp.Diagnostics)
}

// Ensure that gRPC errors are returned early.
// Reference: https://github.com/hashicorp/terraform/issues/31047
func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mockproto.NewMockProviderClient(ctrl)

	client.EXPECT().GetSchema(
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error"))

	p := &GRPCProvider{
		client: client,
	}

	resp := p.GetProviderSchema()

	checkDiagsHasError(t, resp.Diagnostics)
}

// Ensure that provider error diagnostics are returned early.
// Reference: https://github.com/hashicorp/terraform/issues/31047
func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mockproto.NewMockProviderClient(ctrl)

	client.EXPECT().GetSchema(
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.GetProviderSchema_Response{
		Diagnostics: []*proto.Diagnostic{
			{
				Severity: proto.Diagnostic_ERROR,
				Summary:  "error summary",
				Detail:   "error detail",
			},
		},
		// Trigger potential panics
		Provider: &proto.Schema{},
	}, nil)

	p := &GRPCProvider{
		client: client,
	}

	resp := p.GetProviderSchema()

	checkDiagsHasError(t, resp.Diagnostics)
}

func TestGRPCProvider_PrepareProviderConfig(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().PrepareProviderConfig(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.PrepareProviderConfig_Response{}, nil)

	cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"})
	resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg})
	checkDiags(t, resp.Diagnostics)
}

func TestGRPCProvider_ValidateResourceConfig(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().ValidateResourceTypeConfig(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ValidateResourceTypeConfig_Response{}, nil)

	cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"})
	resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{
		TypeName: "resource",
		Config:   cfg,
	})
	checkDiags(t, resp.Diagnostics)
}

func TestGRPCProvider_ValidateDataSourceConfig(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().ValidateDataSourceConfig(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ValidateDataSourceConfig_Response{}, nil)

	cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"})
	resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{
		TypeName: "data",
		Config:   cfg,
	})
	checkDiags(t, resp.Diagnostics)
}

// The msgpack literal "\x81\xa4attr\xa3bar" encodes {"attr": "bar"}.
func TestGRPCProvider_UpgradeResourceState(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().UpgradeResourceState(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.UpgradeResourceState_Response{
		UpgradedState: &proto.DynamicValue{
			Msgpack: []byte("\x81\xa4attr\xa3bar"),
		},
	}, nil)

	resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{
		TypeName:     "resource",
		Version:      0,
		RawStateJSON: []byte(`{"old_attr":"bar"}`),
	})
	checkDiags(t, resp.Diagnostics)

	expected := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty))
	}
}

// Same as above, but the provider responds with the JSON encoding.
func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().UpgradeResourceState(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.UpgradeResourceState_Response{
		UpgradedState: &proto.DynamicValue{
			Json: []byte(`{"attr":"bar"}`),
		},
	}, nil)

	resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{
		TypeName:     "resource",
		Version:      0,
		RawStateJSON: []byte(`{"old_attr":"bar"}`),
	})
	checkDiags(t, resp.Diagnostics)

	expected := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty))
	}
}

func TestGRPCProvider_Configure(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().Configure(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.Configure_Response{}, nil)

	resp := p.ConfigureProvider(providers.ConfigureProviderRequest{
		Config: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
	})
	checkDiags(t, resp.Diagnostics)
}

func TestGRPCProvider_Stop(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mockproto.NewMockProviderClient(ctrl)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().Stop(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.Stop_Response{}, nil)

	err := p.Stop()
	if err != nil {
		t.Fatal(err)
	}
}

func TestGRPCProvider_ReadResource(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().ReadResource(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ReadResource_Response{
		NewState: &proto.DynamicValue{
			Msgpack: []byte("\x81\xa4attr\xa3bar"),
		},
	}, nil)

	resp := p.ReadResource(providers.ReadResourceRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
	})

	checkDiags(t, resp.Diagnostics)

	expected := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty))
	}
}

func TestGRPCProvider_ReadResourceJSON(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().ReadResource(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ReadResource_Response{
		NewState: &proto.DynamicValue{
			Json: []byte(`{"attr":"bar"}`),
		},
	}, nil)

	resp := p.ReadResource(providers.ReadResourceRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
	})

	checkDiags(t, resp.Diagnostics)

	expected := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty))
	}
}

// An empty JSON body should decode as a null value of the schema type.
func TestGRPCProvider_ReadEmptyJSON(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	client.EXPECT().ReadResource(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ReadResource_Response{
		NewState: &proto.DynamicValue{
			Json: []byte(``),
		},
	}, nil)

	obj := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("foo"),
	})
	resp := p.ReadResource(providers.ReadResourceRequest{
		TypeName:   "resource",
		PriorState: obj,
	})

	checkDiags(t, resp.Diagnostics)

	expected := cty.NullVal(obj.Type())

	if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty))
	}
}

func TestGRPCProvider_PlanResourceChange(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	expectedPrivate := []byte(`{"meta": "data"}`)

	client.EXPECT().PlanResourceChange(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.PlanResourceChange_Response{
		PlannedState: &proto.DynamicValue{
			Msgpack: []byte("\x81\xa4attr\xa3bar"),
		},
		RequiresReplace: []*proto.AttributePath{
			{
				Steps: []*proto.AttributePath_Step{
					{
						Selector: &proto.AttributePath_Step_AttributeName{
							AttributeName: "attr",
						},
					},
				},
			},
		},
		PlannedPrivate: expectedPrivate,
	}, nil)

	resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
		ProposedNewState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		Config: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
	})

	checkDiags(t, resp.Diagnostics)

	expectedState := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty))
	}

	expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}`
	replace := fmt.Sprintf("%#v", resp.RequiresReplace)
	if expectedReplace != replace {
		t.Fatalf("expected %q, got %q", expectedReplace, replace)
	}

	if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) {
		t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate)
	}
}

func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	expectedPrivate := []byte(`{"meta": "data"}`)

	client.EXPECT().PlanResourceChange(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.PlanResourceChange_Response{
		PlannedState: &proto.DynamicValue{
			Json: []byte(`{"attr":"bar"}`),
		},
		RequiresReplace: []*proto.AttributePath{
			{
				Steps: []*proto.AttributePath_Step{
					{
						Selector: &proto.AttributePath_Step_AttributeName{
							AttributeName: "attr",
						},
					},
				},
			},
		},
		PlannedPrivate: expectedPrivate,
	}, nil)

	resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
		ProposedNewState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		Config: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
	})

	checkDiags(t, resp.Diagnostics)

	expectedState := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty))
	}

	expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}`
	replace := fmt.Sprintf("%#v", resp.RequiresReplace)
	if expectedReplace != replace {
		t.Fatalf("expected %q, got %q", expectedReplace, replace)
	}

	if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) {
		t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate)
	}
}

func TestGRPCProvider_ApplyResourceChange(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	expectedPrivate := []byte(`{"meta": "data"}`)

	client.EXPECT().ApplyResourceChange(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ApplyResourceChange_Response{
		NewState: &proto.DynamicValue{
			Msgpack: []byte("\x81\xa4attr\xa3bar"),
		},
		Private: expectedPrivate,
	}, nil)

	resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
		PlannedState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		Config: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		PlannedPrivate: expectedPrivate,
	})

	checkDiags(t, resp.Diagnostics)

	expectedState := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty))
	}

	if !bytes.Equal(expectedPrivate, resp.Private) {
		t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private)
	}
}

func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	expectedPrivate := []byte(`{"meta": "data"}`)

	client.EXPECT().ApplyResourceChange(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ApplyResourceChange_Response{
		NewState: &proto.DynamicValue{
			Json: []byte(`{"attr":"bar"}`),
		},
		Private: expectedPrivate,
	}, nil)

	resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{
		TypeName: "resource",
		PriorState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("foo"),
		}),
		PlannedState: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		Config: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		PlannedPrivate: expectedPrivate,
	})

	checkDiags(t, resp.Diagnostics)

	expectedState := cty.ObjectVal(map[string]cty.Value{
		"attr": cty.StringVal("bar"),
	})

	if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) {
		t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty))
	}

	if !bytes.Equal(expectedPrivate, resp.Private) {
		t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private)
	}
}

func TestGRPCProvider_ImportResourceState(t *testing.T) {
	client := mockProviderClient(t)
	p := &GRPCProvider{
		client: client,
	}

	expectedPrivate := []byte(`{"meta": "data"}`)

	client.EXPECT().ImportResourceState(
		gomock.Any(),
		gomock.Any(),
	).Return(&proto.ImportResourceState_Response{
		ImportedResources: []*proto.ImportResourceState_ImportedResource{
			{
				TypeName: "resource",
				State: &proto.DynamicValue{
					Msgpack: []byte("\x81\xa4attr\xa3bar"),
				},
				Private: expectedPrivate,
			},
		},
	}, nil)

	resp := p.ImportResourceState(providers.ImportResourceStateRequest{
		TypeName: "resource",
		ID:       "foo",
	})

	checkDiags(t, resp.Diagnostics)

	expectedResource := providers.ImportedResource{
		TypeName: "resource",
		State: cty.ObjectVal(map[string]cty.Value{
			"attr": cty.StringVal("bar"),
		}),
		Private: expectedPrivate,
	}

	imported := resp.ImportedResources[0]
	if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) {
+ t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} +func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadDataSource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + 
+func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} diff --git a/internal/plugin/grpc_provisioner.go b/plugin/grpc_provisioner.go similarity index 95% rename from internal/plugin/grpc_provisioner.go rename to plugin/grpc_provisioner.go index 0a6ad8e632cc..d73214348813 100644 --- a/internal/plugin/grpc_provisioner.go +++ b/plugin/grpc_provisioner.go @@ -7,10 +7,10 @@ import ( "sync" plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/plugin/convert" - "github.com/hashicorp/terraform/internal/provisioners" - proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/provisioners" + proto "github.com/hashicorp/terraform/tfplugin5" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/msgpack" "google.golang.org/grpc" diff --git a/internal/plugin/grpc_provisioner_test.go b/plugin/grpc_provisioner_test.go similarity index 92% rename from internal/plugin/grpc_provisioner_test.go rename to plugin/grpc_provisioner_test.go index 848c9460fa43..dc128f10eda2 100644 --- 
a/internal/plugin/grpc_provisioner_test.go +++ b/plugin/grpc_provisioner_test.go @@ -7,12 +7,12 @@ import ( "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" - "github.com/hashicorp/terraform/internal/provisioners" - proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/provisioners" + proto "github.com/hashicorp/terraform/tfplugin5" "github.com/zclconf/go-cty/cty" - mockproto "github.com/hashicorp/terraform/internal/plugin/mock_proto" + mockproto "github.com/hashicorp/terraform/plugin/mock_proto" ) var _ provisioners.Interface = (*GRPCProvisioner)(nil) diff --git a/plugin/mock_proto/generate.go b/plugin/mock_proto/generate.go new file mode 100644 index 000000000000..8490530d128c --- /dev/null +++ b/plugin/mock_proto/generate.go @@ -0,0 +1,3 @@ +//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/hashicorp/terraform/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer + +package mock_tfplugin5 diff --git a/plugin/mock_proto/mock.go b/plugin/mock_proto/mock.go new file mode 100644 index 000000000000..b91153789933 --- /dev/null +++ b/plugin/mock_proto/mock.go @@ -0,0 +1,623 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/hashicorp/terraform/tfplugin5 (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer) + +// Package mock_tfplugin5 is a generated GoMock package. +package mock_tfplugin5 + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + tfplugin5 "github.com/hashicorp/terraform/tfplugin5" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" +) + +// MockProviderClient is a mock of ProviderClient interface. 
+type MockProviderClient struct { + ctrl *gomock.Controller + recorder *MockProviderClientMockRecorder +} + +// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. +type MockProviderClientMockRecorder struct { + mock *MockProviderClient +} + +// NewMockProviderClient creates a new mock instance. +func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { + mock := &MockProviderClient{ctrl: ctrl} + mock.recorder = &MockProviderClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { + return m.recorder +} + +// ApplyResourceChange mocks base method. +func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin5.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.ApplyResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin5.ApplyResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyResourceChange indicates an expected call of ApplyResourceChange. +func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) +} + +// Configure mocks base method. 
+func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Configure_Request, arg2 ...grpc.CallOption) (*tfplugin5.Configure_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Configure", varargs...) + ret0, _ := ret[0].(*tfplugin5.Configure_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Configure indicates an expected call of Configure. +func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Configure", reflect.TypeOf((*MockProviderClient)(nil).Configure), varargs...) +} + +// GetSchema mocks base method. +func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProviderSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSchema", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetProviderSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSchema indicates an expected call of GetSchema. +func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProviderClient)(nil).GetSchema), varargs...) +} + +// ImportResourceState mocks base method. 
+func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin5.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.ImportResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ImportResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin5.ImportResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImportResourceState indicates an expected call of ImportResourceState. +func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) +} + +// PlanResourceChange mocks base method. +func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin5.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.PlanResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin5.PlanResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PlanResourceChange indicates an expected call of PlanResourceChange. +func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) +} + +// PrepareProviderConfig mocks base method. 
+func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *tfplugin5.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.PrepareProviderConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PrepareProviderConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.PrepareProviderConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareProviderConfig indicates an expected call of PrepareProviderConfig. +func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).PrepareProviderConfig), varargs...) +} + +// ReadDataSource mocks base method. +func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin5.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadDataSource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadDataSource", varargs...) + ret0, _ := ret[0].(*tfplugin5.ReadDataSource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDataSource indicates an expected call of ReadDataSource. +func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) +} + +// ReadResource mocks base method. 
+func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadResource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadResource", varargs...) + ret0, _ := ret[0].(*tfplugin5.ReadResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadResource indicates an expected call of ReadResource. +func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) +} + +// Stop mocks base method. +func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop. +func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProviderClient)(nil).Stop), varargs...) +} + +// UpgradeResourceState mocks base method. 
+func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin5.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.UpgradeResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin5.UpgradeResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpgradeResourceState indicates an expected call of UpgradeResourceState. +func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...) +} + +// ValidateDataSourceConfig mocks base method. +func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateDataSourceConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateDataSourceConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateDataSourceConfig indicates an expected call of ValidateDataSourceConfig. +func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataSourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataSourceConfig), varargs...) 
+} + +// ValidateResourceTypeConfig mocks base method. +func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *tfplugin5.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateResourceTypeConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateResourceTypeConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateResourceTypeConfig indicates an expected call of ValidateResourceTypeConfig. +func (mr *MockProviderClientMockRecorder) ValidateResourceTypeConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceTypeConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceTypeConfig), varargs...) +} + +// MockProvisionerClient is a mock of ProvisionerClient interface. +type MockProvisionerClient struct { + ctrl *gomock.Controller + recorder *MockProvisionerClientMockRecorder +} + +// MockProvisionerClientMockRecorder is the mock recorder for MockProvisionerClient. +type MockProvisionerClientMockRecorder struct { + mock *MockProvisionerClient +} + +// NewMockProvisionerClient creates a new mock instance. +func NewMockProvisionerClient(ctrl *gomock.Controller) *MockProvisionerClient { + mock := &MockProvisionerClient{ctrl: ctrl} + mock.recorder = &MockProvisionerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProvisionerClient) EXPECT() *MockProvisionerClientMockRecorder { + return m.recorder +} + +// GetSchema mocks base method. 
+func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProvisionerSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSchema", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetProvisionerSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSchema indicates an expected call of GetSchema. +func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProvisionerClient)(nil).GetSchema), varargs...) +} + +// ProvisionResource mocks base method. +func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tfplugin5.ProvisionResource_Request, arg2 ...grpc.CallOption) (tfplugin5.Provisioner_ProvisionResourceClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ProvisionResource", varargs...) + ret0, _ := ret[0].(tfplugin5.Provisioner_ProvisionResourceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProvisionResource indicates an expected call of ProvisionResource. +func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionResource", reflect.TypeOf((*MockProvisionerClient)(nil).ProvisionResource), varargs...) +} + +// Stop mocks base method. 
+func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop. +func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProvisionerClient)(nil).Stop), varargs...) +} + +// ValidateProvisionerConfig mocks base method. +func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *tfplugin5.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateProvisionerConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateProvisionerConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateProvisionerConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateProvisionerConfig indicates an expected call of ValidateProvisionerConfig. +func (mr *MockProvisionerClientMockRecorder) ValidateProvisionerConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProvisionerConfig", reflect.TypeOf((*MockProvisionerClient)(nil).ValidateProvisionerConfig), varargs...) +} + +// MockProvisioner_ProvisionResourceClient is a mock of Provisioner_ProvisionResourceClient interface. 
+type MockProvisioner_ProvisionResourceClient struct { + ctrl *gomock.Controller + recorder *MockProvisioner_ProvisionResourceClientMockRecorder +} + +// MockProvisioner_ProvisionResourceClientMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceClient. +type MockProvisioner_ProvisionResourceClientMockRecorder struct { + mock *MockProvisioner_ProvisionResourceClient +} + +// NewMockProvisioner_ProvisionResourceClient creates a new mock instance. +func NewMockProvisioner_ProvisionResourceClient(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceClient { + mock := &MockProvisioner_ProvisionResourceClient{ctrl: ctrl} + mock.recorder = &MockProvisioner_ProvisionResourceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProvisioner_ProvisionResourceClient) EXPECT() *MockProvisioner_ProvisionResourceClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. 
+func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Recv() (*tfplugin5.ProvisionResource_Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*tfplugin5.ProvisionResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. 
+func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Trailer)) +} + +// MockProvisioner_ProvisionResourceServer is a mock of Provisioner_ProvisionResourceServer interface. +type MockProvisioner_ProvisionResourceServer struct { + ctrl *gomock.Controller + recorder *MockProvisioner_ProvisionResourceServerMockRecorder +} + +// MockProvisioner_ProvisionResourceServerMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceServer. +type MockProvisioner_ProvisionResourceServerMockRecorder struct { + mock *MockProvisioner_ProvisionResourceServer +} + +// NewMockProvisioner_ProvisionResourceServer creates a new mock instance. 
+func NewMockProvisioner_ProvisionResourceServer(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceServer { + mock := &MockProvisioner_ProvisionResourceServer{ctrl: ctrl} + mock.recorder = &MockProvisioner_ProvisionResourceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProvisioner_ProvisionResourceServer) EXPECT() *MockProvisioner_ProvisionResourceServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Context)) +} + +// RecvMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).RecvMsg), arg0) +} + +// Send mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *tfplugin5.ProvisionResource_Response) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. 
+func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendMsg), arg0) +} + +// SetHeader mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. 
+func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetTrailer), arg0) +} diff --git a/internal/plugin/plugin.go b/plugin/plugin.go similarity index 89% rename from internal/plugin/plugin.go rename to plugin/plugin.go index 27df5ba0c4b7..3f962f1131a4 100644 --- a/internal/plugin/plugin.go +++ b/plugin/plugin.go @@ -2,7 +2,7 @@ package plugin import ( "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/plugin6" + "github.com/hashicorp/terraform/plugin6" ) // VersionedPlugins includes both protocol 5 and 6 because this is the function diff --git a/plugin/serve.go b/plugin/serve.go new file mode 100644 index 000000000000..5b76ec29fe12 --- /dev/null +++ b/plugin/serve.go @@ -0,0 +1,75 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" + proto "github.com/hashicorp/terraform/tfplugin5" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" + ProvisionerPluginName = "provisioner" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify + // a particular version during their handshake. 
This is the version used when Terraform 0.10 + // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must + // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and + // 0.11. + DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. This should be bumped whenever a change happens in + // one or the other that makes it so that they can't safely communicate. + // This could be adding a new interface value, it could be how + // helper/schema computes diffs, etc. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type GRPCProviderFunc func() proto.ProviderServer +type GRPCProvisionerFunc func() proto.ProvisionerServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + GRPCProvisionerFunc GRPCProvisionerFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. 
+func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, + }) +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + plugins := map[int]plugin.PluginSet{} + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { + plugins[5] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[5]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + if opts.GRPCProvisionerFunc != nil { + plugins[5]["provisioner"] = &GRPCProvisionerPlugin{ + GRPCProvisioner: opts.GRPCProvisionerFunc, + } + } + } + return plugins +} diff --git a/plugin/ui_input.go b/plugin/ui_input.go new file mode 100644 index 000000000000..3469e6a96b4f --- /dev/null +++ b/plugin/ui_input.go @@ -0,0 +1,52 @@ +package plugin + +import ( + "context" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// UIInput is an implementation of terraform.UIInput that communicates +// over RPC. +type UIInput struct { + Client *rpc.Client +} + +func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + var resp UIInputInputResponse + err := i.Client.Call("Plugin.Input", opts, &resp) + if err != nil { + return "", err + } + if resp.Error != nil { + err = resp.Error + return "", err + } + + return resp.Value, nil +} + +type UIInputInputResponse struct { + Value string + Error *plugin.BasicError +} + +// UIInputServer is a net/rpc compatible structure for serving +// a UIInputServer. This should not be used directly. 
+type UIInputServer struct { + UIInput terraform.UIInput +} + +func (s *UIInputServer) Input( + opts *terraform.InputOpts, + reply *UIInputInputResponse) error { + value, err := s.UIInput.Input(context.Background(), opts) + *reply = UIInputInputResponse{ + Value: value, + Error: plugin.NewBasicError(err), + } + + return nil +} diff --git a/plugin/ui_input_test.go b/plugin/ui_input_test.go new file mode 100644 index 000000000000..c6d7036d17ec --- /dev/null +++ b/plugin/ui_input_test.go @@ -0,0 +1,50 @@ +package plugin + +import ( + "context" + "reflect" + "testing" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +func TestUIInput_impl(t *testing.T) { + var _ terraform.UIInput = new(UIInput) +} + +func TestUIInput_input(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + i := new(terraform.MockUIInput) + i.InputReturnString = "foo" + + err := server.RegisterName("Plugin", &UIInputServer{ + UIInput: i, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + input := &UIInput{Client: client} + + opts := &terraform.InputOpts{ + Id: "foo", + } + + v, err := input.Input(context.Background(), opts) + if !i.InputCalled { + t.Fatal("input should be called") + } + if !reflect.DeepEqual(i.InputOpts, opts) { + t.Fatalf("bad: %#v", i.InputOpts) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + + if v != "foo" { + t.Fatalf("bad: %#v", v) + } +} diff --git a/plugin/ui_output.go b/plugin/ui_output.go new file mode 100644 index 000000000000..c222b00cde61 --- /dev/null +++ b/plugin/ui_output.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/terraform/terraform" +) + +// UIOutput is an implementatin of terraform.UIOutput that communicates +// over RPC. +type UIOutput struct { + Client *rpc.Client +} + +func (o *UIOutput) Output(v string) { + o.Client.Call("Plugin.Output", v, new(interface{})) +} + +// UIOutputServer is the RPC server for serving UIOutput. 
+type UIOutputServer struct { + UIOutput terraform.UIOutput +} + +func (s *UIOutputServer) Output( + v string, + reply *interface{}) error { + s.UIOutput.Output(v) + return nil +} diff --git a/internal/plugin/ui_output_test.go b/plugin/ui_output_test.go similarity index 92% rename from internal/plugin/ui_output_test.go rename to plugin/ui_output_test.go index 5d9b8910d5e3..50eadaa02248 100644 --- a/internal/plugin/ui_output_test.go +++ b/plugin/ui_output_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/terraform" ) func TestUIOutput_impl(t *testing.T) { diff --git a/plugin6/convert/diagnostics.go b/plugin6/convert/diagnostics.go new file mode 100644 index 000000000000..1d67bd905888 --- /dev/null +++ b/plugin6/convert/diagnostics.go @@ -0,0 +1,132 @@ +package convert + +import ( + "github.com/hashicorp/terraform/tfdiags" + proto "github.com/hashicorp/terraform/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrorsToProto converts the warnings and errors return by the legacy +// provider to protobuf diagnostics. +func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { + for _, w := range warns { + diags = AppendProtoDiag(diags, w) + } + + for _, e := range errs { + diags = AppendProtoDiag(diags, e) + } + + return diags +} + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. 
+func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + Attribute: ap, + }) + case error: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + }) + case string: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: d, + }) + case *proto.Diagnostic: + diags = append(diags, d) + case []*proto.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. +func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, d := range ds { + var severity tfdiags.Severity + + switch d.Severity { + case proto.Diagnostic_ERROR: + severity = tfdiags.Error + case proto.Diagnostic_WARNING: + severity = tfdiags.Warning + } + + var newDiag tfdiags.Diagnostic + + // if there's an attribute path, we need to create a AttributeValue diagnostic + if d.Attribute != nil { + path := AttributePathToPath(d.Attribute) + newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) + } else { + newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) + } + + diags = diags.Append(newDiag) + } + + return diags +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *proto.AttributePath) cty.Path { + var p cty.Path + for _, step := range ap.Steps { + switch selector := step.Selector.(type) { + case *proto.AttributePath_Step_AttributeName: + p = p.GetAttr(selector.AttributeName) + case *proto.AttributePath_Step_ElementKeyString: + p = p.Index(cty.StringVal(selector.ElementKeyString)) + case *proto.AttributePath_Step_ElementKeyInt: + p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) + } 
+ } + return p +} + +// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. +func PathToAttributePath(p cty.Path) *proto.AttributePath { + ap := &proto.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: selector.Name, + }, + }) + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: key.AsString(), + }, + }) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: v, + }, + }) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/plugin6/convert/diagnostics_test.go b/plugin6/convert/diagnostics_test.go new file mode 100644 index 000000000000..afd114f5aedf --- /dev/null +++ b/plugin6/convert/diagnostics_test.go @@ -0,0 +1,367 @@ +package convert + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/tfdiags" + proto "github.com/hashicorp/terraform/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +var ignoreUnexported = cmpopts.IgnoreUnexported( + proto.Diagnostic{}, + proto.Schema_Block{}, + proto.Schema_NestedBlock{}, + proto.Schema_Attribute{}, +) + +func TestProtoDiagnostics(t *testing.T) { + diags := WarnsAndErrsToProto( + []string{ + "warning 1", + "warning 2", + }, + []error{ + errors.New("error 1"), + errors.New("error 2"), + }, + ) + + expected := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 1", + }, + { + Severity: proto.Diagnostic_WARNING, + Summary: 
"warning 2", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + }, + } + + if !cmp.Equal(expected, diags, ignoreUnexported) { + t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) + } +} + +func TestDiagnostics(t *testing.T) { + type diagFlat struct { + Severity tfdiags.Severity + Attr []interface{} + Summary string + Detail string + } + + tests := map[string]struct { + Cons func([]*proto.Diagnostic) []*proto.Diagnostic + Want []diagFlat + }{ + "nil": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return diags + }, + nil, + }, + "error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + }, + }, + }, + "detailed error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + Detail: "detailed error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + Detail: "detailed error", + }, + }, + }, + "warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + }, + }, + }, + "detailed warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + Detail: "detailed warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + Detail: "detailed warning", + }, + }, + }, + "multi error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: 
proto.Diagnostic_ERROR, + Summary: "first error", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "second error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "first error", + }, + { + Severity: tfdiags.Error, + Summary: "second error", + }, + }, + }, + "warning and error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "warning", + }, + { + Severity: tfdiags.Error, + Summary: "error", + }, + }, + }, + "attr error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + Detail: "error detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attribute_name", + }, + }, + }, + }, + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error", + Detail: "error detail", + Attr: []interface{}{"attribute_name"}, + }, + }, + }, + "multi attr": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + Detail: "error 2 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: 
&proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + Detail: "warning detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: 1, + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 3", + Detail: "error 3 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: "idx", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + ) + + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error 1", + Detail: "error 1 detail", + Attr: []interface{}{"attr"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 2", + Detail: "error 2 detail", + Attr: []interface{}{"attr", "sub"}, + }, + { + Severity: tfdiags.Warning, + Summary: "warning", + Detail: "warning detail", + Attr: []interface{}{"attr", 1, "sub"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 3", + Detail: "error 3 detail", + Attr: []interface{}{"attr", "idx", "sub"}, + }, + }, + }, + } + + flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { + var flat []diagFlat + for _, item := range ds { + desc := item.Description() + + var attr []interface{} + + for _, a := range tfdiags.GetAttribute(item) { + switch step := a.(type) { + case cty.GetAttrStep: + attr = append(attr, step.Name) + case cty.IndexStep: + switch step.Key.Type() { + case cty.Number: + i, _ := 
step.Key.AsBigFloat().Int64() + attr = append(attr, int(i)) + case cty.String: + attr = append(attr, step.Key.AsString()) + } + } + } + + flat = append(flat, diagFlat{ + Severity: item.Severity(), + Attr: attr, + Summary: desc.Summary, + Detail: desc.Detail, + }) + } + return flat + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // we take the + tfDiags := ProtoToDiagnostics(tc.Cons(nil)) + + flat := flattenTFDiags(tfDiags) + + if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} diff --git a/plugin6/convert/schema.go b/plugin6/convert/schema.go new file mode 100644 index 000000000000..1eaca37befc4 --- /dev/null +++ b/plugin6/convert/schema.go @@ -0,0 +1,297 @@ +package convert + +import ( + "encoding/json" + "reflect" + "sort" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + proto "github.com/hashicorp/terraform/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. 
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != cty.NilType { + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + attr.Type = ty + } + + if a.NestedType != nil { + attr.NestedType = configschemaObjectToProto(a.NestedType) + } + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) proto.StringKind { + switch k { + default: + return proto.StringKind_PLAIN + case configschema.StringMarkdown: + return proto.StringKind_MARKDOWN + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: 
int64(b.MaxItems), + } +} + +// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. +func ProtoToProviderSchema(s *proto.Schema) providers.Schema { + return providers.Schema{ + Version: s.Version, + Block: ProtoToConfigSchema(s.Block), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a terraform *configschema.Block. +func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != nil { + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + } + + if a.NestedType != nil { + attr.NestedType = protoObjectToConfigSchema(a.NestedType) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k proto.StringKind) configschema.StringKind { + switch k { + default: + return configschema.StringPlain + case proto.StringKind_MARKDOWN: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: 
+ nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +func protoObjectToConfigSchema(b *proto.Schema_Object) *configschema.Object { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_Object_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_Object_LIST: + nesting = configschema.NestingList + case proto.Schema_Object_MAP: + nesting = configschema.NestingMap + case proto.Schema_Object_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + object := &configschema.Object{ + Attributes: make(map[string]*configschema.Attribute), + Nesting: nesting, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != nil { + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + } + + if a.NestedType != nil { + attr.NestedType = protoObjectToConfigSchema(a.NestedType) + } + + object.Attributes[a.Name] = attr + } + + return object +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. 
+func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} + +func configschemaObjectToProto(b *configschema.Object) *proto.Schema_Object { + var nesting proto.Schema_Object_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_Object_SINGLE + case configschema.NestingList: + nesting = proto.Schema_Object_LIST + case configschema.NestingSet: + nesting = proto.Schema_Object_SET + case configschema.NestingMap: + nesting = proto.Schema_Object_MAP + default: + nesting = proto.Schema_Object_INVALID + } + + attributes := make([]*proto.Schema_Attribute, 0, len(b.Attributes)) + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != cty.NilType { + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + attr.Type = ty + } + + if a.NestedType != nil { + attr.NestedType = configschemaObjectToProto(a.NestedType) + } + + attributes = append(attributes, attr) + } + + return &proto.Schema_Object{ + Attributes: attributes, + Nesting: nesting, + } +} diff --git a/plugin6/convert/schema_test.go b/plugin6/convert/schema_test.go new file mode 100644 index 000000000000..f132995797ba --- /dev/null +++ b/plugin6/convert/schema_test.go @@ -0,0 +1,566 @@ +package convert + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/configs/configschema" + proto "github.com/hashicorp/terraform/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +var ( + equateEmpty = cmpopts.EquateEmpty() + 
typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +// Test that we can convert configschema to protobuf types and back again. +func TestConvertSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Block *proto.Schema_Block + Want *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + { + Name: "nested_type", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + Required: true, + }, + { + Name: "deeply_nested_type", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "first_level", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + Computed: true, + }, + }, + }, + Required: true, + }, + { + Name: "nested_list", + NestedType: 
&proto.Schema_Object{ + Nesting: proto.Schema_Object_LIST, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, + Required: true, + }, + { + Name: "nested_set", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SET, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, + Required: true, + }, + { + Name: "nested_map", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_MAP, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + "nested_type": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + Nesting: configschema.NestingSingle, + }, + Required: true, + }, + "deeply_nested_type": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "first_level": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + 
"required": { + Type: cty.Number, + Required: true, + }, + }, + }, + Computed: true, + }, + }, + Nesting: configschema.NestingSingle, + }, + Required: true, + }, + "nested_list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + "nested_map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + "nested_set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: 
cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ProtoToConfigSchema(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that we can convert configschema to protobuf types and back again. 
+func TestConvertProtoSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Want *proto.Schema_Block + Block *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ConfigSchemaToProto(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) + } + }) + } +} diff --git a/internal/plugin6/doc.go b/plugin6/doc.go similarity index 100% rename from internal/plugin6/doc.go rename to plugin6/doc.go diff --git a/plugin6/grpc_error.go b/plugin6/grpc_error.go new file mode 100644 index 000000000000..4781d8216d04 --- /dev/null +++ b/plugin6/grpc_error.go @@ -0,0 +1,74 @@ +package plugin6 + +import ( + "fmt" + "path" + "runtime" + + "github.com/hashicorp/terraform/tfdiags" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcErr extracts some known error types and formats 
them into better +// representations for core. This must only be called from plugin methods. +// Since we don't use RPC status errors for the plugin protocol, these do not +// contain any useful details, and we can return some text that at least +// indicates the plugin call and possible error condition. +func grpcErr(err error) (diags tfdiags.Diagnostics) { + if err == nil { + return + } + + // extract the method name from the caller. + pc, _, _, ok := runtime.Caller(1) + if !ok { + logger.Error("unknown grpc call", "error", err) + return diags.Append(err) + } + + f := runtime.FuncForPC(pc) + + // Function names will contain the full import path. Take the last + // segment, which will let users know which method was being called. + _, requestName := path.Split(f.Name()) + + // Here we can at least correlate the error in the logs to a particular binary. + logger.Error(requestName, "error", err) + + // TODO: while this expands the error codes into somewhat better messages, + // this still does not easily link the error to an actual user-recognizable + // plugin. The grpc plugin does not know its configured name, and the + // errors are in a list of diagnostics, making it hard for the caller to + // annotate the returned errors. + switch status.Code(err) { + case codes.Unavailable: + // This case is when the plugin has stopped running for some reason, + // and is usually the result of a crash. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plugin did not respond", + fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. 
"+ + "The plugin logs may contain more details.", requestName), + )) + case codes.Canceled: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Request cancelled", + fmt.Sprintf("The %s request was cancelled.", requestName), + )) + case codes.Unimplemented: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported plugin method", + fmt.Sprintf("The %s method is not supported by this plugin.", requestName), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plugin error", + fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), + )) + } + return +} diff --git a/plugin6/grpc_provider.go b/plugin6/grpc_provider.go new file mode 100644 index 000000000000..bc5257e90e0c --- /dev/null +++ b/plugin6/grpc_provider.go @@ -0,0 +1,693 @@ +package plugin6 + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/plugin6/convert" + "github.com/hashicorp/terraform/providers" + proto6 "github.com/hashicorp/terraform/tfplugin6" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +var logger = logging.HCLogger() + +// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. +type GRPCProviderPlugin struct { + plugin.Plugin + GRPCProvider func() proto6.ProviderServer +} + +func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvider{ + client: proto6.NewProviderClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto6.RegisterProviderServer(s, p.GRPCProvider()) + return nil +} + +// GRPCProvider handles the client, or core side of the plugin rpc connection. 
+// The GRPCProvider methods are mostly a translation layer between the +// terraform providers types and the grpc proto types, directly converting +// between the two. +type GRPCProvider struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + // TestServer contains a grpc.Server to close when the GRPCProvider is being + // used in an end to end test of a provider. + TestServer *grpc.Server + + // Proto client use to make the grpc service calls. + client proto6.ProviderClient + + // this context is created by the plugin package, and is canceled when the + // plugin process ends. + ctx context.Context + + // schema stores the schema for this provider. This is used to properly + // serialize the state for requests. + mu sync.Mutex + schemas providers.GetProviderSchemaResponse +} + +func New(client proto6.ProviderClient, ctx context.Context) GRPCProvider { + return GRPCProvider{ + client: client, + ctx: ctx, + } +} + +// getSchema is used internally to get the cached provider schema. +func (p *GRPCProvider) getSchema() providers.GetProviderSchemaResponse { + p.mu.Lock() + // unlock inline in case GetProviderSchema needs to be called + if p.schemas.Provider.Block != nil { + p.mu.Unlock() + return p.schemas + } + p.mu.Unlock() + + return p.GetProviderSchema() +} + +func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) { + logger.Trace("GRPCProvider.v6: GetProviderSchema") + p.mu.Lock() + defer p.mu.Unlock() + + if p.schemas.Provider.Block != nil { + return p.schemas + } + + resp.ResourceTypes = make(map[string]providers.Schema) + resp.DataSources = make(map[string]providers.Schema) + + // Some providers may generate quite large schemas, and the internal default + // grpc response size limit is 4MB. 
64MB should cover most any use case, and + // if we get providers nearing that we may want to consider a finer-grained + // API to fetch individual resource schemas. + // Note: this option is marked as EXPERIMENTAL in the grpc API. We keep + // this for compatibility, but recent providers all set the max message + // size much higher on the server side, which is the supported method for + // determining payload size. + const maxRecvSize = 64 << 20 + protoResp, err := p.client.GetProviderSchema(p.ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if resp.Diagnostics.HasErrors() { + return resp + } + + if protoResp.Provider == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) + return resp + } + + resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) + if protoResp.ProviderMeta == nil { + logger.Debug("No provider meta schema returned") + } else { + resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta) + } + + for name, res := range protoResp.ResourceSchemas { + resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) + } + + for name, data := range protoResp.DataSourceSchemas { + resp.DataSources[name] = convert.ProtoToProviderSchema(data) + } + + if protoResp.ServerCapabilities != nil { + resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy + } + + p.schemas = resp + + return resp +} + +func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + logger.Trace("GRPCProvider.v6: ValidateProviderConfig") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + ty := 
schema.Provider.Block.ImpliedType() + + mp, err := msgpack.Marshal(r.Config, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ValidateProviderConfig_Request{ + Config: &proto6.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateProviderConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + logger.Trace("GRPCProvider.v6: ValidateResourceConfig") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resourceSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ValidateResourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto6.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + logger.Trace("GRPCProvider.v6: ValidateDataResourceConfig") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := 
schema.DataSources[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ValidateDataResourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto6.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataResourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + logger.Trace("GRPCProvider.v6: UpgradeResourceState") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + protoReq := &proto6.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto6.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + ty := resSchema.Block.ImpliedType() + resp.UpgradedState = cty.NullVal(ty) + if protoResp.UpgradedState == nil { + return resp + } + + state, err := decodeDynamicValue(protoResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = 
state + + return resp +} + +func (p *GRPCProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + logger.Trace("GRPCProvider.v6: ConfigureProvider") + + schema := p.getSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ConfigureProvider_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto6.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.ConfigureProvider(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + logger.Trace("GRPCProvider.v6: Stop") + + resp, err := p.client.StopProvider(p.ctx, new(proto6.StopProvider_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + logger.Trace("GRPCProvider.v6: ReadResource") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type " + r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto6.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + if 
metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + logger.Trace("GRPCProvider.v6: PlanResourceChange") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + capabilities := schema.ServerCapabilities + + // If the provider doesn't support planning a destroy operation, we can + // return immediately. 
+ if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { + resp.PlannedState = r.ProposedNewState + resp.PlannedPrivate = r.PriorPrivate + return resp + } + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto6.DynamicValue{Msgpack: priorMP}, + Config: &proto6.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto6.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + logger.Trace("GRPCProvider.v6: ApplyResourceChange") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto6.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto6.DynamicValue{Msgpack: plannedMP}, + Config: &proto6.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + 
resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + logger.Trace("GRPCProvider.v6: ImportResourceState") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + protoReq := &proto6.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + continue + } + + state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + logger.Trace("GRPCProvider.v6: ReadDataSource") + + schema := p.getSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + schema.Diagnostics = schema.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + } + + metaSchema := schema.ProviderMeta + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics 
= resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto6.DynamicValue{ + Msgpack: config, + }, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + logger.Trace("GRPCProvider.v6: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + logger.Debug("provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} + +// Decode a DynamicValue from either the JSON or MsgPack encoding. 
+func decodeDynamicValue(v *proto6.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} diff --git a/plugin6/grpc_provider_test.go b/plugin6/grpc_provider_test.go new file mode 100644 index 000000000000..e0275f62e150 --- /dev/null +++ b/plugin6/grpc_provider_test.go @@ -0,0 +1,784 @@ +package plugin6 + +import ( + "bytes" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + mockproto "github.com/hashicorp/terraform/plugin6/mock_proto" + proto "github.com/hashicorp/terraform/tfplugin6" +) + +var _ providers.Interface = (*GRPCProvider)(nil) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +func mockProviderClient(t *testing.T) *mockproto.MockProviderClient { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + // we always need a GetSchema method + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(providerProtoSchema(), nil) + + return client +} + +func checkDiags(t *testing.T, d tfdiags.Diagnostics) { + t.Helper() + if d.HasErrors() { + t.Fatal(d.Err()) + } +} + +// checkDiagsHasError ensures error diagnostics are present or fails the test. 
+func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) { + t.Helper() + + if !d.HasErrors() { + t.Fatal("expected error diagnostics") + } +} + +func providerProtoSchema() *proto.GetProviderSchema_Response { + return &proto.GetProviderSchema_Response{ + Provider: &proto.Schema{ + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + ResourceSchemas: map[string]*proto.Schema{ + "resource": { + Version: 1, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + }, + DataSourceSchemas: map[string]*proto.Schema{ + "data": { + Version: 1, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + }, + } +} + +func TestGRPCProvider_GetSchema(t *testing.T) { + p := &GRPCProvider{ + client: mockProviderClient(t), + } + + resp := p.GetProviderSchema() + checkDiags(t, resp.Diagnostics) +} + +// Ensure that gRPC errors are returned early. +// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error")) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +// Ensure that provider error diagnostics are returned early. 
+// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{ + Diagnostics: []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_ERROR, + Summary: "error summary", + Detail: "error detail", + }, + }, + // Trigger potential panics + Provider: &proto.Schema{}, + }, nil) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +func TestGRPCProvider_PrepareProviderConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateProviderConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateProviderConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg}) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateResourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateResourceConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateResourceConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ + TypeName: "resource", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateDataResourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateDataResourceConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateDataResourceConfig_Response{}, nil) + + cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ + TypeName: "data", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_UpgradeResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_Configure(t *testing.T) { + client := mockProviderClient(t) + p := 
&GRPCProvider{ + client: client, + } + + client.EXPECT().ConfigureProvider( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ConfigureProvider_Response{}, nil) + + resp := p.ConfigureProvider(providers.ConfigureProviderRequest{ + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_Stop(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().StopProvider( + gomock.Any(), + gomock.Any(), + ).Return(&proto.StopProvider_Response{}, nil) + + err := p.Stop() + if err != nil { + t.Fatal(err) + } +} + +func TestGRPCProvider_ReadResource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadResourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": 
cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadEmptyJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(``), + }, + }, nil) + + obj := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }) + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: obj, + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.NullVal(obj.Type()) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_PlanResourceChange(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + 
Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + 
t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_ApplyResourceChange(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} +func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + 
).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} + +func TestGRPCProvider_ImportResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { 
+ t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} +func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadDataSource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + 
+func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} diff --git a/plugin6/mock_proto/generate.go b/plugin6/mock_proto/generate.go new file mode 100644 index 000000000000..27c2f3c1dd0b --- /dev/null +++ b/plugin6/mock_proto/generate.go @@ -0,0 +1,3 @@ +//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/hashicorp/terraform/tfplugin6 ProviderClient + +package mock_tfplugin6 diff --git a/plugin6/mock_proto/mock.go b/plugin6/mock_proto/mock.go new file mode 100644 index 000000000000..b6b6e50739c0 --- /dev/null +++ b/plugin6/mock_proto/mock.go @@ -0,0 +1,277 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/hashicorp/terraform/tfplugin6 (interfaces: ProviderClient) + +// Package mock_tfplugin6 is a generated GoMock package. +package mock_tfplugin6 + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + tfplugin6 "github.com/hashicorp/terraform/tfplugin6" + grpc "google.golang.org/grpc" +) + +// MockProviderClient is a mock of ProviderClient interface. +type MockProviderClient struct { + ctrl *gomock.Controller + recorder *MockProviderClientMockRecorder +} + +// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. 
+type MockProviderClientMockRecorder struct { + mock *MockProviderClient +} + +// NewMockProviderClient creates a new mock instance. +func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { + mock := &MockProviderClient{ctrl: ctrl} + mock.recorder = &MockProviderClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { + return m.recorder +} + +// ApplyResourceChange mocks base method. +func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin6.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.ApplyResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin6.ApplyResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyResourceChange indicates an expected call of ApplyResourceChange. +func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) +} + +// ConfigureProvider mocks base method. +func (m *MockProviderClient) ConfigureProvider(arg0 context.Context, arg1 *tfplugin6.ConfigureProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.ConfigureProvider_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ConfigureProvider", varargs...) 
+ ret0, _ := ret[0].(*tfplugin6.ConfigureProvider_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ConfigureProvider indicates an expected call of ConfigureProvider. +func (mr *MockProviderClientMockRecorder) ConfigureProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureProvider", reflect.TypeOf((*MockProviderClient)(nil).ConfigureProvider), varargs...) +} + +// GetProviderSchema mocks base method. +func (m *MockProviderClient) GetProviderSchema(arg0 context.Context, arg1 *tfplugin6.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetProviderSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetProviderSchema", varargs...) + ret0, _ := ret[0].(*tfplugin6.GetProviderSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProviderSchema indicates an expected call of GetProviderSchema. +func (mr *MockProviderClientMockRecorder) GetProviderSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProviderSchema", reflect.TypeOf((*MockProviderClient)(nil).GetProviderSchema), varargs...) +} + +// ImportResourceState mocks base method. +func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin6.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.ImportResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ImportResourceState", varargs...) 
+ ret0, _ := ret[0].(*tfplugin6.ImportResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImportResourceState indicates an expected call of ImportResourceState. +func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) +} + +// PlanResourceChange mocks base method. +func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin6.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.PlanResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin6.PlanResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PlanResourceChange indicates an expected call of PlanResourceChange. +func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) +} + +// ReadDataSource mocks base method. +func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin6.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadDataSource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadDataSource", varargs...) 
+ ret0, _ := ret[0].(*tfplugin6.ReadDataSource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDataSource indicates an expected call of ReadDataSource. +func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) +} + +// ReadResource mocks base method. +func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin6.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadResource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadResource", varargs...) + ret0, _ := ret[0].(*tfplugin6.ReadResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadResource indicates an expected call of ReadResource. +func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) +} + +// StopProvider mocks base method. +func (m *MockProviderClient) StopProvider(arg0 context.Context, arg1 *tfplugin6.StopProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.StopProvider_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "StopProvider", varargs...) + ret0, _ := ret[0].(*tfplugin6.StopProvider_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StopProvider indicates an expected call of StopProvider. 
+func (mr *MockProviderClientMockRecorder) StopProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopProvider", reflect.TypeOf((*MockProviderClient)(nil).StopProvider), varargs...) +} + +// UpgradeResourceState mocks base method. +func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin6.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.UpgradeResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin6.UpgradeResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpgradeResourceState indicates an expected call of UpgradeResourceState. +func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...) +} + +// ValidateDataResourceConfig mocks base method. +func (m *MockProviderClient) ValidateDataResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateDataResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateDataResourceConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateDataResourceConfig", varargs...) + ret0, _ := ret[0].(*tfplugin6.ValidateDataResourceConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateDataResourceConfig indicates an expected call of ValidateDataResourceConfig. 
+func (mr *MockProviderClientMockRecorder) ValidateDataResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataResourceConfig), varargs...) +} + +// ValidateProviderConfig mocks base method. +func (m *MockProviderClient) ValidateProviderConfig(arg0 context.Context, arg1 *tfplugin6.ValidateProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateProviderConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateProviderConfig", varargs...) + ret0, _ := ret[0].(*tfplugin6.ValidateProviderConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateProviderConfig indicates an expected call of ValidateProviderConfig. +func (mr *MockProviderClientMockRecorder) ValidateProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateProviderConfig), varargs...) +} + +// ValidateResourceConfig mocks base method. +func (m *MockProviderClient) ValidateResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateResourceConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateResourceConfig", varargs...) 
+ ret0, _ := ret[0].(*tfplugin6.ValidateResourceConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateResourceConfig indicates an expected call of ValidateResourceConfig. +func (mr *MockProviderClientMockRecorder) ValidateResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceConfig), varargs...) +} diff --git a/plugin6/serve.go b/plugin6/serve.go new file mode 100644 index 000000000000..881c52237bb0 --- /dev/null +++ b/plugin6/serve.go @@ -0,0 +1,63 @@ +package plugin6 + +import ( + "github.com/hashicorp/go-plugin" + proto "github.com/hashicorp/terraform/tfplugin6" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients + // that don't specify a particular version during their handshake. Since we + // explicitly set VersionedPlugins in Serve, this number does not need to + // change with the protocol version and can effectively stay 4 forever + // (unless we need the "biggest hammer" approach to break all provider + // compatibility). + DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type GRPCProviderFunc func() proto.ProviderServer + +// ServeOpts are the configurations to serve a plugin. 
+type ServeOpts struct { + GRPCProviderFunc GRPCProviderFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, + }) +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + plugins := map[int]plugin.PluginSet{} + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil { + plugins[6] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[6]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + } + return plugins +} diff --git a/plugins.go b/plugins.go index be576e81ac51..47ae2e4f61d9 100644 --- a/plugins.go +++ b/plugins.go @@ -6,7 +6,7 @@ import ( "path/filepath" "runtime" - "github.com/hashicorp/terraform/internal/command/cliconfig" + "github.com/hashicorp/terraform/command/cliconfig" ) // globalPluginDirs returns directories that should be searched for diff --git a/provider-simple-v6/main/main.go b/provider-simple-v6/main/main.go new file mode 100644 index 000000000000..107846b7064e --- /dev/null +++ b/provider-simple-v6/main/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "github.com/hashicorp/terraform/grpcwrap" + plugin "github.com/hashicorp/terraform/plugin6" + simple "github.com/hashicorp/terraform/provider-simple-v6" + "github.com/hashicorp/terraform/tfplugin6" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + GRPCProviderFunc: func() tfplugin6.ProviderServer { + return grpcwrap.Provider6(simple.Provider()) + }, + }) +} diff --git a/provider-simple-v6/provider.go b/provider-simple-v6/provider.go new file mode 100644 index 000000000000..f6b1db61c645 --- /dev/null +++ b/provider-simple-v6/provider.go @@ -0,0 +1,147 @@ +// simple provider a minimal provider implementation for testing +package simple + 
+import ( + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +type simple struct { + schema providers.GetProviderSchemaResponse +} + +func Provider() providers.Interface { + simpleResource := providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Computed: true, + Type: cty.String, + }, + "value": { + Optional: true, + Type: cty.String, + }, + }, + }, + } + + return simple{ + schema: providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: nil, + }, + ResourceTypes: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + DataSources: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + ServerCapabilities: providers.ServerCapabilities{ + PlanDestroy: true, + }, + }, + } +} + +func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse { + return s.schema +} + +func (s simple) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + return resp +} + +func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + return resp +} + +func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + return resp +} + +func (p simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) + resp.Diagnostics = resp.Diagnostics.Append(err) + resp.UpgradedState = val + return resp +} + +func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + 
return resp +} + +func (s simple) Stop() error { + return nil +} + +func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // just return the same state we received + resp.NewState = req.PriorState + return resp +} + +func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + if req.ProposedNewState.IsNull() { + // destroy op + resp.PlannedState = req.ProposedNewState + + // signal that this resource was properly planned for destruction, + // verifying that the schema capabilities with PlanDestroy took effect. + resp.PlannedPrivate = []byte("destroy planned") + return resp + } + + m := req.ProposedNewState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(m) + return resp +} + +func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + // make sure this was transferred from the plan action + if string(req.PlannedPrivate) != "destroy planned" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("resource not planned for destroy, private data %q", req.PlannedPrivate)) + } + + resp.NewState = req.PlannedState + return resp + } + + m := req.PlannedState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.StringVal(time.Now().String()) + } + resp.NewState = cty.ObjectVal(m) + + return resp +} + +func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported")) + return resp +} + +func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("static_id") + resp.State = cty.ObjectVal(m) + return resp +} + +func (s simple) Close() error { + return 
nil +} diff --git a/provider-simple/main/main.go b/provider-simple/main/main.go new file mode 100644 index 000000000000..d4c40b04af33 --- /dev/null +++ b/provider-simple/main/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "github.com/hashicorp/terraform/grpcwrap" + "github.com/hashicorp/terraform/plugin" + simple "github.com/hashicorp/terraform/provider-simple" + "github.com/hashicorp/terraform/tfplugin5" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + GRPCProviderFunc: func() tfplugin5.ProviderServer { + return grpcwrap.Provider(simple.Provider()) + }, + }) +} diff --git a/provider-simple/provider.go b/provider-simple/provider.go new file mode 100644 index 000000000000..83b9d8cb3cd0 --- /dev/null +++ b/provider-simple/provider.go @@ -0,0 +1,138 @@ +// simple provider a minimal provider implementation for testing +package simple + +import ( + "errors" + "time" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +type simple struct { + schema providers.GetProviderSchemaResponse +} + +func Provider() providers.Interface { + simpleResource := providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Computed: true, + Type: cty.String, + }, + "value": { + Optional: true, + Type: cty.String, + }, + }, + }, + } + + return simple{ + schema: providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: nil, + }, + ResourceTypes: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + DataSources: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + ServerCapabilities: providers.ServerCapabilities{ + PlanDestroy: true, + }, + }, + } +} + +func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse { + return s.schema +} + +func (s simple) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) (resp 
providers.ValidateProviderConfigResponse) { + return resp +} + +func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + return resp +} + +func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + return resp +} + +func (p simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) + resp.Diagnostics = resp.Diagnostics.Append(err) + resp.UpgradedState = val + return resp +} + +func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + return resp +} + +func (s simple) Stop() error { + return nil +} + +func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // just return the same state we received + resp.NewState = req.PriorState + return resp +} + +func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + if req.ProposedNewState.IsNull() { + // destroy op + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + m := req.ProposedNewState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(m) + return resp +} + +func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + resp.NewState = req.PlannedState + return resp + } + + m := req.PlannedState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.StringVal(time.Now().String()) + } + resp.NewState = cty.ObjectVal(m) + + return resp +} + +func (s simple) 
ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported")) + return resp +} + +func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("static_id") + resp.State = cty.ObjectVal(m) + return resp +} + +func (s simple) Close() error { + return nil +} diff --git a/provider-terraform/main/main.go b/provider-terraform/main/main.go new file mode 100644 index 000000000000..7afea2f56626 --- /dev/null +++ b/provider-terraform/main/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/terraform" + "github.com/hashicorp/terraform/grpcwrap" + "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/tfplugin5" +) + +func main() { + // Provide a binary version of the internal terraform provider for testing + plugin.Serve(&plugin.ServeOpts{ + GRPCProviderFunc: func() tfplugin5.ProviderServer { + return grpcwrap.Provider(terraform.NewProvider()) + }, + }) +} diff --git a/provider_source.go b/provider_source.go index f27ca54b2a79..200d9f619739 100644 --- a/provider_source.go +++ b/provider_source.go @@ -10,10 +10,10 @@ import ( "github.com/apparentlymart/go-userdirs/userdirs" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/command/cliconfig" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/tfdiags" ) // providerSource constructs a provider source based on a combination of the diff --git a/internal/providercache/cached_provider.go b/providercache/cached_provider.go 
similarity index 98% rename from internal/providercache/cached_provider.go rename to providercache/cached_provider.go index 0adbef21ba56..e74abe46b1aa 100644 --- a/internal/providercache/cached_provider.go +++ b/providercache/cached_provider.go @@ -6,8 +6,8 @@ import ( "path/filepath" "strings" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) // CachedProvider represents a provider package in a cache directory. diff --git a/internal/providercache/cached_provider_test.go b/providercache/cached_provider_test.go similarity index 97% rename from internal/providercache/cached_provider_test.go rename to providercache/cached_provider_test.go index 5e6f29fb70a6..d53f3cbecf15 100644 --- a/internal/providercache/cached_provider_test.go +++ b/providercache/cached_provider_test.go @@ -3,8 +3,8 @@ package providercache import ( "testing" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) func TestCachedProviderHash(t *testing.T) { diff --git a/internal/providercache/dir.go b/providercache/dir.go similarity index 98% rename from internal/providercache/dir.go rename to providercache/dir.go index f58184aa2156..2f3b8f8084a3 100644 --- a/internal/providercache/dir.go +++ b/providercache/dir.go @@ -5,8 +5,8 @@ import ( "path/filepath" "sort" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) // Dir represents a single local filesystem directory containing cached diff --git a/internal/providercache/dir_modify.go b/providercache/dir_modify.go similarity index 98% rename from internal/providercache/dir_modify.go rename to providercache/dir_modify.go 
index 5ac79ba4f74f..29ae18e37cc8 100644 --- a/internal/providercache/dir_modify.go +++ b/providercache/dir_modify.go @@ -5,7 +5,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/getproviders" ) // InstallPackage takes a metadata object describing a package available for diff --git a/internal/providercache/dir_modify_test.go b/providercache/dir_modify_test.go similarity index 97% rename from internal/providercache/dir_modify_test.go rename to providercache/dir_modify_test.go index 6e7821b575c1..681895b76c98 100644 --- a/internal/providercache/dir_modify_test.go +++ b/providercache/dir_modify_test.go @@ -8,8 +8,8 @@ import ( "github.com/apparentlymart/go-versions/versions" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) func TestInstallPackage(t *testing.T) { diff --git a/internal/providercache/dir_test.go b/providercache/dir_test.go similarity index 98% rename from internal/providercache/dir_test.go rename to providercache/dir_test.go index 799a149e3290..f757c113f494 100644 --- a/internal/providercache/dir_test.go +++ b/providercache/dir_test.go @@ -6,8 +6,8 @@ import ( "github.com/apparentlymart/go-versions/versions" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) func TestDirReading(t *testing.T) { diff --git a/internal/providercache/doc.go b/providercache/doc.go similarity index 100% rename from internal/providercache/doc.go rename to providercache/doc.go diff --git a/internal/providercache/installer.go b/providercache/installer.go similarity index 99% rename from internal/providercache/installer.go rename to providercache/installer.go index 
62e57cc2b233..68f041ac2546 100644 --- a/internal/providercache/installer.go +++ b/providercache/installer.go @@ -9,10 +9,10 @@ import ( "github.com/apparentlymart/go-versions/versions" - "github.com/hashicorp/terraform/internal/addrs" - copydir "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + copydir "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" ) // Installer is the main type in this package, representing a provider installer diff --git a/internal/providercache/installer_events.go b/providercache/installer_events.go similarity index 98% rename from internal/providercache/installer_events.go rename to providercache/installer_events.go index 8fc579af2666..26f7bd541346 100644 --- a/internal/providercache/installer_events.go +++ b/providercache/installer_events.go @@ -3,8 +3,8 @@ package providercache import ( "context" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) // InstallerEvents is a collection of function references that can be diff --git a/internal/providercache/installer_events_test.go b/providercache/installer_events_test.go similarity index 98% rename from internal/providercache/installer_events_test.go rename to providercache/installer_events_test.go index cde5b7f0abf5..21a13ce1590c 100644 --- a/internal/providercache/installer_events_test.go +++ b/providercache/installer_events_test.go @@ -1,8 +1,8 @@ package providercache import ( - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" ) type testInstallerEventLogItem struct { diff --git 
a/internal/providercache/installer_test.go b/providercache/installer_test.go similarity index 99% rename from internal/providercache/installer_test.go rename to providercache/installer_test.go index c0cfd7b03070..abcbb4f318b2 100644 --- a/internal/providercache/installer_test.go +++ b/providercache/installer_test.go @@ -18,9 +18,9 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/depsfile" - "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/depsfile" + "github.com/hashicorp/terraform/getproviders" ) func TestEnsureProviderVersions(t *testing.T) { diff --git a/internal/providercache/package_install.go b/providercache/package_install.go similarity index 98% rename from internal/providercache/package_install.go rename to providercache/package_install.go index 89ef862cec1f..5930fe311279 100644 --- a/internal/providercache/package_install.go +++ b/providercache/package_install.go @@ -10,9 +10,9 @@ import ( getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/internal/copy" - "github.com/hashicorp/terraform/internal/getproviders" - "github.com/hashicorp/terraform/internal/httpclient" + "github.com/hashicorp/terraform/copy" + "github.com/hashicorp/terraform/getproviders" + "github.com/hashicorp/terraform/httpclient" ) // We borrow the "unpack a zip file into a target directory" logic from diff --git a/internal/providercache/testdata/beep-provider-other-platform/terraform-provider-beep b/providercache/testdata/beep-provider-other-platform/terraform-provider-beep similarity index 100% rename from internal/providercache/testdata/beep-provider-other-platform/terraform-provider-beep rename to providercache/testdata/beep-provider-other-platform/terraform-provider-beep diff --git 
a/internal/providercache/testdata/beep-provider/terraform-provider-beep b/providercache/testdata/beep-provider/terraform-provider-beep similarity index 100% rename from internal/providercache/testdata/beep-provider/terraform-provider-beep rename to providercache/testdata/beep-provider/terraform-provider-beep diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy b/providercache/testdata/cachedir/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy rename to providercache/testdata/cachedir/registry.terraform.io/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/invalid 
b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/invalid similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/invalid rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/invalid diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid.zip diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip diff --git 
a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random rename to providercache/testdata/cachedir/registry.terraform.io/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random diff --git a/internal/providercache/testdata/cachedir/registry.terraform.io/missing/executable/2.0.0/linux_amd64/executable b/providercache/testdata/cachedir/registry.terraform.io/missing/executable/2.0.0/linux_amd64/executable similarity index 100% rename from internal/providercache/testdata/cachedir/registry.terraform.io/missing/executable/2.0.0/linux_amd64/executable rename to providercache/testdata/cachedir/registry.terraform.io/missing/executable/2.0.0/linux_amd64/executable diff --git a/internal/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt b/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt similarity index 100% rename from internal/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt 
rename to providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt diff --git a/internal/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud b/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud similarity index 100% rename from internal/providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud rename to providercache/testdata/cachedir/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud diff --git a/internal/providercache/testdata/terraform-provider-null_2.1.0_linux_amd64.zip b/providercache/testdata/terraform-provider-null_2.1.0_linux_amd64.zip similarity index 100% rename from internal/providercache/testdata/terraform-provider-null_2.1.0_linux_amd64.zip rename to providercache/testdata/terraform-provider-null_2.1.0_linux_amd64.zip diff --git a/internal/providers/addressed_types.go b/providers/addressed_types.go similarity index 93% rename from internal/providers/addressed_types.go rename to providers/addressed_types.go index 8efa82ca3e72..85ff4c962d35 100644 --- a/internal/providers/addressed_types.go +++ b/providers/addressed_types.go @@ -3,7 +3,7 @@ package providers import ( "sort" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // AddressedTypesAbs is a helper that extracts all of the distinct provider diff --git a/internal/providers/addressed_types_test.go b/providers/addressed_types_test.go similarity index 95% rename from internal/providers/addressed_types_test.go rename to providers/addressed_types_test.go index 3bb47667050e..0d45f44b33fb 100644 --- a/internal/providers/addressed_types_test.go +++ b/providers/addressed_types_test.go @@ -5,7 +5,7 @@ import ( "github.com/go-test/deep" - 
"github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) func TestAddressedTypesAbs(t *testing.T) { diff --git a/internal/providers/doc.go b/providers/doc.go similarity index 100% rename from internal/providers/doc.go rename to providers/doc.go diff --git a/internal/providers/factory.go b/providers/factory.go similarity index 100% rename from internal/providers/factory.go rename to providers/factory.go diff --git a/providers/provider.go b/providers/provider.go new file mode 100644 index 000000000000..f983f98dfa9d --- /dev/null +++ b/providers/provider.go @@ -0,0 +1,393 @@ +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetSchema returns the complete schema for the provider. + GetProviderSchema() GetProviderSchemaResponse + + // ValidateProviderConfig allows the provider to validate the configuration. + // The ValidateProviderConfigResponse.PreparedConfig field is unused. The + // final configuration is not stored in the state, and any modifications + // that need to be made must be made during the Configure method call. + ValidateProviderConfig(ValidateProviderConfigRequest) ValidateProviderConfigResponse + + // ValidateResourceConfig allows the provider to validate the resource + // configuration values. + ValidateResourceConfig(ValidateResourceConfigRequest) ValidateResourceConfigResponse + + // ValidateDataResourceConfig allows the provider to validate the data source + // configuration values. 
+ ValidateDataResourceConfig(ValidateDataResourceConfigRequest) ValidateDataResourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initializes the provider. + ConfigureProvider(ConfigureProviderRequest) ConfigureProviderResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provider after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state. + PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. 
+ ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetProviderSchemaResponse struct { + // Provider is the schema for the provider itself. + Provider Schema + + // ProviderMeta is the schema for the provider's meta info in a module + ProviderMeta Schema + + // ResourceTypes map the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // ServerCapabilities lists optional features supported by the provider. + ServerCapabilities ServerCapabilities +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate availability +// of certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +type ServerCapabilities struct { + // PlanDestroy signals that this provider expects to receive a + // PlanResourceChange call for resources that are to be destroyed. + PlanDestroy bool +} + +type ValidateProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type ValidateProviderConfigResponse struct { + // PreparedConfig is unused and will be removed with support for plugin protocol v5. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. 
+ Config cty.Value +} + +type ValidateResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateDataResourceConfigRequest struct { + // TypeName is the name of the data source type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateDataResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type UpgradeResourceStateRequest struct { + // TypeName is the name of the resource type being upgraded + TypeName string + + // Version is the version of the schema that created the current state. + Version int64 + + // RawStateJSON and RawStateFlatmap contain the state that needs to be + // upgraded to match the current schema version. Because the schema is + // unknown, this contains only the raw data as stored in the state. + // RawStateJSON is the current json state encoding. + // RawStateFlatmap is the legacy flatmap encoding. + // Only one of these fields may be set for the upgrade request. + RawStateJSON []byte + RawStateFlatmap map[string]string +} + +type UpgradeResourceStateResponse struct { + // UpgradedState is the newly upgraded resource state. + UpgradedState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ConfigureProviderRequest struct { + // Terraform version is the version string from the running instance of + // terraform. Providers can use TerraformVersion to verify compatibility, + // and to store for informational purposes. + TerraformVersion string + + // Config is the complete configuration value for the provider. + Config cty.Value +} + +type ConfigureProviderResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} + +type ReadResourceRequest struct { + // TypeName is the name of the resource type being read. + TypeName string + + // PriorState contains the previously saved state value for this resource. + PriorState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadResourceResponse struct { + // NewState contains the current state of the resource. + NewState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type PlanResourceChangeRequest struct { + // TypeName is the name of the resource type to plan. + TypeName string + + // PriorState is the previously saved state value for this resource. + PriorState cty.Value + + // ProposedNewState is the expected state after the new configuration is + // applied. This is created by directly applying the configuration to the + // PriorState. The provider is then responsible for applying any further + // changes required to create the proposed final state. + ProposedNewState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the ProposedNewState in most circumstances. 
+ Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. + PriorPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of the attributes that are requiring + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by terraform + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of resource. + PriorState cty.Value + + // Planned state is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes. + PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. 
Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. + Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. 
It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. 
+ TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/providers/schemas.go b/providers/schemas.go new file mode 100644 index 000000000000..35b69b60fc5c --- /dev/null +++ b/providers/schemas.go @@ -0,0 +1,62 @@ +package providers + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" +) + +// Schemas is an overall container for all of the schemas for all configurable +// objects defined within a particular provider. +// +// The schema for each individual configurable object is represented by nested +// instances of type Schema (singular) within this data structure. +// +// This type used to be known as terraform.ProviderSchema, but moved out here +// as part of our ongoing efforts to shrink down the "terraform" package. +// There's still a type alias at the old name, but we should prefer using +// providers.Schema in new code. However, a consequence of this transitional +// situation is that the "terraform" package still has the responsibility for +// constructing a providers.Schemas object based on responses from the provider +// API; hopefully we'll continue this refactor later so that functions in this +// package totally encapsulate the unmarshalling and include this as part of +// providers.GetProviderSchemaResponse. 
+type Schemas struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ss *Schemas) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ss.ResourceTypes[typeName], ss.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ss.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ss *Schemas) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ss.SchemaForResourceType(addr.Mode, addr.Type) +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +// +// This describes the schema for a single object within a provider. Type +// "Schemas" (plural) instead represents the overall collection of schemas +// for everything within a particular provider. 
+type Schema struct { + Version int64 + Block *configschema.Block +} diff --git a/provisioner-local-exec/main/main.go b/provisioner-local-exec/main/main.go new file mode 100644 index 000000000000..1c5050e079b5 --- /dev/null +++ b/provisioner-local-exec/main/main.go @@ -0,0 +1,17 @@ +package main + +import ( + localexec "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + "github.com/hashicorp/terraform/grpcwrap" + "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/tfplugin5" +) + +func main() { + // Provide a binary version of the internal terraform provider for testing + plugin.Serve(&plugin.ServeOpts{ + GRPCProvisionerFunc: func() tfplugin5.ProvisionerServer { + return grpcwrap.Provisioner(localexec.New()) + }, + }) +} diff --git a/internal/provisioners/doc.go b/provisioners/doc.go similarity index 100% rename from internal/provisioners/doc.go rename to provisioners/doc.go diff --git a/internal/provisioners/factory.go b/provisioners/factory.go similarity index 100% rename from internal/provisioners/factory.go rename to provisioners/factory.go diff --git a/provisioners/provisioner.go b/provisioners/provisioner.go new file mode 100644 index 000000000000..e53c88488e80 --- /dev/null +++ b/provisioners/provisioner.go @@ -0,0 +1,82 @@ +package provisioners + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. + GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. 
+ // ProvisionResource blocks until the execution is complete. + // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. + ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. 
+ UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/internal/refactoring/move_execute.go b/refactoring/move_execute.go similarity index 98% rename from internal/refactoring/move_execute.go rename to refactoring/move_execute.go index 9a6d577cfb4f..90cf843712ed 100644 --- a/internal/refactoring/move_execute.go +++ b/refactoring/move_execute.go @@ -4,10 +4,10 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/logging" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/states" ) // ApplyMoves modifies in-place the given state object so that any existing diff --git a/internal/refactoring/move_execute_test.go b/refactoring/move_execute_test.go similarity index 99% rename from internal/refactoring/move_execute_test.go rename to refactoring/move_execute_test.go index edc8afb9fda9..ba2b949bfd43 100644 --- a/internal/refactoring/move_execute_test.go +++ b/refactoring/move_execute_test.go @@ -11,8 +11,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" ) func TestApplyMoves(t *testing.T) { diff --git a/internal/refactoring/move_statement.go b/refactoring/move_statement.go similarity index 97% rename from internal/refactoring/move_statement.go rename to refactoring/move_statement.go index 08fffeb6f47f..1c1dd75e1381 100644 --- a/internal/refactoring/move_statement.go +++ b/refactoring/move_statement.go @@ -3,10 +3,10 @@ package refactoring import ( "fmt" - 
"github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) type MoveStatement struct { diff --git a/internal/refactoring/move_statement_test.go b/refactoring/move_statement_test.go similarity index 97% rename from internal/refactoring/move_statement_test.go rename to refactoring/move_statement_test.go index a6f0f9f6eee2..084716e4341f 100644 --- a/internal/refactoring/move_statement_test.go +++ b/refactoring/move_statement_test.go @@ -6,9 +6,9 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) func TestImpliedMoveStatements(t *testing.T) { diff --git a/internal/refactoring/move_validate.go b/refactoring/move_validate.go similarity index 97% rename from internal/refactoring/move_validate.go rename to refactoring/move_validate.go index 585f62368732..51276efac719 100644 --- a/internal/refactoring/move_validate.go +++ b/refactoring/move_validate.go @@ -7,11 +7,11 @@ import ( "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/dag" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/instances" + "github.com/hashicorp/terraform/tfdiags" ) // ValidateMoves tests 
whether all of the given move statements comply with diff --git a/internal/refactoring/move_validate_test.go b/refactoring/move_validate_test.go similarity index 98% rename from internal/refactoring/move_validate_test.go rename to refactoring/move_validate_test.go index 56d767af51cd..5abb1fa0bb14 100644 --- a/internal/refactoring/move_validate_test.go +++ b/refactoring/move_validate_test.go @@ -9,13 +9,13 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/zclconf/go-cty/cty/gocty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/configs/configload" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/instances" - "github.com/hashicorp/terraform/internal/registry" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/instances" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" ) func TestValidateMoves(t *testing.T) { diff --git a/internal/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf b/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf similarity index 100% rename from internal/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf rename to refactoring/testdata/move-statement-implied/child/move-statement-implied.tf diff --git a/internal/refactoring/testdata/move-statement-implied/move-statement-implied.tf b/refactoring/testdata/move-statement-implied/move-statement-implied.tf similarity index 100% rename from internal/refactoring/testdata/move-statement-implied/move-statement-implied.tf rename to refactoring/testdata/move-statement-implied/move-statement-implied.tf diff --git 
a/internal/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf b/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf similarity index 100% rename from internal/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf rename to refactoring/testdata/move-validate-zoo/child/move-validate-child.tf diff --git a/internal/refactoring/testdata/move-validate-zoo/move-validate-root.tf b/refactoring/testdata/move-validate-zoo/move-validate-root.tf similarity index 100% rename from internal/refactoring/testdata/move-validate-zoo/move-validate-root.tf rename to refactoring/testdata/move-validate-zoo/move-validate-root.tf diff --git a/registry/client.go b/registry/client.go new file mode 100644 index 000000000000..987e80bdae2e --- /dev/null +++ b/registry/client.go @@ -0,0 +1,327 @@ +package registry + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-retryablehttp" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/logging" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" + "github.com/hashicorp/terraform/version" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" + + // registryDiscoveryRetryEnvName is the name of the environment variable that + // can be configured to customize number of retries for module and provider + // discovery requests with the remote registry. 
+ registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY" + defaultRetry = 1 + + // registryClientTimeoutEnvName is the name of the environment variable that + // can be configured to customize the timeout duration (seconds) for module + // and provider discovery with the remote registry. + registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT" + + // defaultRequestTimeout is the default timeout duration for requests to the + // remote registry. + defaultRequestTimeout = 10 * time.Second +) + +var ( + tfVersion = version.String() + + discoveryRetry int + requestTimeout time.Duration +) + +func init() { + configureDiscoveryRetry() + configureRequestTimeout() +} + +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. + client *retryablehttp.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = httpclient.New() + client.Timeout = requestTimeout + } + retryableClient := retryablehttp.NewClient() + retryableClient.HTTPClient = client + retryableClient.RetryMax = discoveryRetry + retryableClient.RequestLogHook = requestLogHook + retryableClient.ErrorHandler = maxRetryErrorHandler + + logOutput := logging.LogOutput() + retryableClient.Logger = log.New(logOutput, "", log.Flags()) + + services.Transport = retryableClient.HTTPClient.Transport + + services.SetUserAgent(httpclient.TerraformUserAgent(version.String())) + + return &Client{ + client: retryableClient, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. 
+func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, &ServiceUnreachableError{err} + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// ModuleVersions queries the registry for a module, and returns the available versions. +func (c *Client) ModuleVersions(ctx context.Context, module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(module.Module(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching module versions from %q", service) + + req, err := retryablehttp.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + c.addRequestCreds(host, req.Request) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errModuleNotFound{addr: module} + default: + return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) + } + + var versions response.ModuleVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + for _, mod := range versions.Modules { + for _, v := range mod.Versions { + log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) + } + } + + return &versions, nil +} + +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + creds, err := c.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials 
for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// ModuleLocation find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. +func (c *Client) ModuleLocation(ctx context.Context, module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return "", err + } + + var p *url.URL + if version == "" { + p, err = url.Parse(path.Join(module.Module(), "download")) + } else { + p, err = url.Parse(path.Join(module.Module(), version, "download")) + } + if err != nil { + return "", err + } + download := service.ResolveReference(p) + + log.Printf("[DEBUG] looking up module location from %q", download) + + req, err := retryablehttp.NewRequest("GET", download.String(), nil) + if err != nil { + return "", err + } + + req = req.WithContext(ctx) + + c.addRequestCreds(host, req.Request) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // there should be no body, but save it for logging + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading response body from registry: %s", err) + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return "", fmt.Errorf("module %q version %q not found", module, version) + default: + // anything else is an error: + return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) + } + + // the download location is in the X-Terraform-Get header + location := resp.Header.Get(xTerraformGet) + if location == "" { + return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) + } + + // If 
location looks like it's trying to be a relative URL, treat it as + // one. + // + // We don't do this for just _any_ location, since the X-Terraform-Get + // header is a go-getter location rather than a URL, and so not all + // possible values will parse reasonably as URLs.) + // + // When used in conjunction with go-getter we normally require this header + // to be an absolute URL, but we are more liberal here because third-party + // registry implementations may not "know" their own absolute URLs if + // e.g. they are running behind a reverse proxy frontend, or such. + if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { + locationURL, err := url.Parse(location) + if err != nil { + return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) + } + locationURL = download.ResolveReference(locationURL) + location = locationURL.String() + } + + return location, nil +} + +// configureDiscoveryRetry configures the number of retries the registry client +// will attempt for requests with retryable errors, like 502 status codes +func configureDiscoveryRetry() { + discoveryRetry = defaultRetry + + if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { + retry, err := strconv.Atoi(v) + if err == nil && retry > 0 { + discoveryRetry = retry + } + } +} + +func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { + if i > 0 { + logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") + } +} + +func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { + // Close the body per library instructions + if resp != nil { + resp.Body.Close() + } + + // Additional error detail: if we have a response, use the status code; + // if we have an error, use that; otherwise nothing. We will never have + // both response and error. 
+ var errMsg string + if resp != nil { + errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, resp.Request.URL) + } else if err != nil { + errMsg = fmt.Sprintf(": %s", err) + } + + // This function is always called with numTries=RetryMax+1. If we made any + // retry attempts, include that in the error message. + if numTries > 1 { + return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s", + numTries, errMsg) + } + return resp, fmt.Errorf("the request failed, please try again later%s", errMsg) +} + +// configureRequestTimeout configures the registry client request timeout from +// environment variables +func configureRequestTimeout() { + requestTimeout = defaultRequestTimeout + + if v := os.Getenv(registryClientTimeoutEnvName); v != "" { + timeout, err := strconv.Atoi(v) + if err == nil && timeout > 0 { + requestTimeout = time.Duration(timeout) * time.Second + } + } +} diff --git a/registry/client_test.go b/registry/client_test.go new file mode 100644 index 000000000000..4195aa6e267f --- /dev/null +++ b/registry/client_test.go @@ -0,0 +1,369 @@ +package registry + +import ( + "context" + "net/http" + "os" + "strings" + "testing" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/test" + tfversion "github.com/hashicorp/terraform/version" +) + +func TestConfigureDiscoveryRetry(t *testing.T) { + t.Run("default retry", func(t *testing.T) { + if discoveryRetry != defaultRetry { + t.Fatalf("expected retry %q, got %q", defaultRetry, discoveryRetry) + } + + rc := NewClient(nil, nil) + if rc.client.RetryMax != defaultRetry { + t.Fatalf("expected client retry %q, got %q", + defaultRetry, rc.client.RetryMax) + } + }) + + t.Run("configured retry", func(t *testing.T) { + defer func(retryEnv string) { + os.Setenv(registryDiscoveryRetryEnvName, retryEnv) 
+ discoveryRetry = defaultRetry + }(os.Getenv(registryDiscoveryRetryEnvName)) + os.Setenv(registryDiscoveryRetryEnvName, "2") + + configureDiscoveryRetry() + expected := 2 + if discoveryRetry != expected { + t.Fatalf("expected retry %q, got %q", + expected, discoveryRetry) + } + + rc := NewClient(nil, nil) + if rc.client.RetryMax != expected { + t.Fatalf("expected client retry %q, got %q", + expected, rc.client.RetryMax) + } + }) +} + +func TestConfigureRegistryClientTimeout(t *testing.T) { + t.Run("default timeout", func(t *testing.T) { + if requestTimeout != defaultRequestTimeout { + t.Fatalf("expected timeout %q, got %q", + defaultRequestTimeout.String(), requestTimeout.String()) + } + + rc := NewClient(nil, nil) + if rc.client.HTTPClient.Timeout != defaultRequestTimeout { + t.Fatalf("expected client timeout %q, got %q", + defaultRequestTimeout.String(), rc.client.HTTPClient.Timeout.String()) + } + }) + + t.Run("configured timeout", func(t *testing.T) { + defer func(timeoutEnv string) { + os.Setenv(registryClientTimeoutEnvName, timeoutEnv) + requestTimeout = defaultRequestTimeout + }(os.Getenv(registryClientTimeoutEnvName)) + os.Setenv(registryClientTimeoutEnvName, "20") + + configureRequestTimeout() + expected := 20 * time.Second + if requestTimeout != expected { + t.Fatalf("expected timeout %q, got %q", + expected, requestTimeout.String()) + } + + rc := NewClient(nil, nil) + if rc.client.HTTPClient.Timeout != expected { + t.Fatalf("expected client timeout %q, got %q", + expected, rc.client.HTTPClient.Timeout.String()) + } + }) +} + +func TestLookupModuleVersions(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + // test with and without a hostname + for _, src := range []string{ + "example.com/test-versions/name/provider", + "test-versions/name/provider", + } { + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + resp, err := 
client.ModuleVersions(context.Background(), modsrc) + if err != nil { + t.Fatal(err) + } + + if len(resp.Modules) != 1 { + t.Fatal("expected 1 module, got", len(resp.Modules)) + } + + mod := resp.Modules[0] + name := "test-versions/name/provider" + if mod.Source != name { + t.Fatalf("expected module name %q, got %q", name, mod.Source) + } + + if len(mod.Versions) != 4 { + t.Fatal("expected 4 versions, got", len(mod.Versions)) + } + + for _, v := range mod.Versions { + _, err := version.NewVersion(v.Version) + if err != nil { + t.Fatalf("invalid version %q: %s", v.Version, err) + } + } + } +} + +func TestInvalidRegistry(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "non-existent.localhost.localdomain/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + if _, err := client.ModuleVersions(context.Background(), modsrc); err == nil { + t.Fatal("expected error") + } +} + +func TestRegistryAuth(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "private/name/provider" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + _, err = client.ModuleVersions(context.Background(), mod) + if err != nil { + t.Fatal(err) + } + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") + if err != nil { + t.Fatal(err) + } + + // Also test without a credentials source + client.services.SetCredentialsSource(nil) + + // both should fail without auth + _, err = client.ModuleVersions(context.Background(), mod) + if err == nil { + t.Fatal("expected error") + } + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") + if err == nil { + t.Fatal("expected error") + } +} + +func TestLookupModuleLocationRelative(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) 
+ + src := "relative/foo/bar" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + got, err := client.ModuleLocation(context.Background(), mod, "0.2.0") + if err != nil { + t.Fatal(err) + } + + want := server.URL + "/relative-path" + if got != want { + t.Errorf("wrong location %s; want %s", got, want) + } +} + +func TestAccLookupModuleVersions(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip() + } + regDisco := disco.New() + regDisco.SetUserAgent(httpclient.TerraformUserAgent(tfversion.String())) + + // test with and without a hostname + for _, src := range []string{ + "terraform-aws-modules/vpc/aws", + regsrc.PublicRegistryHost.String() + "/terraform-aws-modules/vpc/aws", + } { + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + s := NewClient(regDisco, nil) + resp, err := s.ModuleVersions(context.Background(), modsrc) + if err != nil { + t.Fatal(err) + } + + if len(resp.Modules) != 1 { + t.Fatal("expected 1 module, got", len(resp.Modules)) + } + + mod := resp.Modules[0] + name := "terraform-aws-modules/vpc/aws" + if mod.Source != name { + t.Fatalf("expected module name %q, got %q", name, mod.Source) + } + + if len(mod.Versions) == 0 { + t.Fatal("expected multiple versions, got 0") + } + + for _, v := range mod.Versions { + _, err := version.NewVersion(v.Version) + if err != nil { + t.Fatalf("invalid version %q: %s", v.Version, err) + } + } + } +} + +// the error should reference the config source exactly, not the discovered path. 
+func TestLookupLookupModuleError(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + // this should not be found in the registry + src := "bad/local/path" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + // Instrument CheckRetry to make sure 404s are not retried + retries := 0 + oldCheck := client.client.CheckRetry + client.client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if retries > 0 { + t.Fatal("retried after module not found") + } + retries++ + return oldCheck(ctx, resp, err) + } + + _, err = client.ModuleLocation(context.Background(), mod, "0.2.0") + if err == nil { + t.Fatal("expected error") + } + + // check for the exact quoted string to ensure we didn't prepend a hostname. + if !strings.Contains(err.Error(), `"bad/local/path"`) { + t.Fatal("error should not include the hostname. got:", err) + } +} + +func TestLookupModuleRetryError(t *testing.T) { + server := test.RegistryRetryableErrorsServer() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "example.com/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected requests to exceed retry", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the error + if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} + +func TestLookupModuleNoRetryError(t *testing.T) { + // Disable retries + discoveryRetry = 0 + defer configureDiscoveryRetry() + + server := test.RegistryRetryableErrorsServer() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "example.com/test-versions/name/provider" 
+ modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected request to fail", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the error + if !strings.Contains(err.Error(), "the request failed, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} + +func TestLookupModuleNetworkError(t *testing.T) { + server := test.RegistryRetryableErrorsServer() + client := NewClient(test.Disco(server), nil) + + // Shut down the server to simulate network failure + server.Close() + + src := "example.com/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected request to fail", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the correct error + if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} diff --git a/registry/errors.go b/registry/errors.go new file mode 100644 index 000000000000..5ae022fd2e71 --- /dev/null +++ b/registry/errors.go @@ -0,0 +1,47 @@ +package registry + +import ( + "fmt" + + "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/registry/regsrc" +) + +type errModuleNotFound struct { + addr *regsrc.Module +} + +func (e *errModuleNotFound) Error() string { + return fmt.Sprintf("module %s not found", e.addr) +} + +// IsModuleNotFound returns true only if the given error is a "module not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. 
+func IsModuleNotFound(err error) bool { + _, ok := err.(*errModuleNotFound) + return ok +} + +// IsServiceNotProvided returns true only if the given error is a "service not provided" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsServiceNotProvided(err error) bool { + _, ok := err.(*disco.ErrServiceNotProvided) + return ok +} + +// ServiceUnreachableError Registry service is unreachable +type ServiceUnreachableError struct { + err error +} + +func (e *ServiceUnreachableError) Error() string { + return e.err.Error() +} + +// IsServiceUnreachable returns true if the registry/discovery service was unreachable +func IsServiceUnreachable(err error) bool { + _, ok := err.(*ServiceUnreachableError) + return ok +} diff --git a/internal/registry/regsrc/friendly_host.go b/registry/regsrc/friendly_host.go similarity index 100% rename from internal/registry/regsrc/friendly_host.go rename to registry/regsrc/friendly_host.go diff --git a/internal/registry/regsrc/friendly_host_test.go b/registry/regsrc/friendly_host_test.go similarity index 100% rename from internal/registry/regsrc/friendly_host_test.go rename to registry/regsrc/friendly_host_test.go diff --git a/registry/regsrc/module.go b/registry/regsrc/module.go new file mode 100644 index 000000000000..7bd1f8e41a25 --- /dev/null +++ b/registry/regsrc/module.go @@ -0,0 +1,245 @@ +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/addrs" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. 
It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. (GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. + providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. + ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in terraform. + disallowed = map[string]bool{ + "github.com": true, + "bitbucket.org": true, + } +) + +// Module describes a Terraform Registry Module source. +type Module struct { + // RawHost is the friendly host prefix if one was present. 
It might be nil
+	// if the original source had no host prefix which implies
+	// PublicRegistryHost but is distinct from having an actual pointer to
+	// PublicRegistryHost since it encodes the fact the original string didn't
+	// include a host prefix at all which is significant for recovering actual
+	// input not just normalized form. Most callers should access it with Host()
+	// which will return public registry host instance if it's nil.
+	RawHost      *FriendlyHost
+	RawNamespace string
+	RawName      string
+	RawProvider  string
+	RawSubmodule string
+}
+
+// NewModule construct a new module source from separate parts. Pass empty
+// string if host or submodule are not needed.
+//
+// The host, if given, is validated against the FriendlyHost rules and the
+// disallowed-hosts list (github.com, bitbucket.org); an invalid or
+// disallowed host yields ErrInvalidModuleSource.
+func NewModule(host, namespace, name, provider, submodule string) (*Module, error) {
+	m := &Module{
+		RawNamespace: namespace,
+		RawName:      name,
+		RawProvider:  provider,
+		RawSubmodule: submodule,
+	}
+	if host != "" {
+		h := NewFriendlyHost(host)
+		if h != nil {
+			if !h.Valid() || disallowed[h.Display()] {
+				return nil, ErrInvalidModuleSource
+			}
+		}
+		m.RawHost = h
+	}
+	return m, nil
+}
+
+// ModuleFromModuleSourceAddr is an adapter to automatically transform the
+// modern representation of registry module addresses,
+// addrs.ModuleSourceRegistry, into the legacy representation regsrc.Module.
+//
+// Note that the new-style model always does normalization during parsing and
+// does not preserve the raw user input at all, and so although the fields
+// of regsrc.Module are all called "Raw...", initializing a Module indirectly
+// through an addrs.ModuleSourceRegistry will cause those values to be the
+// normalized ones, not the raw user input.
+//
+// Use this only for temporary shims to call into existing code that still
+// uses regsrc.Module. Eventually all other subsystems should be updated to
+// use addrs.ModuleSourceRegistry instead, and then package regsrc can be
+// removed altogether.
+func ModuleFromModuleSourceAddr(addr addrs.ModuleSourceRegistry) *Module { + ret := ModuleFromRegistryPackageAddr(addr.Package) + ret.RawSubmodule = addr.Subdir + return ret +} + +// ModuleFromRegistryPackageAddr is similar to ModuleFromModuleSourceAddr, but +// it works with just the isolated registry package address, and not the +// full source address. +// +// The practical implication of that is that RawSubmodule will always be +// the empty string in results from this function, because "Submodule" maps +// to "Subdir" and that's a module source address concept, not a module +// package concept. In practice this typically doesn't matter because the +// registry client ignores the RawSubmodule field anyway; that's a concern +// for the higher-level module installer to deal with. +func ModuleFromRegistryPackageAddr(addr addrs.ModuleRegistryPackage) *Module { + return &Module{ + RawHost: NewFriendlyHost(addr.Host.String()), + RawNamespace: addr.Namespace, + RawName: addr.Name, + RawProvider: addr.TargetSystem, // this field was never actually enforced to be a provider address, so now has a more general name + } +} + +// ParseModuleSource attempts to parse source as a Terraform registry module +// source. If the string is not found to be in a valid format, +// ErrInvalidModuleSource is returned. Note that this can only be used on +// "input" strings, e.g. either ones supplied by the user or potentially +// normalised but in Display form (unicode). It will fail to parse a source with +// a punycoded domain since this is not permitted input from a user. If you have +// an already normalized string internally, you can compare it without parsing +// by comparing with the normalized version of the subject with the normal +// string equality operator. +func ParseModuleSource(source string) (*Module, error) { + // See if there is a friendly host prefix. 
+ host, rest := ParseFriendlyHost(source) + if host != nil { + if !host.Valid() || disallowed[host.Display()] { + return nil, ErrInvalidModuleSource + } + } + + matches := moduleSourceRe.FindStringSubmatch(rest) + if len(matches) < 4 { + return nil, ErrInvalidModuleSource + } + + m := &Module{ + RawHost: host, + RawNamespace: matches[1], + RawName: matches[2], + RawProvider: matches[3], + } + + if len(matches) == 5 { + m.RawSubmodule = matches[4] + } + + return m, nil +} + +// Display returns the source formatted for display to the user in CLI or web +// output. +func (m *Module) Display() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) +} + +// Normalized returns the source formatted for internal reference or comparison. +func (m *Module) Normalized() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) +} + +// String returns the source formatted as the user originally typed it assuming +// it was parsed from user input. +func (m *Module) String() string { + // Don't normalize public registry hostname - leave it exactly like the user + // input it. + hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had not host component this will return the +// PublicRegistryHost. 
+func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. 
+func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/internal/registry/regsrc/module_test.go b/registry/regsrc/module_test.go similarity index 100% rename from internal/registry/regsrc/module_test.go rename to registry/regsrc/module_test.go diff --git a/internal/registry/regsrc/regsrc.go b/registry/regsrc/regsrc.go similarity index 100% rename from internal/registry/regsrc/regsrc.go rename to registry/regsrc/regsrc.go diff --git a/internal/registry/response/module.go b/registry/response/module.go similarity index 100% rename from internal/registry/response/module.go rename to registry/response/module.go diff --git a/internal/registry/response/module_list.go b/registry/response/module_list.go similarity index 100% rename from internal/registry/response/module_list.go rename to registry/response/module_list.go diff --git a/internal/registry/response/module_provider.go b/registry/response/module_provider.go similarity index 100% rename from internal/registry/response/module_provider.go rename to registry/response/module_provider.go diff --git a/internal/registry/response/module_versions.go b/registry/response/module_versions.go similarity index 100% rename from internal/registry/response/module_versions.go rename to registry/response/module_versions.go diff --git a/internal/registry/response/pagination.go b/registry/response/pagination.go similarity index 100% rename from internal/registry/response/pagination.go rename to registry/response/pagination.go diff --git a/internal/registry/response/pagination_test.go b/registry/response/pagination_test.go similarity index 100% rename from internal/registry/response/pagination_test.go rename to registry/response/pagination_test.go diff --git a/internal/registry/response/redirect.go b/registry/response/redirect.go similarity index 100% rename from 
internal/registry/response/redirect.go rename to registry/response/redirect.go diff --git a/internal/registry/test/mock_registry.go b/registry/test/mock_registry.go similarity index 97% rename from internal/registry/test/mock_registry.go rename to registry/test/mock_registry.go index 079df1bfea9f..00ead006e534 100644 --- a/internal/registry/test/mock_registry.go +++ b/registry/test/mock_registry.go @@ -13,9 +13,9 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/auth" "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/httpclient" - "github.com/hashicorp/terraform/internal/registry/regsrc" - "github.com/hashicorp/terraform/internal/registry/response" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/internal/repl/format.go b/repl/format.go similarity index 98% rename from internal/repl/format.go rename to repl/format.go index c65ad048b3b3..74cea8411072 100644 --- a/internal/repl/format.go +++ b/repl/format.go @@ -5,7 +5,7 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/repl/format_test.go b/repl/format_test.go similarity index 98% rename from internal/repl/format_test.go rename to repl/format_test.go index 31843bbe010b..5576af6651fc 100644 --- a/internal/repl/format_test.go +++ b/repl/format_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform/internal/lang/marks" + "github.com/hashicorp/terraform/lang/marks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/repl/repl.go b/repl/repl.go similarity index 100% rename from internal/repl/repl.go rename to repl/repl.go diff --git a/internal/repl/session.go b/repl/session.go similarity index 96% 
rename from internal/repl/session.go rename to repl/session.go index f07363ec1a16..2d3c7cee463f 100644 --- a/internal/repl/session.go +++ b/repl/session.go @@ -9,10 +9,10 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/internal/lang" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/lang/types" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/lang/types" + "github.com/hashicorp/terraform/tfdiags" ) // Session represents the state for a single REPL session. diff --git a/internal/repl/session_test.go b/repl/session_test.go similarity index 96% rename from internal/repl/session_test.go rename to repl/session_test.go index 3e976cadc0bf..a09f05fa456a 100644 --- a/internal/repl/session_test.go +++ b/repl/session_test.go @@ -9,14 +9,14 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/configschema" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/internal/providers" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" - - _ "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/initwd" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + + _ "github.com/hashicorp/terraform/logging" ) func TestMain(m *testing.M) { diff --git a/internal/repl/testdata/config-fixture/child/empty.tf b/repl/testdata/config-fixture/child/empty.tf similarity index 100% rename from internal/repl/testdata/config-fixture/child/empty.tf rename to 
repl/testdata/config-fixture/child/empty.tf diff --git a/internal/repl/testdata/config-fixture/repl_test.tf b/repl/testdata/config-fixture/repl_test.tf similarity index 100% rename from internal/repl/testdata/config-fixture/repl_test.tf rename to repl/testdata/config-fixture/repl_test.tf diff --git a/internal/replacefile/doc.go b/replacefile/doc.go similarity index 100% rename from internal/replacefile/doc.go rename to replacefile/doc.go diff --git a/internal/replacefile/replacefile_unix.go b/replacefile/replacefile_unix.go similarity index 100% rename from internal/replacefile/replacefile_unix.go rename to replacefile/replacefile_unix.go diff --git a/internal/replacefile/replacefile_windows.go b/replacefile/replacefile_windows.go similarity index 100% rename from internal/replacefile/replacefile_windows.go rename to replacefile/replacefile_windows.go diff --git a/internal/replacefile/writefile.go b/replacefile/writefile.go similarity index 100% rename from internal/replacefile/writefile.go rename to replacefile/writefile.go diff --git a/states/checks.go b/states/checks.go new file mode 100644 index 000000000000..4557fa5d0dac --- /dev/null +++ b/states/checks.go @@ -0,0 +1,182 @@ +package states + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" +) + +// CheckResults represents a summary snapshot of the status of a set of checks +// declared in configuration, updated after each Terraform Core run that +// changes the state or remote system in a way that might impact the check +// results. +// +// Unlike a checks.State, this type only tracks the overall results for +// each checkable object and doesn't aim to preserve the identity of individual +// checks in the configuration. 
For our UI reporting purposes, it is entire
+// objects that pass or fail based on their declared checks; the individual
+// checks have no durable identity between runs, and so are only a language
+// design convenience to help authors describe various independent conditions
+// with different failure messages each.
+//
+// CheckResults should typically be considered immutable once constructed:
+// instead of updating it in-place, instead construct an entirely new
+// CheckResults object based on a fresh checks.State.
+type CheckResults struct {
+	// ConfigResults has all of the individual check results grouped by the
+	// configuration object they relate to.
+	//
+	// The top-level map here will always have a key for every configuration
+	// object that includes checks at the time of evaluating the results,
+	// even if there turned out to be no instances of that object and
+	// therefore no individual check results.
+	ConfigResults addrs.Map[addrs.ConfigCheckable, *CheckResultAggregate]
+}
+
+// CheckResultAggregate represents both the overall result for a particular
+// configured object that has checks and the individual checkable objects
+// it declared, if any.
+type CheckResultAggregate struct {
+	// Status is the aggregate status across all objects.
+	//
+	// Sometimes an error or check failure during planning will prevent
+	// Terraform Core from even determining the individual checkable objects
+	// associated with a downstream configuration object, and that situation is
+	// described here by this Status being checks.StatusUnknown and there being
+	// no elements in the ObjectResults field.
+	//
+	// That's different than Terraform Core explicitly reporting that there are
+	// no instances of the config object (e.g. a resource with count = 0),
+	// which leads to the aggregate status being checks.StatusPass while
+	// ObjectResults is still empty.
+ Status checks.Status + + ObjectResults addrs.Map[addrs.Checkable, *CheckResultObject] +} + +// CheckResultObject is the check status for a single checkable object. +// +// This aggregates together all of the checks associated with a particular +// object into a single pass/fail/error/unknown result, because checkable +// objects have durable addresses that can survive between runs, but their +// individual checks do not. (Module authors are free to reorder their checks +// for a particular object in the configuration with no change in meaning.) +type CheckResultObject struct { + // Status is the check status of the checkable object, derived from the + // results of all of its individual checks. + Status checks.Status + + // FailureMessages is an optional set of module-author-defined messages + // describing the problems that the checks detected, for objects whose + // status is checks.StatusFail. + // + // (checks.StatusError problems get reported as normal diagnostics during + // evaluation instead, and so will not appear here.) + FailureMessages []string +} + +// NewCheckResults constructs a new states.CheckResults object that is a +// snapshot of the check statuses recorded in the given checks.State object. +// +// This should be called only after a Terraform Core run has completed and +// recorded any results from running the checks in the given object. 
+func NewCheckResults(source *checks.State) *CheckResults { + ret := &CheckResults{ + ConfigResults: addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate](), + } + + for _, configAddr := range source.AllConfigAddrs() { + aggr := &CheckResultAggregate{ + Status: source.AggregateCheckStatus(configAddr), + ObjectResults: addrs.MakeMap[addrs.Checkable, *CheckResultObject](), + } + + for _, objectAddr := range source.ObjectAddrs(configAddr) { + obj := &CheckResultObject{ + Status: source.ObjectCheckStatus(objectAddr), + FailureMessages: source.ObjectFailureMessages(objectAddr), + } + aggr.ObjectResults.Put(objectAddr, obj) + } + + ret.ConfigResults.Put(configAddr, aggr) + } + + // If there aren't actually any configuration objects then we'll just + // leave the map as a whole nil, because having it be zero-value makes + // life easier for deep comparisons in unit tests elsewhere. + if ret.ConfigResults.Len() == 0 { + ret.ConfigResults.Elems = nil + } + + return ret +} + +// GetObjectResult looks up the result for a single object, or nil if there +// is no such object. +// +// In main code we shouldn't typically need to look up individual objects +// like this, since we'll usually be reporting check results in an aggregate +// form, but determining the result of a particular object is useful in our +// internal unit tests, and so this is here primarily for that purpose. 
+func (r *CheckResults) GetObjectResult(objectAddr addrs.Checkable) *CheckResultObject { + configAddr := objectAddr.ConfigCheckable() + + aggr := r.ConfigResults.Get(configAddr) + if aggr == nil { + return nil + } + + return aggr.ObjectResults.Get(objectAddr) +} + +func (r *CheckResults) DeepCopy() *CheckResults { + if r == nil { + return nil + } + ret := &CheckResults{} + if r.ConfigResults.Elems == nil { + return ret + } + + ret.ConfigResults = addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate]() + + for _, configElem := range r.ConfigResults.Elems { + aggr := &CheckResultAggregate{ + Status: configElem.Value.Status, + } + + if configElem.Value.ObjectResults.Elems != nil { + aggr.ObjectResults = addrs.MakeMap[addrs.Checkable, *CheckResultObject]() + + for _, objectElem := range configElem.Value.ObjectResults.Elems { + result := &CheckResultObject{ + Status: objectElem.Value.Status, + + // NOTE: We don't deep-copy this slice because it's + // immutable once constructed by convention. + FailureMessages: objectElem.Value.FailureMessages, + } + aggr.ObjectResults.Put(objectElem.Key, result) + } + } + + ret.ConfigResults.Put(configElem.Key, aggr) + } + + return ret +} + +// ObjectAddrsKnown determines whether the set of objects recorded in this +// aggregate is accurate (true) or if it's incomplete as a result of the +// run being interrupted before instance expansion. +func (r *CheckResultAggregate) ObjectAddrsKnown() bool { + if r.ObjectResults.Len() != 0 { + // If there are any object results at all then we definitely know. + return true + } + + // If we don't have any object addresses then we distinguish a known + // empty set of objects from an unknown set of objects by the aggregate + // status being unknown. 
+ return r.Status != checks.StatusUnknown +} diff --git a/internal/states/doc.go b/states/doc.go similarity index 100% rename from internal/states/doc.go rename to states/doc.go diff --git a/internal/states/instance_generation.go b/states/instance_generation.go similarity index 100% rename from internal/states/instance_generation.go rename to states/instance_generation.go diff --git a/internal/states/instance_object.go b/states/instance_object.go similarity index 99% rename from internal/states/instance_object.go rename to states/instance_object.go index 0e790bba1a66..a52a5ec6aa1a 100644 --- a/internal/states/instance_object.go +++ b/states/instance_object.go @@ -6,7 +6,7 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" ) // ResourceInstanceObject is the local representation of a specific remote diff --git a/internal/states/instance_object_src.go b/states/instance_object_src.go similarity index 97% rename from internal/states/instance_object_src.go rename to states/instance_object_src.go index a564e0d90778..aeb612eaa8a4 100644 --- a/internal/states/instance_object_src.go +++ b/states/instance_object_src.go @@ -4,8 +4,8 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs/hcl2shim" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/hcl2shim" ) // ResourceInstanceObjectSrc is a not-fully-decoded version of diff --git a/internal/states/instance_object_test.go b/states/instance_object_test.go similarity index 97% rename from internal/states/instance_object_test.go rename to states/instance_object_test.go index e7f4eca6a084..1c781f0a8749 100644 --- a/internal/states/instance_object_test.go +++ b/states/instance_object_test.go @@ -5,7 +5,7 @@ import ( "testing" 
"github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/addrs" "github.com/zclconf/go-cty/cty" ) diff --git a/states/module.go b/states/module.go new file mode 100644 index 000000000000..fbef01c7a9ee --- /dev/null +++ b/states/module.go @@ -0,0 +1,321 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" +) + +// Module is a container for the states of objects within a particular module. +type Module struct { + Addr addrs.ModuleInstance + + // Resources contains the state for each resource. The keys in this map are + // an implementation detail and must not be used by outside callers. + Resources map[string]*Resource + + // OutputValues contains the state for each output value. The keys in this + // map are output value names. + OutputValues map[string]*OutputValue + + // LocalValues contains the value for each named output value. The keys + // in this map are local value names. + LocalValues map[string]cty.Value +} + +// NewModule constructs an empty module state for the given module address. +func NewModule(addr addrs.ModuleInstance) *Module { + return &Module{ + Addr: addr, + Resources: map[string]*Resource{}, + OutputValues: map[string]*OutputValue{}, + LocalValues: map[string]cty.Value{}, + } +} + +// Resource returns the state for the resource with the given address within +// the receiving module state, or nil if the requested resource is not tracked +// in the state. +func (ms *Module) Resource(addr addrs.Resource) *Resource { + return ms.Resources[addr.String()] +} + +// ResourceInstance returns the state for the resource instance with the given +// address within the receiving module state, or nil if the requested instance +// is not tracked in the state. 
+func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { + rs := ms.Resource(addr.Resource) + if rs == nil { + return nil + } + return rs.Instance(addr.Key) +} + +// SetResourceProvider updates the resource-level metadata for the resource +// with the given address, creating the resource state for it if it doesn't +// already exist. +func (ms *Module) SetResourceProvider(addr addrs.Resource, provider addrs.AbsProviderConfig) { + rs := ms.Resource(addr) + if rs == nil { + rs = &Resource{ + Addr: addr.Absolute(ms.Addr), + Instances: map[addrs.InstanceKey]*ResourceInstance{}, + } + ms.Resources[addr.String()] = rs + } + + rs.ProviderConfig = provider +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed. +func (ms *Module) RemoveResource(addr addrs.Resource) { + delete(ms.Resources, addr.String()) +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address and dependencies. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance will be removed altogether. +// +// The provider address is a resource-wide setting and is updated for all other +// instances of the same resource as a side-effect of this call. +func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + rs := ms.Resource(addr.Resource) + // if the resource is nil and the object is nil, don't do anything! 
+ // you'll probably just cause issues + if obj == nil && rs == nil { + return + } + if obj == nil && rs != nil { + // does the resource have any other objects? + // if not then delete the whole resource + if len(rs.Instances) == 0 { + delete(ms.Resources, addr.Resource.String()) + return + } + // check for an existing resource, now that we've ensured that rs.Instances is more than 0/not nil + is := rs.Instance(addr.Key) + if is == nil { + // if there is no instance on the resource with this address and obj is nil, return and change nothing + return + } + // if we have an instance, update the current + is.Current = obj + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + // Delete the resource if it has no instances, but only if NoEach + if len(rs.Instances) == 0 { + delete(ms.Resources, addr.Resource.String()) + return + } + } + // Nothing more to do here, so return! + return + } + if rs == nil && obj != nil { + // We don't have have a resource so make one, which is a side effect of setResourceMeta + ms.SetResourceProvider(addr.Resource, provider) + // now we have a resource! so update the rs value to point to it + rs = ms.Resource(addr.Resource) + } + // Get our instance from the resource; it could be there or not at this point + is := rs.Instance(addr.Key) + if is == nil { + // if we don't have a resource, create one and add to the instances + is = rs.CreateInstance(addr.Key) + // update the resource meta because we have a new + ms.SetResourceProvider(addr.Resource, provider) + } + // Update the resource's ProviderConfig, in case the provider has updated + rs.ProviderConfig = provider + is.Current = obj +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. 
For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceProvider(addr.Resource, provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + if obj != nil { + is.Deposed[key] = obj + } else { + delete(is.Deposed, key) + } + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + delete(rs.Instances, addr.Key) + + if len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. 
We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + is := rs.Instance(addr.Key) + if is == nil { + return + } + delete(is.Deposed, key) + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// deposeResourceInstanceObject is the real implementation of +// SyncState.DeposeResourceInstanceObject. +func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { + is := ms.ResourceInstance(addr) + if is == nil { + return NotDeposed + } + return is.deposeCurrentObject(forceKey) +} + +// maybeRestoreResourceInstanceDeposed is the real implementation of +// SyncState.MaybeRestoreResourceInstanceDeposed. +func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { + rs := ms.Resource(addr.Resource) + if rs == nil { + return false + } + is := rs.Instance(addr.Key) + if is == nil { + return false + } + if is.Current != nil { + return false + } + if len(is.Deposed) == 0 { + return false + } + is.Current = is.Deposed[key] + delete(is.Deposed, key) + return true +} + +// SetOutputValue writes an output value into the state, overwriting any +// existing value of the same name. 
+func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { + os := &OutputValue{ + Addr: addrs.AbsOutputValue{ + Module: ms.Addr, + OutputValue: addrs.OutputValue{ + Name: name, + }, + }, + Value: value, + Sensitive: sensitive, + } + ms.OutputValues[name] = os + return os +} + +// RemoveOutputValue removes the output value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveOutputValue(name string) { + delete(ms.OutputValues, name) +} + +// SetLocalValue writes a local value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetLocalValue(name string, value cty.Value) { + ms.LocalValues[name] = value +} + +// RemoveLocalValue removes the local value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveLocalValue(name string) { + delete(ms.LocalValues, name) +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// You probably shouldn't call this! See the method of the same name on +// type State for more information on what this is for and the rare situations +// where it is safe to use. +func (ms *Module) PruneResourceHusks() { + for _, rs := range ms.Resources { + if len(rs.Instances) == 0 { + ms.RemoveResource(rs.Addr.Resource) + } + } +} + +// empty returns true if the receving module state is contributing nothing +// to the state. In other words, it returns true if the module could be +// removed from the state altogether without changing the meaning of the state. +// +// In practice a module containing no objects is the same as a non-existent +// module, and so we can opportunistically clean up once a module becomes +// empty on the assumption that it will be re-added if needed later. 
+func (ms *Module) empty() bool { + if ms == nil { + return true + } + + // This must be updated to cover any new collections added to Module + // in future. + return (len(ms.Resources) == 0 && + len(ms.OutputValues) == 0 && + len(ms.LocalValues) == 0) +} diff --git a/internal/states/objectstatus_string.go b/states/objectstatus_string.go similarity index 100% rename from internal/states/objectstatus_string.go rename to states/objectstatus_string.go diff --git a/states/output_value.go b/states/output_value.go new file mode 100644 index 000000000000..268420cf4ad5 --- /dev/null +++ b/states/output_value.go @@ -0,0 +1,16 @@ +package states + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Addr addrs.AbsOutputValue + Value cty.Value + Sensitive bool +} diff --git a/internal/states/remote/remote.go b/states/remote/remote.go similarity index 94% rename from internal/states/remote/remote.go rename to states/remote/remote.go index a87c9145a830..0dab1863ce48 100644 --- a/internal/states/remote/remote.go +++ b/states/remote/remote.go @@ -1,7 +1,7 @@ package remote import ( - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/states/statemgr" ) // Client is the interface that must be implemented for a remote state diff --git a/internal/states/remote/remote_test.go b/states/remote/remote_test.go similarity index 100% rename from internal/states/remote/remote_test.go rename to states/remote/remote_test.go diff --git a/states/remote/state.go b/states/remote/state.go new file mode 100644 index 000000000000..9a2addf54189 --- /dev/null +++ b/states/remote/state.go @@ -0,0 +1,265 @@ +package remote + +import ( + "bytes" + "fmt" + "log" + "sync" + + 
uuid "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state. This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. +type State struct { + mu sync.Mutex + + Client Client + + // We track two pieces of meta data in addition to the state itself: + // + // lineage - the state's unique ID + // serial - the monotonic counter of "versions" of the state + // + // Both of these (along with state) have a sister field + // that represents the values read in from an existing source. + // All three of these values are used to determine if the new + // state has changed from an existing state we read in. + lineage, readLineage string + serial, readSerial uint64 + state, readState *states.State + disableLocks bool +} + +var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) + +// statemgr.Reader impl. +func (s *State) State() *states.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { + if err := s.RefreshState(); err != nil { + return nil, fmt.Errorf("Failed to load state: %s", err) + } + + state := s.State() + if state == nil { + state = states.NewState() + } + + return state.RootModule().OutputValues, nil +} + +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + +// statemgr.Writer impl. 
+func (s *State) WriteState(state *states.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = state.DeepCopy() + + return nil +} + +// WriteStateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !force { + checkFile := statefile.New(s.state, s.lineage, s.serial) + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // The remote backend needs to pass the `force` flag through to its client. + // For backends that support such operations, inform the client + // that a force push has been requested + c, isForcePusher := s.Client.(ClientForcePusher) + if force && isForcePusher { + c.EnableForcePush() + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + + return nil +} + +// statemgr.Refresher impl. +func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.refreshState() +} + +// refreshState is the main implementation of RefreshState, but split out so +// that we can make internal calls to it from methods that are already holding +// the s.mu lock. 
+func (s *State) refreshState() error { + payload, err := s.Client.Get() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + s.readState = nil + s.lineage = "" + s.serial = 0 + return nil + } + + stateFile, err := statefile.Read(bytes.NewReader(payload.Data)) + if err != nil { + return err + } + + s.lineage = stateFile.Lineage + s.serial = stateFile.Serial + s.state = stateFile.State + + // Properties from the remote must be separate so we can + // track changes as lineage, serial and/or state are mutated + s.readLineage = stateFile.Lineage + s.readSerial = stateFile.Serial + s.readState = s.state.DeepCopy() + return nil +} + +// statemgr.Persister impl. +func (s *State) PersistState(schemas *terraform.Schemas) error { + s.mu.Lock() + defer s.mu.Unlock() + + log.Printf("[DEBUG] states/remote: state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] states/remote: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.readState != nil { + lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage + serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial + stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) + if stateUnchanged && lineageUnchanged && serialUnchanged { + // If the state, lineage or serial haven't changed at all then we have nothing to do. + return nil + } + s.serial++ + } else { + // We might be writing a new state altogether, but before we do that + // we'll check to make sure there isn't already a snapshot present + // that we ought to be updating. 
+ err := s.refreshState() + if err != nil { + return fmt.Errorf("failed checking for existing remote state: %s", err) + } + log.Printf("[DEBUG] states/remote: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] states/remote: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + if s.lineage == "" { // indicates that no state snapshot is present yet + lineage, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate initial lineage: %v", err) + } + s.lineage = lineage + s.serial++ + } + } + + f := statefile.New(s.state, s.lineage, s.serial) + + var buf bytes.Buffer + err := statefile.Write(f, &buf) + if err != nil { + return err + } + + err = s.Client.Put(buf.Bytes()) + if err != nil { + return err + } + + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. + // We've potentially overwritten (via force) the state, lineage + // and / or serial (and serial was incremented) so we copy over all + // three fields so everything matches the new state and a subsequent + // operation would correctly detect no changes to the lineage, serial or state. + s.readState = s.state.DeepCopy() + s.readLineage = s.lineage + s.readSerial = s.serial + return nil +} + +// Lock calls the Client's Lock method if it's implemented. +func (s *State) Lock(info *statemgr.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return "", nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Lock(info) + } + return "", nil +} + +// Unlock calls the Client's Unlock method if it's implemented. +func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Unlock(id) + } + return nil +} + +// DisableLocks turns the Lock and Unlock methods into no-ops. 
This is intended +// to be called during initialization of a state manager and should not be +// called after any of the statemgr.Full interface methods have been called. +func (s *State) DisableLocks() { + s.disableLocks = true +} + +// StateSnapshotMeta returns the metadata from the most recently persisted +// or refreshed persistent state snapshot. +// +// This is an implementation of statemgr.PersistentMeta. +func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { + return statemgr.SnapshotMeta{ + Lineage: s.lineage, + Serial: s.serial, + } +} diff --git a/states/remote/state_test.go b/states/remote/state_test.go new file mode 100644 index 000000000000..00dc2621a4d9 --- /dev/null +++ b/states/remote/state_test.go @@ -0,0 +1,740 @@ +package remote + +import ( + "log" + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/zclconf/go-cty/cty" + + tfaddr "github.com/hashicorp/terraform-registry-address" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/version" +) + +func TestState_impl(t *testing.T) { + var _ statemgr.Reader = new(State) + var _ statemgr.Writer = new(State) + var _ statemgr.Persister = new(State) + var _ statemgr.Refresher = new(State) + var _ statemgr.OutputReader = new(State) + var _ statemgr.Locker = new(State) +} + +func TestStateRace(t *testing.T) { + s := &State{ + Client: nilClient{}, + } + + current := states.NewState() + + var wg sync.WaitGroup + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s.WriteState(current) + s.PersistState(nil) + s.RefreshState() + }() + } + wg.Wait() +} + +// testCase encapsulates a test state test +type testCase struct { + name string + // A function to mutate state and return a cleanup function + mutationFunc func(*State) (*states.State, func()) + // The 
expected requests to have taken place + expectedRequests []mockClientRequest + // Mark this case as not having a request + noRequest bool +} + +// isRequested ensures a test that is specified as not having +// a request doesn't have one by checking if a method exists +// on the expectedRequest. +func (tc testCase) isRequested(t *testing.T) bool { + for _, expectedMethod := range tc.expectedRequests { + hasMethod := expectedMethod.Method != "" + if tc.noRequest && hasMethod { + t.Fatalf("expected no content for %q but got: %v", tc.name, expectedMethod) + } + } + return !tc.noRequest +} + +func TestStatePersist(t *testing.T) { + testCases := []testCase{ + { + name: "first state persistence", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.state = &states.State{ + Modules: map[string]*states.Module{"": {}}, + } + s := mgr.State() + s.RootModule().SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Name: "myfile", + Type: "local_file", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "filename": "file.txt", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: tfaddr.Provider{Namespace: "local"}, + }, + ) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + // Expect an initial refresh, which returns nothing since there is no remote state. 
+ { + Method: "Get", + Content: nil, + }, + // Expect a second refresh, since the read state is nil + { + Method: "Get", + Content: nil, + }, + // Expect an initial push with values and a serial of 1 + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "some meaningless value", + "serial": 1.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{ + map[string]interface{}{ + "instances": []interface{}{ + map[string]interface{}{ + "attributes_flat": map[string]interface{}{ + "filename": "file.txt", + }, + "schema_version": 0.0, + "sensitive_attributes": []interface{}{}, + }, + }, + "mode": "managed", + "name": "myfile", + "provider": `provider["/local/"]`, + "type": "local_file", + }, + }, + "check_results": nil, + }, + }, + }, + }, + // If lineage changes, expect the serial to increment + { + name: "change lineage", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.lineage = "mock-lineage" + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 2.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{ + map[string]interface{}{ + "instances": []interface{}{ + map[string]interface{}{ + "attributes_flat": map[string]interface{}{ + "filename": "file.txt", + }, + "schema_version": 0.0, + "sensitive_attributes": []interface{}{}, + }, + }, + "mode": "managed", + "name": "myfile", + "provider": `provider["/local/"]`, + "type": "local_file", + }, + }, + "check_results": nil, + }, + }, + }, + }, + // removing resources should increment the serial + { + name: "remove resources", + 
mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.state.RootModule().Resources = map[string]*states.Resource{} + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 3.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // If the remote serial is incremented, then we increment it once more. + { + name: "change serial", + mutationFunc: func(mgr *State) (*states.State, func()) { + originalSerial := mgr.serial + mgr.serial++ + return mgr.State(), func() { + mgr.serial = originalSerial + } + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 5.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // Adding an output should cause the serial to increment as well. 
+ { + name: "add output to state", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + s.RootModule().SetOutputValue("foo", cty.StringVal("bar"), false) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 4.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "bar", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // ...as should changing an output + { + name: "mutate state bar -> baz", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + s.RootModule().SetOutputValue("foo", cty.StringVal("baz"), false) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 5.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "baz", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + { + name: "nothing changed", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + return s, func() {} + }, + noRequest: true, + }, + // If the remote state's serial is less (force push), then we + // increment it once from there. 
+ { + name: "reset serial (force push style)", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.serial = 2 + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 3.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "baz", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + } + + // Initial setup of state just to give us a fixed starting point for our + // test assertions below, or else we'd need to deal with + // random lineage. + mgr := &State{ + Client: &mockClient{}, + } + + // In normal use (during a Terraform operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. + // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + // Our client is a mockClient which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClient) + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + // Run tests in order. 
+ for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s, cleanup := tc.mutationFunc(mgr) + + if err := mgr.WriteState(s); err != nil { + t.Fatalf("failed to WriteState for %q: %s", tc.name, err) + } + if err := mgr.PersistState(nil); err != nil { + t.Fatalf("failed to PersistState for %q: %s", tc.name, err) + } + + if tc.isRequested(t) { + // Get captured request from the mock client log + // based on the index of the current test + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + for expectedRequestIdx := 0; expectedRequestIdx < len(tc.expectedRequests); expectedRequestIdx++ { + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequests[expectedRequestIdx], loggedRequest, cmpopts.IgnoreMapEntries(func(key string, value interface{}) bool { + // This is required since the initial state creation causes the lineage to be a UUID that is not known at test time. + return tc.name == "first state persistence" && key == "lineage" + })); len(diff) > 0 { + t.Logf("incorrect client requests for %q:\n%s", tc.name, diff) + t.Fail() + } + } + } + cleanup() + }) + } + logCnt := len(mockClient.log) + if logIdx != logCnt { + t.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} + +func TestState_GetRootOutputValues(t *testing.T) { + // Initial setup of state with outputs already defined + mgr := &State{ + Client: &mockClient{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 1, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + } + + outputs, err := mgr.GetRootOutputValues() + if err != nil { + t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) + } + + if len(outputs) != 1 { + t.Errorf("Expected %d outputs, but received %d", 1, len(outputs)) + } +} + +type migrationTestCase struct { + name string + // A function to generate a statefile + stateFile func(*State) *statefile.File + // The expected request to have taken place + expectedRequest mockClientRequest + // Mark this case as not having a request + expectedError string + // force flag passed to client + force bool +} + +func TestWriteStateForMigration(t *testing.T) { + mgr := &State{ + Client: &mockClient{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 3, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + } + + testCases := []migrationTestCase{ + // Refreshing state before we run the test loop causes a GET + { + name: "refresh state", + stateFile: func(mgr *State) *statefile.File { + return mgr.StateForMigration() + }, + expectedRequest: mockClientRequest{ + Method: "Get", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 3.0, + "terraform_version": "0.0.0", + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + }, + }, + }, + { + name: "cannot import lesser serial without force", + stateFile: func(mgr *State) *statefile.File { + return 
statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedError: "cannot import state with serial 1 over newer state with serial 3", + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, + }, + { + name: "can import lesser serial with force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedRequest: mockClientRequest{ + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 2.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedRequest: mockClientRequest{ + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "different-lineage", + "serial": 3.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + } + + // In normal use (during a Terraform operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. 
+ // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + if err := mgr.WriteState(mgr.State()); err != nil { + t.Fatalf("failed to write initial state: %s", err) + } + + // Our client is a mockClient which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClient) + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sf := tc.stateFile(mgr) + err := mgr.WriteStateForMigration(sf, tc.force) + shouldError := tc.expectedError != "" + + // If we are expecting and error check it and move on + if shouldError { + if err == nil { + t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) + } else if err.Error() != tc.expectedError { + t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) + } + return + } + + if err != nil { + t.Fatalf("test case %q failed: %v", tc.name, err) + } + + // At this point we should just do a normal write and persist + // as would happen from the CLI + mgr.WriteState(mgr.State()) + mgr.PersistState(nil) + + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { + t.Fatalf("incorrect client requests for %q:\n%s", tc.name, diff) + } + }) + } + + logCnt := len(mockClient.log) + if logIdx != logCnt { + log.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} + +// This test runs the same test cases as above, but with +// a client that implements EnableForcePush -- this allows +// us to test that -force continues to work for backends without +// this interface, but that this interface works for those that do. +func TestWriteStateForMigrationWithForcePushClient(t *testing.T) { + mgr := &State{ + Client: &mockClientForcePusher{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 3, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + } + + testCases := []migrationTestCase{ + // Refreshing state before we run the test loop causes a GET + { + name: "refresh state", + stateFile: func(mgr *State) *statefile.File { + return mgr.StateForMigration() + }, + expectedRequest: mockClientRequest{ + Method: "Get", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 3.0, + "terraform_version": "0.0.0", + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + }, + }, + }, + { + name: "cannot import lesser serial without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedError: "cannot import state with serial 1 over newer state with serial 3", + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, + }, + { + name: "can import lesser serial with force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedRequest: mockClientRequest{ + Method: "Force Put", + Content: 
map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 2.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedRequest: mockClientRequest{ + Method: "Force Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "different-lineage", + "serial": 3.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + } + + // In normal use (during a Terraform operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. 
+ // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + if err := mgr.WriteState(mgr.State()); err != nil { + t.Fatalf("failed to write initial state: %s", err) + } + + // Our client is a mockClientForcePusher which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClientForcePusher) + + if mockClient.force { + t.Fatalf("client should not default to force") + } + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Always reset client to not be force pushing + mockClient.force = false + sf := tc.stateFile(mgr) + err := mgr.WriteStateForMigration(sf, tc.force) + shouldError := tc.expectedError != "" + + // If we are expecting and error check it and move on + if shouldError { + if err == nil { + t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) + } else if err.Error() != tc.expectedError { + t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) + } + return + } + + if err != nil { + t.Fatalf("test case %q failed: %v", tc.name, err) + } + + if tc.force && !mockClient.force { + t.Fatalf("test case %q should have enabled force push", tc.name) + } + + // At this point we should just do a normal write and persist + // as would happen from the CLI + mgr.WriteState(mgr.State()) + mgr.PersistState(nil) + + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { + t.Fatalf("incorrect client 
requests for %q:\n%s", tc.name, diff) + } + }) + } + + logCnt := len(mockClient.log) + if logIdx != logCnt { + log.Fatalf("not all requests were read. Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} diff --git a/states/remote/testing.go b/states/remote/testing.go new file mode 100644 index 000000000000..cbd7586b3e30 --- /dev/null +++ b/states/remote/testing.go @@ -0,0 +1,102 @@ +package remote + +import ( + "bytes" + "testing" + + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +// TestClient is a generic function to test any client. +func TestClient(t *testing.T, c Client) { + var buf bytes.Buffer + s := statemgr.TestFullInitialState() + sf := statefile.New(s, "stub-lineage", 2) + err := statefile.Write(sf, &buf) + if err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + if err := c.Put(data); err != nil { + t.Fatalf("put: %s", err) + } + + p, err := c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(p.Data, data) { + t.Fatalf("expected full state %q\n\ngot: %q", string(p.Data), string(data)) + } + + if err := c.Delete(); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err = c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("expected empty state, got: %q", string(p.Data)) + } +} + +// Test the lock implementation for a remote.Client. +// This test requires 2 client instances, in oder to have multiple remote +// clients since some implementations may tie the client to the lock, or may +// have reentrant locks. 
+func TestRemoteLocks(t *testing.T, a, b Client) { + lockerA, ok := a.(statemgr.Locker) + if !ok { + t.Fatal("client A not a statemgr.Locker") + } + + lockerB, ok := b.(statemgr.Locker) + if !ok { + t.Fatal("client B not a statemgr.Locker") + } + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + if _, ok := err.(*statemgr.LockError); !ok { + t.Errorf("expected a LockError, but was %t: %s", err, err) + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // TODO: Should we enforce that Unlock requires the correct ID? +} diff --git a/states/resource.go b/states/resource.go new file mode 100644 index 000000000000..28223671d5a8 --- /dev/null +++ b/states/resource.go @@ -0,0 +1,215 @@ +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the absolute address for the resource this state object + // belongs to. + Addr addrs.AbsResource + + // Instances contains the potentially-multiple instances associated with + // this resource. This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. 
+ Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. + ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. +func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { + return rs.Instances[key] +} + +// CreateInstance creates an instance and adds it to the resource +func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance { + is := NewResourceInstance() + rs.Instances[key] = is + return is +} + +// EnsureInstance returns the state for the instance with the given key, +// creating a new empty state for it if one doesn't already exist. +// +// Because this may create and save a new state, it is considered to be +// a write operation. +func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { + ret := rs.Instance(key) + if ret == nil { + ret = NewResourceInstance() + rs.Instances[key] = ret + } + return ret +} + +// ResourceInstance represents the state of a particular instance of a resource. +type ResourceInstance struct { + // Current, if non-nil, is the remote object that is currently represented + // by the corresponding resource instance. + Current *ResourceInstanceObjectSrc + + // Deposed, if len > 0, contains any remote objects that were previously + // represented by the corresponding resource instance but have been + // replaced and are pending destruction due to the create_before_destroy + // lifecycle mode. + Deposed map[DeposedKey]*ResourceInstanceObjectSrc +} + +// NewResourceInstance constructs and returns a new ResourceInstance, ready to +// use. 
+func NewResourceInstance() *ResourceInstance { + return &ResourceInstance{ + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + } +} + +// HasCurrent returns true if this resource instance has a "current"-generation +// object. Most instances do, but this can briefly be false during a +// create-before-destroy replace operation when the current has been deposed +// but its replacement has not yet been created. +func (i *ResourceInstance) HasCurrent() bool { + return i != nil && i.Current != nil +} + +// HasDeposed returns true if this resource instance has a deposed object +// with the given key. +func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { + return i != nil && i.Deposed[key] != nil +} + +// HasAnyDeposed returns true if this resource instance has one or more +// deposed objects. +func (i *ResourceInstance) HasAnyDeposed() bool { + return i != nil && len(i.Deposed) > 0 +} + +// HasObjects returns true if this resource has any objects at all, whether +// current or deposed. +func (i *ResourceInstance) HasObjects() bool { + return i.Current != nil || len(i.Deposed) != 0 +} + +// deposeCurrentObject is part of the real implementation of +// SyncState.DeposeResourceInstanceObject. The exported method uses a lock +// to ensure that we can safely allocate an unused deposed key without +// collision. +func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { + if !i.HasCurrent() { + return NotDeposed + } + + key := forceKey + if key == NotDeposed { + key = i.findUnusedDeposedKey() + } else { + if _, exists := i.Deposed[key]; exists { + panic(fmt.Sprintf("forced key %s is already in use", forceKey)) + } + } + i.Deposed[key] = i.Current + i.Current = nil + return key +} + +// GetGeneration retrieves the object of the given generation from the +// ResourceInstance, or returns nil if there is no such object. +// +// If the given generation is nil or invalid, this method will panic. 
+func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { + if gen == CurrentGen { + return i.Current + } + if dk, ok := gen.(DeposedKey); ok { + return i.Deposed[dk] + } + if gen == nil { + panic("get with nil Generation") + } + // Should never fall out here, since the above covers all possible + // Generation values. + panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. To avoid this risk, instead use the +// DeposeResourceInstanceObject method on the SyncState wrapper type, which +// allocates a key and uses it atomically. +func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { + return i.findUnusedDeposedKey() +} + +// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance. +func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { + for { + key := NewDeposedKey() + if _, exists := i.Deposed[key]; !exists { + return key + } + // Spin until we find a unique one. This shouldn't take long, because + // we have a 32-bit keyspace and there's rarely more than one deposed + // instance. + } +} + +// DeposedKey is a 8-character hex string used to uniquely identify deposed +// instance objects in the state. +type DeposedKey string + +// NotDeposed is a special invalid value of DeposedKey that is used to represent +// the absense of a deposed key. It must not be used as an actual deposed key. +const NotDeposed = DeposedKey("") + +var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) + +// NewDeposedKey generates a pseudo-random deposed key. 
Because of the short +// length of these keys, uniqueness is not a natural consequence and so the +// caller should test to see if the generated key is already in use and generate +// another if so, until a unique key is found. +func NewDeposedKey() DeposedKey { + v := deposedKeyRand.Uint32() + return DeposedKey(fmt.Sprintf("%08x", v)) +} + +func (k DeposedKey) String() string { + return string(k) +} + +func (k DeposedKey) GoString() string { + ks := string(k) + switch { + case ks == "": + return "states.NotDeposed" + default: + return fmt.Sprintf("states.DeposedKey(%s)", ks) + } +} + +// Generation is a helper method to convert a DeposedKey into a Generation. +// If the reciever is anything other than NotDeposed then the result is +// just the same value as a Generation. If the receiver is NotDeposed then +// the result is CurrentGen. +func (k DeposedKey) Generation() Generation { + if k == NotDeposed { + return CurrentGen + } + return k +} + +// generation is an implementation of Generation. +func (k DeposedKey) generation() {} diff --git a/internal/states/resource_test.go b/states/resource_test.go similarity index 100% rename from internal/states/resource_test.go rename to states/resource_test.go diff --git a/states/state.go b/states/state.go new file mode 100644 index 000000000000..39afe1e4005d --- /dev/null +++ b/states/state.go @@ -0,0 +1,634 @@ +package states + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/getproviders" +) + +// State is the top-level type of a Terraform state. +// +// A state should be mutated only via its accessor methods, to ensure that +// invariants are preserved. +// +// Access to State and the nested values within it is not concurrency-safe, +// so when accessing a State object concurrently it is the caller's +// responsibility to ensure that only one write is in progress at a time +// and that reads only occur when no write is in progress. 
The most common +// way to achieve this is to wrap the State in a SyncState and use the +// higher-level atomic operations supported by that type. +type State struct { + // Modules contains the state for each module. The keys in this map are + // an implementation detail and must not be used by outside callers. + Modules map[string]*Module + + // CheckResults contains a snapshot of the statuses of checks at the + // end of the most recent update to the state. Callers might compare + // checks between runs to see if e.g. a previously-failing check has + // been fixed since the last run, or similar. + // + // CheckResults can be nil to indicate that there are no check results + // from the previous run at all, which is subtly different than the + // previous run having affirmatively recorded that there are no checks + // to run. For example, if this object was created from a state snapshot + // created by a version of Terraform that didn't yet support checks + // then this field will be nil. + CheckResults *CheckResults +} + +// NewState constructs a minimal empty state, containing an empty root module. +func NewState() *State { + modules := map[string]*Module{} + modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) + return &State{ + Modules: modules, + } +} + +// BuildState is a helper -- primarily intended for tests -- to build a state +// using imperative code against the StateSync type while still acting as +// an expression of type *State to assign into a containing struct. +func BuildState(cb func(*SyncState)) *State { + s := NewState() + cb(s.SyncWrapper()) + return s +} + +// Empty returns true if there are no resources or populated output values +// in the receiver. In other words, if this state could be safely replaced +// with the return value of NewState and be functionally equivalent. 
+func (s *State) Empty() bool { + if s == nil { + return true + } + for _, ms := range s.Modules { + if len(ms.Resources) != 0 { + return false + } + if len(ms.OutputValues) != 0 { + return false + } + } + return true +} + +// Module returns the state for the module with the given address, or nil if +// the requested module is not tracked in the state. +func (s *State) Module(addr addrs.ModuleInstance) *Module { + if s == nil { + panic("State.Module on nil *State") + } + return s.Modules[addr.String()] +} + +// ModuleInstances returns the set of Module states that matches the given path. +func (s *State) ModuleInstances(addr addrs.Module) []*Module { + var ms []*Module + for _, m := range s.Modules { + if m.Addr.Module().Equal(addr) { + ms = append(ms, m) + } + } + return ms +} + +// ModuleOutputs returns all outputs for the given module call under the +// parentAddr instance. +func (s *State) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue { + var os []*OutputValue + for _, m := range s.Modules { + // can't get outputs from the root module + if m.Addr.IsRoot() { + continue + } + + parent, call := m.Addr.Call() + // make sure this is a descendent in the correct path + if !parentAddr.Equal(parent) { + continue + } + + // and check if this is the correct child + if call.Name != module.Name { + continue + } + + for _, o := range m.OutputValues { + os = append(os, o) + } + } + + return os +} + +// RemoveModule removes the module with the given address from the state, +// unless it is the root module. The root module cannot be deleted, and so +// this method will panic if that is attempted. +// +// Removing a module implicitly discards all of the resources, outputs and +// local values within it, and so this should usually be done only for empty +// modules. For callers accessing the state through a SyncState wrapper, modules +// are automatically pruned if they are empty after one of their contained +// elements is removed. 
+func (s *State) RemoveModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + panic("attempted to remove root module") + } + + delete(s.Modules, addr.String()) +} + +// RootModule is a convenient alias for Module(addrs.RootModuleInstance). +func (s *State) RootModule() *Module { + if s == nil { + panic("RootModule called on nil State") + } + return s.Modules[addrs.RootModuleInstance.String()] +} + +// EnsureModule returns the state for the module with the given address, +// creating and adding a new one if necessary. +// +// Since this might modify the state to add a new instance, it is considered +// to be a write operation. +func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { + ms := s.Module(addr) + if ms == nil { + ms = NewModule(addr) + s.Modules[addr.String()] = ms + } + return ms +} + +// HasManagedResourceInstanceObjects returns true if there is at least one +// resource instance object (current or deposed) associated with a managed +// resource in the receiving state. +// +// A true result would suggest that just discarding this state without first +// destroying these objects could leave "dangling" objects in remote systems, +// no longer tracked by any Terraform state. +func (s *State) HasManagedResourceInstanceObjects() bool { + if s == nil { + return false + } + for _, ms := range s.Modules { + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + for _, is := range rs.Instances { + if is.Current != nil || len(is.Deposed) != 0 { + return true + } + } + } + } + return false +} + +// Resource returns the state for the resource with the given address, or nil +// if no such resource is tracked in the state. +func (s *State) Resource(addr addrs.AbsResource) *Resource { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.Resource(addr.Resource) +} + +// Resources returns the set of resources that match the given configuration path. 
+func (s *State) Resources(addr addrs.ConfigResource) []*Resource { + var ret []*Resource + for _, m := range s.ModuleInstances(addr.Module) { + r := m.Resource(addr.Resource) + if r != nil { + ret = append(ret, r) + } + } + return ret +} + +// AllManagedResourceInstanceObjectAddrs returns a set of addresses for all of +// the leaf resource instance objects associated with managed resources that +// are tracked in this state. +// +// This result is the set of objects that would be effectively "forgotten" +// (like "terraform state rm") if this state were totally discarded, such as +// by deleting a workspace. This function is intended only for reporting +// context in error messages, such as when we reject deleting a "non-empty" +// workspace as detected by s.HasManagedResourceInstanceObjects. +// +// The ordering of the result is meaningless but consistent. DeposedKey will +// be NotDeposed (the zero value of DeposedKey) for any "current" objects. +// This method is guaranteed to return at least one item if +// s.HasManagedResourceInstanceObjects returns true for the same state, and +// to return a zero-length slice if it returns false. +func (s *State) AllResourceInstanceObjectAddrs() []struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey +} { + if s == nil { + return nil + } + + // We use an unnamed return type here just because we currently have no + // general need to return pairs of instance address and deposed key aside + // from this method, and this method itself is only of marginal value + // when producing some error messages. + // + // If that need ends up arising more in future then it might make sense to + // name this as addrs.AbsResourceInstanceObject, although that would require + // moving DeposedKey into the addrs package too. 
+ type ResourceInstanceObject = struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey + } + var ret []ResourceInstanceObject + + for _, ms := range s.Modules { + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + + for instKey, is := range rs.Instances { + instAddr := rs.Addr.Instance(instKey) + if is.Current != nil { + ret = append(ret, ResourceInstanceObject{instAddr, NotDeposed}) + } + for deposedKey := range is.Deposed { + ret = append(ret, ResourceInstanceObject{instAddr, deposedKey}) + } + } + } + } + + sort.SliceStable(ret, func(i, j int) bool { + objI, objJ := ret[i], ret[j] + switch { + case !objI.Instance.Equal(objJ.Instance): + return objI.Instance.Less(objJ.Instance) + default: + return objI.DeposedKey < objJ.DeposedKey + } + }) + + return ret +} + +// ResourceInstance returns the state for the resource instance with the given +// address, or nil if no such resource is tracked in the state. +func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + if s == nil { + panic("State.ResourceInstance on nil *State") + } + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.ResourceInstance(addr.Resource) +} + +// OutputValue returns the state for the output value with the given address, +// or nil if no such output value is tracked in the state. +func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.OutputValues[addr.OutputValue.Name] +} + +// LocalValue returns the value of the named local value with the given address, +// or cty.NilVal if no such value is tracked in the state. 
+func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { + ms := s.Module(addr.Module) + if ms == nil { + return cty.NilVal + } + return ms.LocalValues[addr.LocalValue.Name] +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving state. +// +// The result is de-duplicated so that each distinct address appears only once. +func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { + if s == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, ms := range s.Modules { + for _, rc := range ms.Resources { + m[rc.ProviderConfig.String()] = rc.ProviderConfig + } + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// ProviderRequirements returns a description of all of the providers that +// are required to work with the receiving state. +// +// Because the state does not track specific version information for providers, +// the requirements returned by this method will always be unconstrained. +// The result should usually be merged with a Requirements derived from the +// current configuration in order to apply some constraints. +func (s *State) ProviderRequirements() getproviders.Requirements { + configAddrs := s.ProviderAddrs() + ret := make(getproviders.Requirements, len(configAddrs)) + for _, configAddr := range configAddrs { + ret[configAddr.Provider] = nil // unconstrained dependency + } + return ret +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. 
+// +// This should generally be used only after a "terraform destroy" operation, +// to finalize the cleanup of the state. It is not correct to use this after +// other operations because if a resource has "count = 0" or "for_each" over +// an empty collection then we want to retain it in the state so that references +// to it, particularly in "strange" contexts like "terraform console", can be +// properly resolved. +// +// This method MUST NOT be called concurrently with other readers and writers +// of the receiving state. +func (s *State) PruneResourceHusks() { + for _, m := range s.Modules { + m.PruneResourceHusks() + if len(m.Resources) == 0 && !m.Addr.IsRoot() { + s.RemoveModule(m.Addr) + } + } +} + +// SyncWrapper returns a SyncState object wrapping the receiver. +func (s *State) SyncWrapper() *SyncState { + return &SyncState{ + state: s, + } +} + +// MoveAbsResource moves the given src AbsResource's current state to the new +// dst address. This will panic if the src AbsResource does not exist in state, +// or if there is already a resource at the dst address. It is the caller's +// responsibility to verify the validity of the move (for example, that the src +// and dst are compatible types). +func (s *State) MoveAbsResource(src, dst addrs.AbsResource) { + // verify that the src address exists and the dst address does not + rs := s.Resource(src) + if rs == nil { + panic(fmt.Sprintf("no state for src address %s", src.String())) + } + + ds := s.Resource(dst) + if ds != nil { + panic(fmt.Sprintf("dst resource %s already exists", dst.String())) + } + + ms := s.Module(src.Module) + ms.RemoveResource(src.Resource) + + // Remove the module if it is empty (and not root) after removing the + // resource. 
+	if !ms.Addr.IsRoot() && ms.empty() {
+		s.RemoveModule(src.Module)
+	}
+
+	// Update the address before adding it to the state
+	rs.Addr = dst
+	s.EnsureModule(dst.Module).Resources[dst.Resource.String()] = rs
+}
+
+// MaybeMoveAbsResource moves the given src AbsResource's current state to the
+// new dst address. This function will succeed if the src address exists in
+// state and the dst address does not; the return value indicates whether
+// or not the move occurred. This function will panic if either the src does not
+// exist or the dst does exist (but not both).
+func (s *State) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool {
+	// Get the source and destination addresses from state.
+	rs := s.Resource(src)
+	ds := s.Resource(dst)
+
+	// Normal case: the src exists in state, dst does not
+	if rs != nil && ds == nil {
+		s.MoveAbsResource(src, dst)
+		return true
+	}
+
+	if rs == nil && ds != nil {
+		// The source is not in state, the destination is. This is not
+		// guaranteed to be idempotent since we aren't tracking exact moves, but
+		// it's useful information for the caller.
+		return false
+	} else {
+		panic("invalid move")
+	}
+}
+
+// MoveAbsResourceInstance moves the given src AbsResourceInstance's current state to
+// the new dst address. This will panic if the src AbsResourceInstance does not
+// exist in state, or if there is already a resource at the dst address. It is
+// the caller's responsibility to verify the validity of the move (for example,
+// that the src and dst are compatible types).
+func (s *State) MoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) {
+	srcInstanceState := s.ResourceInstance(src)
+	if srcInstanceState == nil {
+		panic(fmt.Sprintf("no state for src address %s", src.String()))
+	}
+
+	dstInstanceState := s.ResourceInstance(dst)
+	if dstInstanceState != nil {
+		panic(fmt.Sprintf("dst resource %s already exists", dst.String()))
+	}
+
+	srcResourceState := s.Resource(src.ContainingResource())
+	srcProviderAddr := srcResourceState.ProviderConfig
+	dstResourceAddr := dst.ContainingResource()
+
+	// Remove the source resource instance from the module's state, and then the
+	// module if empty.
+	ms := s.Module(src.Module)
+	ms.ForgetResourceInstanceAll(src.Resource)
+	if !ms.Addr.IsRoot() && ms.empty() {
+		s.RemoveModule(src.Module)
+	}
+
+	dstModule := s.EnsureModule(dst.Module)
+
+	// See if there is already a resource we can add this instance to.
+	dstResourceState := s.Resource(dstResourceAddr)
+	if dstResourceState == nil {
+		// If we're moving to an address without an index then that
+		// suggests the user's intent is to establish both the
+		// resource and the instance at the same time (since the
+		// address covers both). If there's an index in the
+		// target then allow creating the new instance here.
+		dstModule.SetResourceProvider(
+			dstResourceAddr.Resource,
+			srcProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource
+		)
+		dstResourceState = dstModule.Resource(dstResourceAddr.Resource)
+	}
+
+	dstResourceState.Instances[dst.Resource.Key] = srcInstanceState
+}
+
+// MaybeMoveAbsResourceInstance moves the given src AbsResourceInstance's
+// current state to the new dst address. This function will succeed if the
+// src address exists in state and the dst address does not; the return
+// value indicates whether or not the move occurred. This function will panic if
+// either the src does not exist or the dst does exist (but not both).
+func (s *State) MaybeMoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) bool {
+	// get the src and dst resource instances from state
+	rs := s.ResourceInstance(src)
+	ds := s.ResourceInstance(dst)
+
+	// Normal case: the src exists in state, dst does not
+	if rs != nil && ds == nil {
+		s.MoveAbsResourceInstance(src, dst)
+		return true
+	}
+
+	if rs == nil && ds != nil {
+		// The source is not in state, the destination is. This is not
+		// guaranteed to be idempotent since we aren't tracking exact moves, but
+		// it's useful information.
+		return false
+	} else {
+		panic("invalid move")
+	}
+}
+
+// MoveModuleInstance moves the given src ModuleInstance's current state to the
+// new dst address. This will panic if the src ModuleInstance does not
+// exist in state, or if there is already a resource at the dst address. It is
+// the caller's responsibility to verify the validity of the move.
+func (s *State) MoveModuleInstance(src, dst addrs.ModuleInstance) {
+	if src.IsRoot() || dst.IsRoot() {
+		panic("cannot move to or from root module")
+	}
+
+	srcMod := s.Module(src)
+	if srcMod == nil {
+		panic(fmt.Sprintf("no state for src module %s", src.String()))
+	}
+
+	dstMod := s.Module(dst)
+	if dstMod != nil {
+		panic(fmt.Sprintf("dst module %s already exists in state", dst.String()))
+	}
+
+	s.RemoveModule(src)
+
+	srcMod.Addr = dst
+	s.EnsureModule(dst)
+	s.Modules[dst.String()] = srcMod
+
+	// Update any Resource's addresses.
+	if srcMod.Resources != nil {
+		for _, r := range srcMod.Resources {
+			r.Addr.Module = dst
+		}
+	}
+
+	// Update any OutputValues's addresses.
+	if srcMod.OutputValues != nil {
+		for _, ov := range srcMod.OutputValues {
+			ov.Addr.Module = dst
+		}
+	}
+}
+
+// MaybeMoveModuleInstance moves the given src ModuleInstance's current state to
+// the new dst address. This function will succeed if the src address exists
+// in state and the dst address does not; the return value indicates
+// whether or not the move occurred. This function will panic if either the src
+// does not exist or the dst does exist (but not both).
+func (s *State) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool {
+	if src.IsRoot() || dst.IsRoot() {
+		panic("cannot move to or from root module")
+	}
+
+	srcMod := s.Module(src)
+	dstMod := s.Module(dst)
+
+	// Normal case: the src exists in state, dst does not
+	if srcMod != nil && dstMod == nil {
+		s.MoveModuleInstance(src, dst)
+		return true
+	}
+
+	if srcMod == nil && dstMod != nil {
+		// The source is not in state, the destination is. This is not
+		// guaranteed to be idempotent since we aren't tracking exact moves, but
+		// it's useful information.
+		return false
+	} else {
+		panic("invalid move")
+	}
+}
+
+// MoveModule takes a source and destination addrs.Module address, and moves all
+// state Modules which are contained by the src address to the new address.
+func (s *State) MoveModule(src, dst addrs.AbsModuleCall) {
+	if src.Module.IsRoot() || dst.Module.IsRoot() {
+		panic("cannot move to or from root module")
+	}
+
+	// Modules only exist as ModuleInstances in state, so we need to check each
+	// state Module and see if it is contained by the src address to get a full
+	// list of modules to move.
+	var srcMIs []*Module
+	for _, module := range s.Modules {
+		if !module.Addr.IsRoot() {
+			if src.Module.TargetContains(module.Addr) {
+				srcMIs = append(srcMIs, module)
+			}
+		}
+	}
+
+	if len(srcMIs) == 0 {
+		panic(fmt.Sprintf("no matching module instances found for src module %s", src.String()))
+	}
+
+	for _, ms := range srcMIs {
+		newInst := make(addrs.ModuleInstance, len(ms.Addr))
+		copy(newInst, ms.Addr)
+		if ms.Addr.IsDeclaredByCall(src) {
+			// Easy case: we just need to update the last step with the new name
+			newInst[len(newInst)-1].Name = dst.Call.Name
+		} else {
+			// Trickier: this Module is a submodule; we need to find and update
+			// only that appropriate step
+			for s := range newInst {
+				if newInst[s].Name == src.Call.Name {
+					newInst[s].Name = dst.Call.Name
+				}
+			}
+		}
+		s.MoveModuleInstance(ms.Addr, newInst)
+	}
+}
diff --git a/internal/states/state_deepcopy.go b/states/state_deepcopy.go
similarity index 99%
rename from internal/states/state_deepcopy.go
rename to states/state_deepcopy.go
index b8498d53ff11..6204f3e4879b 100644
--- a/internal/states/state_deepcopy.go
+++ b/states/state_deepcopy.go
@@ -1,7 +1,7 @@
 package states
 
 import (
-	"github.com/hashicorp/terraform/internal/addrs"
+	"github.com/hashicorp/terraform/addrs"
 	"github.com/zclconf/go-cty/cty"
 )
 
diff --git a/internal/states/state_equal.go b/states/state_equal.go
similarity index 97%
rename from internal/states/state_equal.go
rename to states/state_equal.go
index b37aba062768..1e6c04c5d12b 100644
--- a/internal/states/state_equal.go
+++ b/states/state_equal.go
@@ -3,7 +3,7 @@ package states
 import (
 	"reflect"
 
-	"github.com/hashicorp/terraform/internal/addrs"
+	"github.com/hashicorp/terraform/addrs"
 )
 
 // Equal returns true if the receiver is functionally equivalent to other,
diff --git a/internal/states/state_string.go b/states/state_string.go
similarity index 98%
rename from internal/states/state_string.go
rename to states/state_string.go
index 2e34834a5518..0f74d5965924 100644
--- a/internal/states/state_string.go
+++ b/states/state_string.go
@@ -10,8 +10,8 @@
 	import (
 	ctyjson "github.com/zclconf/go-cty/cty/json"
 
-	"github.com/hashicorp/terraform/internal/addrs"
-	"github.com/hashicorp/terraform/internal/configs/hcl2shim"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs/hcl2shim"
 )
 
 // String returns a rather-odd string representation of the entire state.
diff --git a/states/state_test.go b/states/state_test.go new file mode 100644 index 000000000000..fe43d8644c24 --- /dev/null +++ b/states/state_test.go @@ -0,0 +1,1008 @@ +package states + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-test/deep" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang/marks" +) + +func TestState(t *testing.T) { + // This basic tests exercises the main mutation methods to construct + // a state. It is not fully comprehensive, so other tests should visit + // more esoteric codepaths. + + state := NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) + multiModA := state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("a"))) + multiModA.SetOutputValue("pizza", cty.StringVal("cheese"), false) + multiModB := state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("b"))) + multiModB.SetOutputValue("pizza", cty.StringVal("sausage"), false) + + want := &State{ + Modules: map[string]*Module{ + "": { + Addr: addrs.RootModuleInstance, + LocalValues: map[string]cty.Value{ + "foo": cty.StringVal("foo 
value"), + }, + OutputValues: map[string]*OutputValue{ + "bar": { + Addr: addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{ + Name: "bar", + }, + }, + Value: cty.StringVal("bar value"), + Sensitive: false, + }, + "secret": { + Addr: addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{ + Name: "secret", + }, + }, + Value: cty.StringVal("secret value"), + Sensitive: true, + }, + }, + Resources: map[string]*Resource{ + "test_thing.baz": { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }.Absolute(addrs.RootModuleInstance), + + Instances: map[addrs.InstanceKey]*ResourceInstance{ + addrs.IntKey(0): { + Current: &ResourceInstanceObjectSrc{ + SchemaVersion: 1, + Status: ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + }, + "module.child": { + Addr: addrs.RootModuleInstance.Child("child", addrs.NoKey), + LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("hawaiian"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + `module.multi["a"]`: { + Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), + LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("cheese"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + `module.multi["b"]`: { + Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), + 
LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("sausage"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + }, + } + + { + // Our structure goes deep, so we need to temporarily override the + // deep package settings to ensure that we visit the full structure. + oldDeepDepth := deep.MaxDepth + oldDeepCompareUnexp := deep.CompareUnexportedFields + deep.MaxDepth = 50 + deep.CompareUnexportedFields = true + defer func() { + deep.MaxDepth = oldDeepDepth + deep.CompareUnexportedFields = oldDeepCompareUnexp + }() + } + + for _, problem := range deep.Equal(state, want) { + t.Error(problem) + } + + expectedOutputs := map[string]string{ + `module.multi["a"].output.pizza`: "cheese", + `module.multi["b"].output.pizza`: "sausage", + } + + for _, o := range state.ModuleOutputs(addrs.RootModuleInstance, addrs.ModuleCall{Name: "multi"}) { + addr := o.Addr.String() + expected := expectedOutputs[addr] + delete(expectedOutputs, addr) + + if expected != o.Value.AsString() { + t.Fatalf("expected %q:%q, got %q", addr, expected, o.Value.AsString()) + } + } + + for addr, o := range expectedOutputs { + t.Fatalf("missing output %q:%q", addr, o) + } +} + +func TestStateDeepCopyObject(t *testing.T) { + obj := &ResourceInstanceObject{ + Value: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("id"), + }), + Private: []byte("private"), + Status: ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + Module: addrs.RootModule, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }, + }, + }, + CreateBeforeDestroy: true, + } + + objCopy := obj.DeepCopy() + if !reflect.DeepEqual(obj, objCopy) { + t.Fatalf("not equal\n%#v\n%#v", obj, objCopy) + } +} + +func TestStateDeepCopy(t 
*testing.T) { + state := NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + Private: []byte("private data"), + Dependencies: []addrs.ConfigResource{}, + CreateBeforeDestroy: true, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + // Sensitive path at "woozles" + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "woozles"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Private: []byte("private data"), + Dependencies: []addrs.ConfigResource{ + { + Module: addrs.RootModule, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) + + stateCopy := state.DeepCopy() + if !state.Equal(stateCopy) { + t.Fatalf("\nexpected:\n%q\ngot:\n%q\n", state, stateCopy) + } +} + +func 
TestStateHasResourceInstanceObjects(t *testing.T) { + providerConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("test/test"), + } + childModuleProviderConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("child"), + Provider: addrs.MustParseProviderSourceString("test/test"), + } + + tests := map[string]struct { + Setup func(ss *SyncState) + Want bool + }{ + "empty": { + func(ss *SyncState) {}, + false, + }, + "one current, ready object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + true, + }, + "one current, ready object in child module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("module.child.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + childModuleProviderConfig, + ) + }, + true, + }, + "one current, tainted object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one deposed, ready object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceDeposed( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + DeposedKey("uhoh"), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one empty resource husk in root module": { + func(ss *SyncState) { + // Current Terraform doesn't actually create resource husks + // as part of its everyday work, so this is a "should never + // happen" case but we'll test to make sure we're robust to + // it anyway, because this was a historical bug 
blocking + // "terraform workspace delete" and similar. + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + s := ss.Lock() + delete(s.Modules[""].Resources["test.foo"].Instances, addrs.NoKey) + ss.Unlock() + }, + false, + }, + "one current data resource object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("data.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + false, // data resources aren't managed resources, so they don't count + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + state := BuildState(test.Setup) + got := state.HasManagedResourceInstanceObjects() + if got != test.Want { + t.Errorf("wrong result\nstate content: (using legacy state string format; might not be comprehensive)\n%s\n\ngot: %t\nwant: %t", state, got, test.Want) + } + }) + } + +} + +func TestState_MoveAbsResource(t *testing.T) { + // Set up a starter state for the embedded tests, which should start from a copy of this state. 
+ state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + + t.Run("basic move", func(t *testing.T) { + s := state.DeepCopy() + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("one")) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(dstModule) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if s.Module(dstModule) == nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("from a child module to root", func(t *testing.T) { + s := state.DeepCopy() + srcModule := 
addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.IntKey(0)), // Moving the AbsResouce moves all instances + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.IntKey(1)), // Moving the AbsResouce moves all instances + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(addrs.RootModuleInstance) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + if len(s.RootModule().Resources) != 2 { + t.Fatalf("wrong number of resources in state; expected 2, found %d", len(s.RootModule().Resources)) + } + + if len(s.Resource(dst).Instances) != 2 { + t.Fatalf("wrong number of resource instances for dst, got %d expected 2", len(s.Resource(dst).Instances)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("module to new module", func(t *testing.T) { + s := NewState() + 
srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("module to new module", func(t *testing.T) { + s := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: 
addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResource(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Trying to move a resource that doesn't exist in state to a resource which does exist should be a noop. 
+ t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // src resource from the state above + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + t.Run("resource to resource instance", func(t *testing.T) { + s := state.DeepCopy() + // For a little extra fun, move a resource to a resource instance: test_thing.foo to test_thing.foo[1] + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + // test_thing.foo to module.kinder.test_thing.foo["baz"] + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(dstModule) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if s.Module(dstModule) == 
nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // For a little extra fun, let's go from a resource to a resource instance: test_thing.foo to test_thing.bar[1] + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + got := state.ResourceInstance(dst) + if got == nil { + t.Fatal("destination resource instance not in state") + } + }) + + // Moving a resource instance that doesn't exist in state to a resource which does exist should be a noop. 
+ t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModuleInstance(t *testing.T) { + state := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + m := state.EnsureModule(srcModule) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dstModule := addrs.RootModuleInstance.Child("child", addrs.IntKey(3)) + state.MoveModuleInstance(srcModule, dstModule) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(state.Modules) != 2 { // kinder[3] and root + t.Fatalf("wrong number of modules in state. 
Expected 2, got %d", len(state.Modules)) + } + + got := state.Module(dstModule) + if got == nil { + t.Fatal("dstModule not found") + } + + gone := state.Module(srcModule) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + + r := got.Resource(mustAbsResourceAddr("test_thing.foo").Resource) + if r.Addr.Module.String() != dstModule.String() { + fmt.Println(r.Addr.Module.String()) + t.Fatal("resource address was not updated") + } + +} + +func TestState_MaybeMoveModuleInstance(t *testing.T) { + state := NewState() + src := addrs.RootModuleInstance.Child("child", addrs.StringKey("a")) + cm := state.EnsureModule(src) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dst := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("b")) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Second move, should be a noop + t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModule(t *testing.T) { + // For this test, add two module instances (kinder and kinder["a"]). + // MoveModule(kinder) should move both instances. + state := NewState() // starter state, should be copied by the subtests. 
+ srcModule := addrs.RootModule.Child("kinder") + m := state.EnsureModule(srcModule.UnkeyedInstanceShim()) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + moduleInstance := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("a")) + mi := state.EnsureModule(moduleInstance) + mi.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, mc := srcModule.Call() + src := mc.Absolute(addrs.RootModuleInstance.Child("kinder", addrs.NoKey)) + + t.Run("basic", func(t *testing.T) { + s := state.DeepCopy() + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(s.Modules) != 3 { // child, child["a"] and root + t.Fatalf("wrong number of modules in state. 
Expected 3, got %d", len(s.Modules)) + } + + got := s.Module(dst.Module) + if got == nil { + t.Fatal("dstModule not found") + } + + got = s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if got == nil { + t.Fatal("dstModule instance \"a\" not found") + } + + gone := s.Module(srcModule.UnkeyedInstanceShim()) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + }) + + t.Run("nested modules", func(t *testing.T) { + s := state.DeepCopy() + + // add a child module to module.kinder + mi := mustParseModuleInstanceStr(`module.kinder.module.grand[1]`) + m := s.EnsureModule(mi) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + moved := s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if moved == nil { + t.Fatal("dstModule not found") + } + + // The nested module's relative address should also have been updated + nested := s.Module(mustParseModuleInstanceStr(`module.child.module.grand[1]`)) + if nested == nil { + t.Fatal("nested child module of src wasn't moved") + } + }) +} + +func mustParseModuleInstanceStr(str string) addrs.ModuleInstance { + addr, diags := addrs.ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +func mustAbsResourceAddr(s string) addrs.AbsResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} diff --git a/states/statefile/diagnostics.go b/states/statefile/diagnostics.go new file mode 
100644 index 000000000000..a6d88ecd5844 --- /dev/null +++ b/states/statefile/diagnostics.go @@ -0,0 +1,62 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" +) + +const invalidFormat = "Invalid state file format" + +// jsonUnmarshalDiags is a helper that translates errors returned from +// json.Unmarshal into hopefully-more-helpful diagnostics messages. +func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if err == nil { + return diags + } + + switch tErr := err.(type) { + case *json.SyntaxError: + // We've usually already successfully parsed a source file as JSON at + // least once before we'd use jsonUnmarshalDiags with it (to sniff + // the version number) so this particular error should not appear much + // in practice. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + // This is likely to be the most common area, describing a + // non-conformance between the file and the expected file format + // at a semantic level. + if tErr.Field != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), + )) + break + } else { + // Without a field name, we can't really say anything helpful. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + "The state file does not conform to the expected JSON data structure.", + )) + } + default: + // Fallback for all other types of errors. This can happen only for + // custom UnmarshalJSON implementations, so should be encountered + // only rarely. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), + )) + } + + return diags +} diff --git a/internal/states/statefile/doc.go b/states/statefile/doc.go similarity index 100% rename from internal/states/statefile/doc.go rename to states/statefile/doc.go diff --git a/internal/states/statefile/file.go b/states/statefile/file.go similarity index 97% rename from internal/states/statefile/file.go rename to states/statefile/file.go index 631807b11301..6e202401999b 100644 --- a/internal/states/statefile/file.go +++ b/states/statefile/file.go @@ -3,7 +3,7 @@ package statefile import ( version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/states" tfversion "github.com/hashicorp/terraform/version" ) diff --git a/internal/states/statefile/marshal_equal.go b/states/statefile/marshal_equal.go similarity index 95% rename from internal/states/statefile/marshal_equal.go rename to states/statefile/marshal_equal.go index 2b386cbb7389..4948b39b9ed0 100644 --- a/internal/states/statefile/marshal_equal.go +++ b/states/statefile/marshal_equal.go @@ -3,7 +3,7 @@ package statefile import ( "bytes" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/states" ) // StatesMarshalEqual returns true if and only if the two given states have diff --git a/internal/states/statefile/read.go b/states/statefile/read.go similarity index 99% rename from internal/states/statefile/read.go rename to states/statefile/read.go index 61f8e87d6c7a..8abd3be14da2 100644 --- a/internal/states/statefile/read.go +++ b/states/statefile/read.go @@ -10,7 +10,7 @@ import ( version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" tfversion "github.com/hashicorp/terraform/version" ) diff --git 
a/internal/states/statefile/roundtrip_test.go b/states/statefile/roundtrip_test.go similarity index 100% rename from internal/states/statefile/roundtrip_test.go rename to states/statefile/roundtrip_test.go diff --git a/internal/states/statefile/testdata/roundtrip/v1-simple.in.tfstate b/states/statefile/testdata/roundtrip/v1-simple.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v1-simple.in.tfstate rename to states/statefile/testdata/roundtrip/v1-simple.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v1-simple.out.tfstate b/states/statefile/testdata/roundtrip/v1-simple.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v1-simple.out.tfstate rename to states/statefile/testdata/roundtrip/v1-simple.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate b/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate rename to states/statefile/testdata/roundtrip/v3-bigint.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate b/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate rename to states/statefile/testdata/roundtrip/v3-bigint.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate b/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate rename to states/statefile/testdata/roundtrip/v3-builtin.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate b/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate rename to 
states/statefile/testdata/roundtrip/v3-builtin.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate b/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate rename to states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate b/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate rename to states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate b/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate rename to states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate b/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate rename to states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-simple.in.tfstate b/states/statefile/testdata/roundtrip/v3-simple.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-simple.in.tfstate rename to states/statefile/testdata/roundtrip/v3-simple.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v3-simple.out.tfstate b/states/statefile/testdata/roundtrip/v3-simple.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v3-simple.out.tfstate rename to states/statefile/testdata/roundtrip/v3-simple.out.tfstate diff --git 
a/internal/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate b/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate rename to states/statefile/testdata/roundtrip/v4-cbd.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate b/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate rename to states/statefile/testdata/roundtrip/v4-cbd.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate b/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate rename to states/statefile/testdata/roundtrip/v4-foreach.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate b/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate rename to states/statefile/testdata/roundtrip/v4-foreach.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-future.in.tfstate b/states/statefile/testdata/roundtrip/v4-future.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-future.in.tfstate rename to states/statefile/testdata/roundtrip/v4-future.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-future.out.tfstate b/states/statefile/testdata/roundtrip/v4-future.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-future.out.tfstate rename to states/statefile/testdata/roundtrip/v4-future.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate similarity index 100% rename 
from internal/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate b/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate rename to states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-modules.in.tfstate b/states/statefile/testdata/roundtrip/v4-modules.in.tfstate similarity index 
100% rename from internal/states/statefile/testdata/roundtrip/v4-modules.in.tfstate rename to states/statefile/testdata/roundtrip/v4-modules.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-modules.out.tfstate b/states/statefile/testdata/roundtrip/v4-modules.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-modules.out.tfstate rename to states/statefile/testdata/roundtrip/v4-modules.out.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-simple.in.tfstate b/states/statefile/testdata/roundtrip/v4-simple.in.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-simple.in.tfstate rename to states/statefile/testdata/roundtrip/v4-simple.in.tfstate diff --git a/internal/states/statefile/testdata/roundtrip/v4-simple.out.tfstate b/states/statefile/testdata/roundtrip/v4-simple.out.tfstate similarity index 100% rename from internal/states/statefile/testdata/roundtrip/v4-simple.out.tfstate rename to states/statefile/testdata/roundtrip/v4-simple.out.tfstate diff --git a/internal/states/statefile/version0.go b/states/statefile/version0.go similarity index 100% rename from internal/states/statefile/version0.go rename to states/statefile/version0.go diff --git a/internal/states/statefile/version1.go b/states/statefile/version1.go similarity index 99% rename from internal/states/statefile/version1.go rename to states/statefile/version1.go index 0b82a13e2290..2a5edc01bd3d 100644 --- a/internal/states/statefile/version1.go +++ b/states/statefile/version1.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { diff --git a/internal/states/statefile/version1_upgrade.go b/states/statefile/version1_upgrade.go similarity index 100% rename from internal/states/statefile/version1_upgrade.go rename to 
states/statefile/version1_upgrade.go diff --git a/internal/states/statefile/version2.go b/states/statefile/version2.go similarity index 99% rename from internal/states/statefile/version2.go rename to states/statefile/version2.go index 2c5908c37c09..9f74815ea566 100644 --- a/internal/states/statefile/version2.go +++ b/states/statefile/version2.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { diff --git a/internal/states/statefile/version2_upgrade.go b/states/statefile/version2_upgrade.go similarity index 100% rename from internal/states/statefile/version2_upgrade.go rename to states/statefile/version2_upgrade.go diff --git a/internal/states/statefile/version3.go b/states/statefile/version3.go similarity index 96% rename from internal/states/statefile/version3.go rename to states/statefile/version3.go index 480cae8f4e1c..ab6414b0a70a 100644 --- a/internal/states/statefile/version3.go +++ b/states/statefile/version3.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" ) func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { diff --git a/internal/states/statefile/version3_upgrade.go b/states/statefile/version3_upgrade.go similarity index 98% rename from internal/states/statefile/version3_upgrade.go rename to states/statefile/version3_upgrade.go index f46430af38a0..29c3eb77bc60 100644 --- a/internal/states/statefile/version3_upgrade.go +++ b/states/statefile/version3_upgrade.go @@ -10,10 +10,10 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/configs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + 
"github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { diff --git a/internal/states/statefile/version4.go b/states/statefile/version4.go similarity index 99% rename from internal/states/statefile/version4.go rename to states/statefile/version4.go index 2cdde4be7107..d9ff1ed1361b 100644 --- a/internal/states/statefile/version4.go +++ b/states/statefile/version4.go @@ -10,11 +10,11 @@ import ( "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" - "github.com/hashicorp/terraform/internal/lang/marks" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" + "github.com/hashicorp/terraform/lang/marks" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { diff --git a/internal/states/statefile/version4_test.go b/states/statefile/version4_test.go similarity index 99% rename from internal/states/statefile/version4_test.go rename to states/statefile/version4_test.go index d71d33734f05..f097a6069cc5 100644 --- a/internal/states/statefile/version4_test.go +++ b/states/statefile/version4_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/internal/tfdiags" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/states/statefile/write.go b/states/statefile/write.go similarity index 100% rename from internal/states/statefile/write.go rename to states/statefile/write.go diff --git a/internal/states/statemgr/doc.go b/states/statemgr/doc.go similarity index 100% rename from 
internal/states/statemgr/doc.go rename to states/statemgr/doc.go diff --git a/internal/states/statemgr/filesystem.go b/states/statemgr/filesystem.go similarity index 98% rename from internal/states/statemgr/filesystem.go rename to states/statemgr/filesystem.go index bdfc6832b5dd..c091bd9e7377 100644 --- a/internal/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -14,9 +14,9 @@ import ( multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" ) // Filesystem is a full state manager that uses a file in the local filesystem diff --git a/internal/states/statemgr/filesystem_lock_unix.go b/states/statemgr/filesystem_lock_unix.go similarity index 100% rename from internal/states/statemgr/filesystem_lock_unix.go rename to states/statemgr/filesystem_lock_unix.go diff --git a/internal/states/statemgr/filesystem_lock_windows.go b/states/statemgr/filesystem_lock_windows.go similarity index 100% rename from internal/states/statemgr/filesystem_lock_windows.go rename to states/statemgr/filesystem_lock_windows.go diff --git a/states/statemgr/filesystem_test.go b/states/statemgr/filesystem_test.go new file mode 100644 index 000000000000..53ef2932e6e3 --- /dev/null +++ b/states/statemgr/filesystem_test.go @@ -0,0 +1,453 @@ +package statemgr + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/go-test/deep" + version "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + tfversion "github.com/hashicorp/terraform/version" +) + +func TestFilesystem(t *testing.T) { + 
defer testOverrideVersion(t, "1.2.3")() + ls := testFilesystem(t) + defer os.Remove(ls.readPath) + TestFull(t, ls) +} + +func TestFilesystemRace(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + ls := testFilesystem(t) + defer os.Remove(ls.readPath) + + current := TestFullInitialState() + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + ls.WriteState(current) + }() + } + wg.Wait() +} + +func TestFilesystemLocks(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + s := testFilesystem(t) + defer os.Remove(s.readPath) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + + out, err := exec.Command("go", "run", "testdata/lockstate.go", s.path).CombinedOutput() + if err != nil { + t.Fatal("unexpected lock failure", err, string(out)) + } + + if !strings.Contains(string(out), "lock failed") { + t.Fatal("expected 'locked failed', got", string(out)) + } + + // check our lock info + lockInfo, err := s.lockInfo() + if err != nil { + t.Fatal(err) + } + + if lockInfo.Operation != "test" { + t.Fatalf("invalid lock info %#v\n", lockInfo) + } + + // a noop, since we unlock on exit + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + + // local locks can re-lock + lockID, err = s.Lock(info) + if err != nil { + t.Fatal(err) + } + + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + + // we should not be able to unlock the same lock twice + if err := s.Unlock(lockID); err == nil { + t.Fatal("unlocking an unlocked state should fail") + } + + // make sure lock info is gone + lockInfoPath := s.lockInfoPath() + if _, err := os.Stat(lockInfoPath); !os.IsNotExist(err) { + t.Fatal("lock info not removed") + } +} + +// Verify that we can write to the state file, as Windows' mandatory locking +// will prevent writing to a handle different than the one that hold the lock. 
+func TestFilesystem_writeWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + s := testFilesystem(t) + defer os.Remove(s.readPath) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + }() + + if err := s.WriteState(TestFullInitialState()); err != nil { + t.Fatal(err) + } +} + +func TestFilesystem_pathOut(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + defer os.Remove(f.Name()) + + ls := testFilesystem(t) + ls.path = f.Name() + defer os.Remove(ls.path) + + TestFull(t, ls) +} + +func TestFilesystem_backup(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + defer os.Remove(f.Name()) + + ls := testFilesystem(t) + backupPath := f.Name() + ls.SetBackupPath(backupPath) + + TestFull(t, ls) + + // The backup functionality should've saved a copy of the original state + // prior to all of the modifications that TestFull does. + bfh, err := os.Open(backupPath) + if err != nil { + t.Fatal(err) + } + bf, err := statefile.Read(bfh) + if err != nil { + t.Fatal(err) + } + origState := TestFullInitialState() + if !bf.State.Equal(origState) { + for _, problem := range deep.Equal(origState, bf.State) { + t.Error(problem) + } + } +} + +// This test verifies a particularly tricky behavior where the input file +// is overridden and backups are enabled at the same time. This combination +// requires special care because we must ensure that when we create a backup +// it is of the original contents of the output file (which we're overwriting), +// not the contents of the input file (which is left unchanged). 
+func TestFilesystem_backupAndReadPath(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + + workDir := t.TempDir() + + markerOutput := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) + + outState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-output-state"), + false, // not sensitive + ) + }) + outFile, err := os.Create(filepath.Join(workDir, "output.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary outFile %s", err) + } + defer outFile.Close() + err = statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: outState, + }, outFile) + if err != nil { + t.Fatalf("failed to write initial outfile state to %s: %s", outFile.Name(), err) + } + + inState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-input-state"), + false, // not sensitive + ) + }) + inFile, err := os.Create(filepath.Join(workDir, "input.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary inFile %s", err) + } + defer inFile.Close() + err = statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: inState, + }, inFile) + if err != nil { + t.Fatalf("failed to write initial infile state to %s: %s", inFile.Name(), err) + } + + backupPath := outFile.Name() + ".backup" + + ls := NewFilesystemBetweenPaths(inFile.Name(), outFile.Name()) + ls.SetBackupPath(backupPath) + + newState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-new-state"), + false, // not sensitive + ) + }) + err = ls.WriteState(newState) + if err != nil { + t.Fatalf("failed to write new state: %s", err) + } + + // The backup functionality should've saved a copy of the original contents + // of the _output_ file, even though the first snapshot was read 
from + // the _input_ file. + t.Run("backup file", func(t *testing.T) { + bfh, err := os.Open(backupPath) + if err != nil { + t.Fatal(err) + } + bf, err := statefile.Read(bfh) + if err != nil { + t.Fatal(err) + } + os := bf.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-output-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) + t.Run("output file", func(t *testing.T) { + ofh, err := os.Open(outFile.Name()) + if err != nil { + t.Fatal(err) + } + of, err := statefile.Read(ofh) + if err != nil { + t.Fatal(err) + } + os := of.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-new-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) +} + +func TestFilesystem_nonExist(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + ls := NewFilesystem("ishouldntexist") + if err := ls.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + if state := ls.State(); state != nil { + t.Fatalf("bad: %#v", state) + } +} + +func TestFilesystem_lockUnlockWithoutWrite(t *testing.T) { + info := NewLockInfo() + info.Operation = "test" + + ls := testFilesystem(t) + + // Delete the just-created tempfile so that Lock recreates it + os.Remove(ls.path) + + // Lock the state, and in doing so recreate the tempfile + lockID, err := ls.Lock(info) + if err != nil { + t.Fatal(err) + } + + if !ls.created { + t.Fatal("should have marked state as created") + } + + if err := ls.Unlock(lockID); err != nil { + t.Fatal(err) + } + + _, err = os.Stat(ls.path) + if os.IsNotExist(err) { + // Success! 
Unlocking the state successfully deleted the tempfile + return + } else if err != nil { + t.Fatalf("unexpected error from os.Stat: %s", err) + } else { + os.Remove(ls.readPath) + t.Fatal("should have removed path, but exists") + } +} + +func TestFilesystem_impl(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + var _ Reader = new(Filesystem) + var _ Writer = new(Filesystem) + var _ Persister = new(Filesystem) + var _ Refresher = new(Filesystem) + var _ OutputReader = new(Filesystem) + var _ Locker = new(Filesystem) +} + +func testFilesystem(t *testing.T) *Filesystem { + f, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatalf("failed to create temporary file %s", err) + } + t.Logf("temporary state file at %s", f.Name()) + + err = statefile.Write(&statefile.File{ + Lineage: "test-lineage", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: TestFullInitialState(), + }, f) + if err != nil { + t.Fatalf("failed to write initial state to %s: %s", f.Name(), err) + } + f.Close() + + ls := NewFilesystem(f.Name()) + if err := ls.RefreshState(); err != nil { + t.Fatalf("initial refresh failed: %s", err) + } + + return ls +} + +// Make sure we can refresh while the state is locked +func TestFilesystem_refreshWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = statefile.Write(&statefile.File{ + Lineage: "test-lineage", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: TestFullInitialState(), + }, f) + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + + s := NewFilesystem(f.Name()) + defer os.Remove(s.path) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + }() + + if err := s.RefreshState(); err != nil { + 
t.Fatal(err) + } + + readState := s.State() + if readState == nil { + t.Fatal("missing state") + } +} + +func TestFilesystem_GetRootOutputValues(t *testing.T) { + fs := testFilesystem(t) + + outputs, err := fs.GetRootOutputValues() + if err != nil { + t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) + } + + if len(outputs) != 2 { + t.Errorf("Expected %d outputs, but received %d", 2, len(outputs)) + } +} + +func testOverrideVersion(t *testing.T, v string) func() { + oldVersionStr := tfversion.Version + oldPrereleaseStr := tfversion.Prerelease + oldSemVer := tfversion.SemVer + + var newPrereleaseStr string + if dash := strings.Index(v, "-"); dash != -1 { + newPrereleaseStr = v[dash+1:] + v = v[:dash] + } + + newSemVer, err := version.NewVersion(v) + if err != nil { + t.Errorf("invalid override version %q: %s", v, err) + } + newVersionStr := newSemVer.String() + + tfversion.Version = newVersionStr + tfversion.Prerelease = newPrereleaseStr + tfversion.SemVer = newSemVer + + return func() { // reset function + tfversion.Version = oldVersionStr + tfversion.Prerelease = oldPrereleaseStr + tfversion.SemVer = oldSemVer + } +} diff --git a/states/statemgr/helper.go b/states/statemgr/helper.go new file mode 100644 index 000000000000..d4ba44509662 --- /dev/null +++ b/states/statemgr/helper.go @@ -0,0 +1,54 @@ +package statemgr + +// The functions in this file are helper wrappers for common sequences of +// operations done against full state managers. + +import ( + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" +) + +// NewStateFile creates a new statefile.File object, with a newly-minted +// lineage identifier and serial 0, and returns a pointer to it. 
+func NewStateFile() *statefile.File { + return &statefile.File{ + Lineage: NewLineage(), + TerraformVersion: version.SemVer, + State: states.NewState(), + } +} + +// RefreshAndRead refreshes the persistent snapshot in the given state manager +// and then returns it. +// +// This is a wrapper around calling RefreshState and then State on the given +// manager. +func RefreshAndRead(mgr Storage) (*states.State, error) { + err := mgr.RefreshState() + if err != nil { + return nil, err + } + return mgr.State(), nil +} + +// WriteAndPersist writes a snapshot of the given state to the given state +// manager's transient store and then immediately persists it. +// +// The caller must ensure that the given state is not concurrently modified +// while this function is running, but it is safe to modify it after this +// function has returned. +// +// If an error is returned, it is undefined whether the state has been saved +// to the transient store or not, and so the only safe response is to bail +// out quickly with a user-facing error. In situations where more control +// is required, call WriteState and PersistState on the state manager directly +// and handle their errors. 
+func WriteAndPersist(mgr Storage, state *states.State, schemas *terraform.Schemas) error { + err := mgr.WriteState(state) + if err != nil { + return err + } + return mgr.PersistState(schemas) +} diff --git a/internal/states/statemgr/lineage.go b/states/statemgr/lineage.go similarity index 100% rename from internal/states/statemgr/lineage.go rename to states/statemgr/lineage.go diff --git a/internal/states/statemgr/lock.go b/states/statemgr/lock.go similarity index 90% rename from internal/states/statemgr/lock.go rename to states/statemgr/lock.go index 863dc2f0dd18..0aeda6d8ecc5 100644 --- a/internal/states/statemgr/lock.go +++ b/states/statemgr/lock.go @@ -1,8 +1,8 @@ package statemgr import ( - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) // LockDisabled implements State and Locker but disables state locking. diff --git a/internal/states/statemgr/lock_test.go b/states/statemgr/lock_test.go similarity index 100% rename from internal/states/statemgr/lock_test.go rename to states/statemgr/lock_test.go diff --git a/internal/states/statemgr/locker.go b/states/statemgr/locker.go similarity index 100% rename from internal/states/statemgr/locker.go rename to states/statemgr/locker.go diff --git a/internal/states/statemgr/migrate.go b/states/statemgr/migrate.go similarity index 99% rename from internal/states/statemgr/migrate.go rename to states/statemgr/migrate.go index 099e26a88e74..9b55fe9a7016 100644 --- a/internal/states/statemgr/migrate.go +++ b/states/statemgr/migrate.go @@ -3,7 +3,7 @@ package statemgr import ( "fmt" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/states/statefile" ) // Migrator is an optional interface implemented by state managers that diff --git a/internal/states/statemgr/migrate_test.go b/states/statemgr/migrate_test.go similarity index 95% rename from 
internal/states/statemgr/migrate_test.go rename to states/statemgr/migrate_test.go index e9269a177039..0cf2113a252e 100644 --- a/internal/states/statemgr/migrate_test.go +++ b/states/statemgr/migrate_test.go @@ -5,9 +5,9 @@ import ( "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/states/statefile" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" ) func TestCheckValidImport(t *testing.T) { diff --git a/internal/states/statemgr/persistent.go b/states/statemgr/persistent.go similarity index 97% rename from internal/states/statemgr/persistent.go rename to states/statemgr/persistent.go index 70d709f85f4e..852015887968 100644 --- a/internal/states/statemgr/persistent.go +++ b/states/statemgr/persistent.go @@ -3,8 +3,8 @@ package statemgr import ( version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) // Persistent is a union of the Refresher and Persistent interfaces, for types diff --git a/states/statemgr/plan.go b/states/statemgr/plan.go new file mode 100644 index 000000000000..b5036030a640 --- /dev/null +++ b/states/statemgr/plan.go @@ -0,0 +1,71 @@ +package statemgr + +import ( + "fmt" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" +) + +// PlannedStateUpdate is a special helper to obtain a statefile representation +// of a not-yet-written state snapshot that can be written later by a call +// to the companion function WritePlannedStateUpdate. 
+// +// The statefile object returned here has an unusual interpretation of its +// metadata that is understood only by WritePlannedStateUpdate, and so the +// returned object should not be used for any other purpose. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. +// It is not safe to modify the given state concurrently while +// PlannedStateUpdate is running. +func PlannedStateUpdate(mgr Transient, planned *states.State) *statefile.File { + ret := &statefile.File{ + State: planned.DeepCopy(), + } + + // If the given manager uses snapshot metadata then we'll save that + // in our file so we can check it again during WritePlannedStateUpdate. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + ret.Lineage = m.Lineage + ret.Serial = m.Serial + } + + return ret +} + +// WritePlannedStateUpdate is a companion to PlannedStateUpdate that attempts +// to apply a state update that was planned earlier to the given state +// manager. +// +// An error is returned if this function detects that a new state snapshot +// has been written to the backend since the update was planned, since that +// invalidates the plan. An error is returned also if the manager itself +// rejects the given state when asked to store it. +// +// If the returned error is nil, the given manager's transient state snapshot +// is updated to match what was planned. It is the caller's responsibility +// to then persist that state if the manager also implements Persistent and +// the snapshot should be written to the persistent store. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. 
+func WritePlannedStateUpdate(mgr Transient, planned *statefile.File) error { + // If the given manager uses snapshot metadata then we'll check to make + // sure no new snapshots have been created since we planned to write + // the given state file. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + if planned.Lineage != "" { + if planned.Lineage != m.Lineage { + return fmt.Errorf("planned state update is from an unrelated state lineage than the current state") + } + if planned.Serial != m.Serial { + return fmt.Errorf("stored state has been changed by another operation since the given update was planned") + } + } + } + + return mgr.WriteState(planned.State) +} diff --git a/internal/states/statemgr/snapshotmetarel_string.go b/states/statemgr/snapshotmetarel_string.go similarity index 100% rename from internal/states/statemgr/snapshotmetarel_string.go rename to states/statemgr/snapshotmetarel_string.go diff --git a/internal/states/statemgr/statemgr.go b/states/statemgr/statemgr.go similarity index 100% rename from internal/states/statemgr/statemgr.go rename to states/statemgr/statemgr.go diff --git a/internal/states/statemgr/statemgr_fake.go b/states/statemgr/statemgr_fake.go similarity index 97% rename from internal/states/statemgr/statemgr_fake.go rename to states/statemgr/statemgr_fake.go index 985e6c677517..95a0449e236b 100644 --- a/internal/states/statemgr/statemgr_fake.go +++ b/states/statemgr/statemgr_fake.go @@ -4,8 +4,8 @@ import ( "errors" "sync" - "github.com/hashicorp/terraform/internal/states" - "github.com/hashicorp/terraform/internal/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) // NewFullFake returns a full state manager that really only supports transient diff --git a/internal/states/statemgr/statemgr_test.go b/states/statemgr/statemgr_test.go similarity index 97% rename from internal/states/statemgr/statemgr_test.go rename to states/statemgr/statemgr_test.go index 
e9e8226712c2..a49781208ea4 100644 --- a/internal/states/statemgr/statemgr_test.go +++ b/states/statemgr/statemgr_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - _ "github.com/hashicorp/terraform/internal/logging" + _ "github.com/hashicorp/terraform/logging" ) func TestNewLockInfo(t *testing.T) { diff --git a/internal/states/statemgr/testdata/lockstate.go b/states/statemgr/testdata/lockstate.go similarity index 88% rename from internal/states/statemgr/testdata/lockstate.go rename to states/statemgr/testdata/lockstate.go index a353900cfa9a..f0b336068f35 100644 --- a/internal/states/statemgr/testdata/lockstate.go +++ b/states/statemgr/testdata/lockstate.go @@ -5,7 +5,7 @@ import ( "log" "os" - "github.com/hashicorp/terraform/internal/states/statemgr" + "github.com/hashicorp/terraform/states/statemgr" ) // Attempt to open and lock a terraform state file. diff --git a/states/statemgr/testing.go b/states/statemgr/testing.go new file mode 100644 index 000000000000..584122473c81 --- /dev/null +++ b/states/statemgr/testing.go @@ -0,0 +1,163 @@ +package statemgr + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" +) + +// TestFull is a helper for testing full state manager implementations. It +// expects that the given implementation is pre-loaded with a snapshot of the +// result from TestFullInitialState. +// +// If the given state manager also implements PersistentMeta, this function +// will test that the snapshot metadata changes as expected between calls +// to the methods of Persistent. +func TestFull(t *testing.T, s Full) { + t.Helper() + + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that the initial state is correct. + // These do have different Lineages, but we will replace current below. 
+ initial := TestFullInitialState() + if state := s.State(); !state.Equal(initial) { + t.Fatalf("state does not match expected initial state\n\ngot:\n%s\nwant:\n%s", spew.Sdump(state), spew.Sdump(initial)) + } + + var initialMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + initialMeta = sm.StateSnapshotMeta() + } + + // Now we've proven that the state we're starting with is an initial + // state, we'll complete our work here with that state, since otherwise + // further writes would violate the invariant that we only try to write + // states that share the same lineage as what was initially written. + current := s.State() + + // Write a new state and verify that we have it + current.RootModule().SetOutputValue("bar", cty.StringVal("baz"), false) + + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + + if actual := s.State(); !actual.Equal(current) { + t.Fatalf("bad:\n%#v\n\n%#v", actual, current) + } + + // Test persistence + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + // Refresh if we got it + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + var newMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if got, want := newMeta.Lineage, initialMeta.Lineage; got != want { + t.Errorf("Lineage changed from %q to %q", want, got) + } + if after, before := newMeta.Serial, initialMeta.Serial; after == before { + t.Errorf("Serial didn't change from %d after new module added", before) + } + } + + // Same serial + serial := newMeta.Serial + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if newMeta.Serial != serial { + t.Fatalf("serial changed after persisting with no changes: got %d, want %d", newMeta.Serial, serial) + } + } + + if sm, ok := 
s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + } + + // Change the serial + current = current.DeepCopy() + current.EnsureModule(addrs.RootModuleInstance).SetOutputValue( + "serialCheck", cty.StringVal("true"), false, + ) + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + oldMeta := newMeta + newMeta = sm.StateSnapshotMeta() + + if newMeta.Serial <= serial { + t.Fatalf("serial incorrect after persisting with changes: got %d, want > %d", newMeta.Serial, serial) + } + + if newMeta.TerraformVersion != oldMeta.TerraformVersion { + t.Fatalf("TFVersion changed from %s to %s", oldMeta.TerraformVersion, newMeta.TerraformVersion) + } + + // verify that Lineage doesn't change along with Serial, or during copying. + if newMeta.Lineage != oldMeta.Lineage { + t.Fatalf("Lineage changed from %q to %q", oldMeta.Lineage, newMeta.Lineage) + } + } + + // Check that State() returns a copy by modifying the copy and comparing + // to the current state. + stateCopy := s.State() + stateCopy.EnsureModule(addrs.RootModuleInstance.Child("another", addrs.NoKey)) + if reflect.DeepEqual(stateCopy, s.State()) { + t.Fatal("State() should return a copy") + } + + // our current expected state should also marshal identically to the persisted state + if !statefile.StatesMarshalEqual(current, s.State()) { + t.Fatalf("Persisted state altered unexpectedly.\n\ngot:\n%s\nwant:\n%s", spew.Sdump(s.State()), spew.Sdump(current)) + } +} + +// TestFullInitialState is a state that should be snapshotted into a +// full state manager before passing it into TestFull. 
+func TestFullInitialState() *states.State { + state := states.NewState() + childMod := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + rAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "foo", + } + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider(rAddr.ImpliedProvider()), + Module: addrs.RootModule, + } + childMod.SetResourceProvider(rAddr, providerAddr) + + state.RootModule().SetOutputValue("sensitive_output", cty.StringVal("it's a secret"), true) + state.RootModule().SetOutputValue("nonsensitive_output", cty.StringVal("hello, world!"), false) + + return state +} diff --git a/internal/states/statemgr/transient.go b/states/statemgr/transient.go similarity index 98% rename from internal/states/statemgr/transient.go rename to states/statemgr/transient.go index e47683e98bb3..c62297754b2c 100644 --- a/internal/states/statemgr/transient.go +++ b/states/statemgr/transient.go @@ -1,6 +1,6 @@ package statemgr -import "github.com/hashicorp/terraform/internal/states" +import "github.com/hashicorp/terraform/states" // Transient is a union of the Reader and Writer interfaces, for types that // deal with transient snapshots. 
diff --git a/internal/states/statemgr/transient_inmem.go b/states/statemgr/transient_inmem.go similarity index 94% rename from internal/states/statemgr/transient_inmem.go rename to states/statemgr/transient_inmem.go index 4692225cb54d..07fd3726f560 100644 --- a/internal/states/statemgr/transient_inmem.go +++ b/states/statemgr/transient_inmem.go @@ -3,7 +3,7 @@ package statemgr import ( "sync" - "github.com/hashicorp/terraform/internal/states" + "github.com/hashicorp/terraform/states" ) // NewTransientInMemory returns a Transient implementation that retains diff --git a/internal/states/sync.go b/states/sync.go similarity index 99% rename from internal/states/sync.go rename to states/sync.go index d48fa755189c..6541cc22a769 100644 --- a/internal/states/sync.go +++ b/states/sync.go @@ -4,8 +4,8 @@ import ( "log" "sync" - "github.com/hashicorp/terraform/internal/addrs" - "github.com/hashicorp/terraform/internal/checks" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/checks" "github.com/zclconf/go-cty/cty" ) diff --git a/internal/terminal/impl_others.go b/terminal/impl_others.go similarity index 100% rename from internal/terminal/impl_others.go rename to terminal/impl_others.go diff --git a/internal/terminal/impl_windows.go b/terminal/impl_windows.go similarity index 100% rename from internal/terminal/impl_windows.go rename to terminal/impl_windows.go diff --git a/internal/terminal/stream.go b/terminal/stream.go similarity index 100% rename from internal/terminal/stream.go rename to terminal/stream.go diff --git a/internal/terminal/streams.go b/terminal/streams.go similarity index 100% rename from internal/terminal/streams.go rename to terminal/streams.go diff --git a/internal/terminal/streams_test.go b/terminal/streams_test.go similarity index 100% rename from internal/terminal/streams_test.go rename to terminal/streams_test.go diff --git a/internal/terminal/testing.go b/terminal/testing.go similarity index 100% rename from 
internal/terminal/testing.go rename to terminal/testing.go diff --git a/internal/tfdiags/config_traversals.go b/tfdiags/config_traversals.go similarity index 100% rename from internal/tfdiags/config_traversals.go rename to tfdiags/config_traversals.go diff --git a/internal/tfdiags/consolidate_warnings.go b/tfdiags/consolidate_warnings.go similarity index 100% rename from internal/tfdiags/consolidate_warnings.go rename to tfdiags/consolidate_warnings.go diff --git a/internal/tfdiags/consolidate_warnings_test.go b/tfdiags/consolidate_warnings_test.go similarity index 92% rename from internal/tfdiags/consolidate_warnings_test.go rename to tfdiags/consolidate_warnings_test.go index df94d4af8f90..4d3e1f0b60aa 100644 --- a/internal/tfdiags/consolidate_warnings_test.go +++ b/tfdiags/consolidate_warnings_test.go @@ -56,7 +56,7 @@ func TestConsolidateWarnings(t *testing.T) { got := diags.ConsolidateWarnings(2).ForRPC() want := Diagnostics{ // First set - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 1", Detail_: "This one has a subject 0", @@ -66,7 +66,7 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, }, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "Error 1", Detail_: "This one has a subject 0", @@ -76,18 +76,18 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, }, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 2", Detail_: "This one is sourceless 0", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 3", }, // Second set (consolidation begins; note additional paragraph in Warning 1 detail) - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 1", Detail_: "This one has a subject 1\n\n(and 2 more similar warnings elsewhere)", @@ -97,7 +97,7 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, 
}, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "Error 1", Detail_: "This one has a subject 1", @@ -107,18 +107,18 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, }, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 2", Detail_: "This one is sourceless 1", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 3", }, // Third set (no more Warning 1, because it's consolidated) - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "Error 1", Detail_: "This one has a subject 2", @@ -128,18 +128,18 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, }, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 2", Detail_: "This one is sourceless 2", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 3", }, // Fourth set (still no warning 1) - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "Error 1", Detail_: "This one has a subject 3", @@ -149,19 +149,19 @@ func TestConsolidateWarnings(t *testing.T) { End: SourcePos{Line: 1, Column: 1, Byte: 0}, }, }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 2", Detail_: "This one is sourceless 3", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 3", }, // Special straggler warning gets to show up unconsolidated, because // there is only one of it. 
- &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "Warning 4", Detail_: "Only one of this one", diff --git a/internal/tfdiags/contextual.go b/tfdiags/contextual.go similarity index 100% rename from internal/tfdiags/contextual.go rename to tfdiags/contextual.go diff --git a/internal/tfdiags/contextual_test.go b/tfdiags/contextual_test.go similarity index 100% rename from internal/tfdiags/contextual_test.go rename to tfdiags/contextual_test.go diff --git a/internal/tfdiags/diagnostic.go b/tfdiags/diagnostic.go similarity index 100% rename from internal/tfdiags/diagnostic.go rename to tfdiags/diagnostic.go diff --git a/internal/tfdiags/diagnostic_base.go b/tfdiags/diagnostic_base.go similarity index 100% rename from internal/tfdiags/diagnostic_base.go rename to tfdiags/diagnostic_base.go diff --git a/internal/tfdiags/diagnostic_extra.go b/tfdiags/diagnostic_extra.go similarity index 100% rename from internal/tfdiags/diagnostic_extra.go rename to tfdiags/diagnostic_extra.go diff --git a/internal/tfdiags/diagnostics.go b/tfdiags/diagnostics.go similarity index 100% rename from internal/tfdiags/diagnostics.go rename to tfdiags/diagnostics.go diff --git a/internal/tfdiags/diagnostics_test.go b/tfdiags/diagnostics_test.go similarity index 100% rename from internal/tfdiags/diagnostics_test.go rename to tfdiags/diagnostics_test.go diff --git a/internal/tfdiags/doc.go b/tfdiags/doc.go similarity index 100% rename from internal/tfdiags/doc.go rename to tfdiags/doc.go diff --git a/internal/tfdiags/error.go b/tfdiags/error.go similarity index 100% rename from internal/tfdiags/error.go rename to tfdiags/error.go diff --git a/internal/tfdiags/hcl.go b/tfdiags/hcl.go similarity index 100% rename from internal/tfdiags/hcl.go rename to tfdiags/hcl.go diff --git a/internal/tfdiags/hcl_test.go b/tfdiags/hcl_test.go similarity index 100% rename from internal/tfdiags/hcl_test.go rename to tfdiags/hcl_test.go diff --git a/tfdiags/rpc_friendly.go 
b/tfdiags/rpc_friendly.go new file mode 100644 index 000000000000..e8a7a42e6fc3 --- /dev/null +++ b/tfdiags/rpc_friendly.go @@ -0,0 +1,64 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiagTF struct { + Severity_ Severity + Summary_ string + Detail_ string + Subject_ *SourceRange + Context_ *SourceRange +} + +// rpcFriendlyDiagTF transforms a given diagnostic so that is more friendly to +// RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. +func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + source := diag.Source() + return &rpcFriendlyDiagTF{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + Subject_: source.Subject, + Context_: source.Context, + } +} + +func (d *rpcFriendlyDiagTF) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiagTF) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func (d *rpcFriendlyDiagTF) Source() Source { + return Source{ + Subject: d.Subject_, + Context: d.Context_, + } +} + +func (d rpcFriendlyDiagTF) FromExpr() *FromExpr { + // RPC-friendly diagnostics cannot preserve expression information because + // expressions themselves are not RPC-friendly. + return nil +} + +func (d rpcFriendlyDiagTF) ExtraInfo() interface{} { + // RPC-friendly diagnostics always discard any "extra information". 
+ return nil +} + +func init() { + gob.Register((*rpcFriendlyDiagTF)(nil)) +} diff --git a/internal/tfdiags/rpc_friendly_test.go b/tfdiags/rpc_friendly_test.go similarity index 95% rename from internal/tfdiags/rpc_friendly_test.go rename to tfdiags/rpc_friendly_test.go index bf5170753950..e9009e93cfd0 100644 --- a/internal/tfdiags/rpc_friendly_test.go +++ b/tfdiags/rpc_friendly_test.go @@ -45,15 +45,15 @@ func TestDiagnosticsForRPC(t *testing.T) { } want := Diagnostics{ - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "bad", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Warning, Summary_: "less bad", }, - &rpcFriendlyDiag{ + &rpcFriendlyDiagTF{ Severity_: Error, Summary_: "bad bad bad", Detail_: "badily bad bad", diff --git a/internal/tfdiags/severity_string.go b/tfdiags/severity_string.go similarity index 100% rename from internal/tfdiags/severity_string.go rename to tfdiags/severity_string.go diff --git a/internal/tfdiags/simple_warning.go b/tfdiags/simple_warning.go similarity index 100% rename from internal/tfdiags/simple_warning.go rename to tfdiags/simple_warning.go diff --git a/internal/tfdiags/source_range.go b/tfdiags/source_range.go similarity index 100% rename from internal/tfdiags/source_range.go rename to tfdiags/source_range.go diff --git a/internal/tfdiags/sourceless.go b/tfdiags/sourceless.go similarity index 100% rename from internal/tfdiags/sourceless.go rename to tfdiags/sourceless.go diff --git a/internal/tfplugin5/tfplugin5.pb.go b/tfplugin5/tfplugin5.pb.go similarity index 100% rename from internal/tfplugin5/tfplugin5.pb.go rename to tfplugin5/tfplugin5.pb.go diff --git a/internal/tfplugin5/tfplugin5.proto b/tfplugin5/tfplugin5.proto similarity index 100% rename from internal/tfplugin5/tfplugin5.proto rename to tfplugin5/tfplugin5.proto diff --git a/internal/tfplugin6/tfplugin6.pb.go b/tfplugin6/tfplugin6.pb.go similarity index 100% rename from internal/tfplugin6/tfplugin6.pb.go rename to 
tfplugin6/tfplugin6.pb.go diff --git a/internal/tfplugin6/tfplugin6.proto b/tfplugin6/tfplugin6.proto similarity index 100% rename from internal/tfplugin6/tfplugin6.proto rename to tfplugin6/tfplugin6.proto diff --git a/working_dir.go b/working_dir.go index 6d9945c0c5f5..da24627848ac 100644 --- a/working_dir.go +++ b/working_dir.go @@ -1,6 +1,6 @@ package main -import "github.com/hashicorp/terraform/internal/command/workdir" +import "github.com/hashicorp/terraform/command/workdir" func WorkingDir(originalDir string, overrideDataDir string) *workdir.Dir { ret := workdir.NewDir(".") // caller should already have used os.Chdir in "-chdir=..." mode